Posted to commits@cloudstack.apache.org by da...@apache.org on 2024/04/24 07:31:09 UTC

(cloudstack) branch main updated: Log4j2 refactor cloud engine orchestration module (#8742)

This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git


The following commit(s) were added to refs/heads/main by this push:
     new e330d76a72d Log4j2 refactor cloud engine orchestration module (#8742)
e330d76a72d is described below

commit e330d76a72d22ae860fa80af72f6dd2117e9606a
Author: Klaus de Freitas Dornsbach <kl...@gmail.com>
AuthorDate: Wed Apr 24 04:31:02 2024 -0300

    Log4j2 refactor cloud engine orchestration module (#8742)
    
    Co-authored-by: klaus.freitas.scclouds <kl...@scclouds.com.br>
---
 .../java/com/cloud/agent/manager/AgentAttache.java |  80 +--
 .../com/cloud/agent/manager/AgentManagerImpl.java  | 227 ++++----
 .../cloud/agent/manager/ClusteredAgentAttache.java |  30 +-
 .../agent/manager/ClusteredAgentManagerImpl.java   | 310 ++++-------
 .../cloud/agent/manager/ConnectedAgentAttache.java |   2 +-
 .../cloud/agent/manager/DirectAgentAttache.java    |  58 +--
 .../cloud/agent/manager/SynchronousListener.java   |   9 +-
 .../com/cloud/vm/VirtualMachineManagerImpl.java    | 573 ++++++++-------------
 .../cloud/vm/VirtualMachinePowerStateSyncImpl.java |  66 +--
 .../java/com/cloud/vm/VmWorkJobDispatcher.java     |  22 +-
 .../cloud/entity/api/VMEntityManagerImpl.java      |   4 +-
 .../engine/orchestration/DataMigrationUtility.java |   6 +-
 .../engine/orchestration/NetworkOrchestrator.java  | 368 ++++++-------
 .../engine/orchestration/StorageOrchestrator.java  |  14 +-
 .../engine/orchestration/VolumeOrchestrator.java   | 150 +++---
 15 files changed, 745 insertions(+), 1174 deletions(-)
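
The pattern applied throughout this commit replaces eager string concatenation
(often wrapped in an isDebugEnabled()/isTraceEnabled() guard) with Log4j2
parameterized messages, whose {} placeholders are only substituted once the
level is known to be enabled. A minimal before/after sketch, adapted from
AgentManagerImpl below:

    // Before: the message is concatenated even when debug logging is off,
    // which is why the guard was needed
    if (logger.isDebugEnabled()) {
        logger.debug("Remove Agent : " + hostId);
    }

    // After: formatting is deferred, so the guard becomes redundant
    logger.debug("Remove Agent : {}", hostId);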

diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java
index 1b7069c6e73..22c0b3fd71a 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java
@@ -105,6 +105,8 @@ public abstract class AgentAttache {
         }
     };
 
+    protected String LOG_SEQ_FORMATTED_STRING; // per-attache: embeds this attache's id
+
     protected final long _id;
     protected String _name = null;
     protected final ConcurrentHashMap<Long, Listener> _waitForList;
@@ -137,6 +139,7 @@ public abstract class AgentAttache {
         _requests = new LinkedList<Request>();
         _agentMgr = agentMgr;
         _nextSequence = new Long(s_rand.nextInt(Short.MAX_VALUE)).longValue() << 48;
+        LOG_SEQ_FORMATTED_STRING = String.format("Seq %d-{}: {}", _id);
     }
 
     public synchronized long getNextSequence() {
@@ -197,9 +200,7 @@ public abstract class AgentAttache {
     }
 
     protected synchronized void cancel(final long seq) {
-        if (logger.isDebugEnabled()) {
-            logger.debug(log(seq, "Cancelling."));
-        }
+        logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling.");
         final Listener listener = _waitForList.remove(seq);
         if (listener != null) {
             listener.processDisconnect(_id, Status.Disconnected);
@@ -218,14 +219,8 @@ public abstract class AgentAttache {
         return Collections.binarySearch(_requests, seq, s_seqComparator);
     }
 
-    protected String log(final long seq, final String msg) {
-        return "Seq " + _id + "-" + seq + ": " + msg;
-    }
-
     protected void registerListener(final long seq, final Listener listener) {
-        if (logger.isTraceEnabled()) {
-            logger.trace(log(seq, "Registering listener"));
-        }
+        logger.trace(LOG_SEQ_FORMATTED_STRING, seq, "Registering listener");
         if (listener.getTimeout() != -1) {
             s_listenerExecutor.schedule(new Alarm(seq), listener.getTimeout(), TimeUnit.SECONDS);
         }
@@ -233,9 +228,7 @@ public abstract class AgentAttache {
     }
 
     protected Listener unregisterListener(final long sequence) {
-        if (logger.isTraceEnabled()) {
-            logger.trace(log(sequence, "Unregistering listener"));
-        }
+        logger.trace(LOG_SEQ_FORMATTED_STRING, sequence, "Unregistering listener");
         return _waitForList.remove(sequence);
     }
 
@@ -267,7 +260,7 @@ public abstract class AgentAttache {
                 final Listener monitor = entry.getValue();
                 if (!monitor.isRecurring()) {
                     //TODO - remove this debug statement later
-                    logger.debug("Listener is " + entry.getValue() + " waiting on " + entry.getKey());
+                    logger.debug("Listener is {} waiting on {}", entry.getValue(), entry.getKey());
                     nonRecurringListenersList.add(monitor);
                 }
             }
@@ -290,15 +283,10 @@ public abstract class AgentAttache {
                 if (answers[0] != null && answers[0].getResult()) {
                     processed = true;
                 }
-                if (logger.isDebugEnabled()) {
-                    logger.debug(log(seq, "Unable to find listener."));
-                }
+                logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Unable to find listener.");
             } else {
                 processed = monitor.processAnswers(_id, seq, answers);
-                if (logger.isTraceEnabled()) {
-                    logger.trace(log(seq, (processed ? "" : " did not ") + " processed "));
-                }
-
+                logger.trace(LOG_SEQ_FORMATTED_STRING, seq, processed ? "processed" : "not processed");
                 if (!monitor.isRecurring()) {
                     unregisterListener(seq);
                 }
@@ -324,9 +312,7 @@ public abstract class AgentAttache {
                 final Map.Entry<Long, Listener> entry = it.next();
                 it.remove();
                 final Listener monitor = entry.getValue();
-                if (logger.isDebugEnabled()) {
-                    logger.debug(log(entry.getKey(), "Sending disconnect to " + monitor.getClass()));
-                }
+                logger.debug(LOG_SEQ_FORMATTED_STRING, entry.getKey(), "Sending disconnect to " + monitor.getClass());
                 monitor.processDisconnect(_id, state);
             }
         }
@@ -357,9 +343,9 @@
         long seq = req.getSequence();
         if (listener != null) {
             registerListener(seq, listener);
-        } else if (logger.isDebugEnabled()) {
-            logger.debug(log(seq, "Routed from " + req.getManagementServerId()));
+        } else {
+            logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Routed from " + req.getManagementServerId());
         }
 
         synchronized (this) {
             try {
@@ -381,16 +366,14 @@ public abstract class AgentAttache {
 
                 if (req.executeInSequence() && _currentSequence == null) {
                     _currentSequence = seq;
-                    if (logger.isTraceEnabled()) {
-                        logger.trace(log(seq, " is current sequence"));
-                    }
+                    logger.trace(LOG_SEQ_FORMATTED_STRING, seq, " is current sequence");
                 }
             } catch (AgentUnavailableException e) {
-                logger.info(log(seq, "Unable to send due to " + e.getMessage()));
+                logger.info(LOG_SEQ_FORMATTED_STRING, seq, "Unable to send due to " + e.getMessage());
                 cancel(seq);
                 throw e;
             } catch (Exception e) {
-                logger.warn(log(seq, "Unable to send due to "), e);
+                logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Unable to send due to " + e.getMessage(), e);
                 cancel(seq);
                 throw new AgentUnavailableException("Problem due to other exception " + e.getMessage(), _id);
             }
@@ -409,20 +392,16 @@ public abstract class AgentAttache {
                 try {
                     answers = sl.waitFor(wait);
                 } catch (final InterruptedException e) {
-                    logger.debug(log(seq, "Interrupted"));
+                    logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Interrupted");
                 }
                 if (answers != null) {
-                    if (logger.isDebugEnabled()) {
-                        new Response(req, answers).logD("Received: ", false);
-                    }
+                    new Response(req, answers).logD("Received: ", false);
                     return answers;
                 }
 
                 answers = sl.getAnswers(); // Try it again.
                 if (answers != null) {
-                    if (logger.isDebugEnabled()) {
-                        new Response(req, answers).logD("Received after timeout: ", true);
-                    }
+                    new Response(req, answers).logD("Received after timeout: ", true);
 
                     _agentMgr.notifyAnswersToMonitors(_id, seq, answers);
                     return answers;
@@ -430,21 +409,16 @@ public abstract class AgentAttache {
 
                 final Long current = _currentSequence;
                 if (current != null && seq != current) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(log(seq, "Waited too long."));
-                    }
+                    logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Waited too long.");
 
                     throw new OperationTimedoutException(req.getCommands(), _id, seq, wait, false);
                 }
-
-                if (logger.isDebugEnabled()) {
-                    logger.debug(log(seq, "Waiting some more time because this is the current command"));
-                }
+                logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Waiting some more time because this is the current command");
             }
 
             throw new OperationTimedoutException(req.getCommands(), _id, seq, wait * 2, true);
         } catch (OperationTimedoutException e) {
-            logger.warn(log(seq, "Timed out on " + req.toString()));
+            logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Timed out on " + req.toString());
             cancel(seq);
             final Long current = _currentSequence;
             if (req.executeInSequence() && (current != null && current == seq)) {
@@ -452,7 +426,7 @@ public abstract class AgentAttache {
             }
             throw e;
         } catch (Exception e) {
-            logger.warn(log(seq, "Exception while waiting for answer"), e);
+            logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception while waiting for answer", e);
             cancel(seq);
             final Long current = _currentSequence;
             if (req.executeInSequence() && (current != null && current == seq)) {
@@ -467,22 +441,16 @@ public abstract class AgentAttache {
     protected synchronized void sendNext(final long seq) {
         _currentSequence = null;
         if (_requests.isEmpty()) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(log(seq, "No more commands found"));
-            }
+            logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "No more commands found");
             return;
         }
 
         Request req = _requests.pop();
-        if (logger.isDebugEnabled()) {
-            logger.debug(log(req.getSequence(), "Sending now.  is current sequence."));
-        }
+        logger.debug(LOG_SEQ_FORMATTED_STRING, req.getSequence(), "Sending now.  is current sequence.");
         try {
             send(req);
         } catch (AgentUnavailableException e) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(log(req.getSequence(), "Unable to send the next sequence"));
-            }
+            logger.debug(LOG_SEQ_FORMATTED_STRING, req.getSequence(), "Unable to send the next sequence");
             cancel(req.getSequence());
         }
         _currentSequence = req.getSequence();
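
A note on the LOG_SEQ_FORMATTED_STRING field introduced above: it combines two
formatting passes. String.format() resolves the %d with the attache id when the
attache is constructed, leaving the Log4j2 {} placeholders to be filled on each
call. A minimal sketch, with illustrative values:

    long id = 42L;                                    // attache id
    String fmt = String.format("Seq %d-{}: {}", id);  // -> "Seq 42-{}: {}"
    logger.debug(fmt, 7L, "Cancelling.");             // -> "Seq 42-7: Cancelling."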
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
index 50c7b151d77..b693729f841 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
@@ -208,12 +208,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     @Override
     public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
 
-        logger.info("Ping Timeout is " + mgmtServiceConf.getPingTimeout());
+        logger.info("Ping Timeout is {}.", mgmtServiceConf.getPingTimeout());
 
         final int threads = DirectAgentLoadSize.value();
 
         _nodeId = ManagementServerNode.getManagementServerId();
-        logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId);
+        logger.info("Configuring AgentManagerImpl. management server node id(msid): {}.", _nodeId);
 
         final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout();
         _hostDao.markHostsAsDisconnected(_nodeId, lastPing);
@@ -229,13 +229,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         _connectExecutor.allowCoreThreadTimeOut(true);
 
         _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService);
-        logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers");
+        logger.info("Listening on {} with {} workers.", Port.value(), Workers.value());
 
         // executes all agent commands other than cron and ping
         _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent"));
         // executes cron and ping agent commands
         _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob"));
-        logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value());
+        logger.debug("Created DirectAgentAttache pool with size: {}.", DirectAgentPoolSize.value());
         _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0
 
         _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor"));
@@ -266,9 +266,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                     _cmdMonitors.add(new Pair<Integer, Listener>(_monitorId, listener));
                 }
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId);
-            }
+            logger.debug("Registering listener {} with id {}", listener.getClass().getSimpleName(), _monitorId);
             return _monitorId;
         }
     }
@@ -288,7 +286,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 
     @Override
     public void unregisterForHostEvents(final int id) {
-        logger.debug("Deregistering " + id);
+        logger.debug("Deregistering {}", id);
         _hostMonitors.remove(id);
     }
 
@@ -303,16 +301,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             }
         }
 
-        logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId());
+        logger.warn("No handling of agent control command: {} sent from {}", cmd, attache.getId());
         return new AgentControlAnswer(cmd);
     }
 
     public void handleCommands(final AgentAttache attache, final long sequence, final Command[] cmds) {
         for (final Pair<Integer, Listener> listener : _cmdMonitors) {
             final boolean processed = listener.second().processCommands(attache.getId(), sequence, cmds);
-            if (logger.isTraceEnabled()) {
-                logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass());
-            }
+            logger.trace("SeqA {}-{}: {} by {}", attache.getId(), sequence, (processed ? "processed" : "not processed"), listener.getClass());
         }
     }
 
@@ -349,9 +345,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                     String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(),
                             host.getUuid(), e.getLocalizedMessage());
                     logger.error(errorMsg);
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(errorMsg, e);
-                    }
+                    logger.debug(errorMsg, e);
                 }
                 if (answer != null) {
                     return answer;
@@ -372,7 +366,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         }
 
         if (answers != null && answers[0] instanceof UnsupportedAnswer) {
-            logger.warn("Unsupported Command: " + answers[0].getDetails());
+            logger.warn("Unsupported Command: {}", answers[0].getDetails());
             return answers[0];
         }
 
@@ -469,15 +463,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         final Long hostId = agent.getId();
         final HostVO host = _hostDao.findById(hostId);
         if (host != null && host.getType() != null && !host.getType().isVirtual()) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("checking if agent (" + hostId + ") is alive");
-            }
+            logger.debug("Checking if agent ({}) is alive", hostId);
             final Answer answer = easySend(hostId, new CheckHealthCommand());
             if (answer != null && answer.getResult()) {
                 final Status status = Status.Up;
-                if (logger.isDebugEnabled()) {
-                    logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status);
-                }
+                logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", hostId, status);
                 return status;
             }
             return _haMgr.investigate(hostId);
@@ -491,7 +481,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         }
         final AgentAttache agent = findAttache(hostId);
         if (agent == null) {
-            logger.debug("Unable to find agent for " + hostId);
+            logger.debug("Unable to find agent for {}", hostId);
             throw new AgentUnavailableException("Unable to find agent ", hostId);
         }
 
@@ -519,9 +509,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             return;
         }
         final long hostId = attache.getId();
-        if (logger.isDebugEnabled()) {
-            logger.debug("Remove Agent : " + hostId);
-        }
+        logger.debug("Remove Agent : {}", hostId);
         AgentAttache removed = null;
         boolean conflict = false;
         synchronized (_agents) {
@@ -533,16 +521,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             }
         }
         if (conflict) {
-            logger.debug("Agent for host " + hostId + " is created when it is being disconnected");
+            logger.debug("Agent for host {} is created when it is being disconnected", hostId);
         }
         if (removed != null) {
             removed.disconnect(nextState);
         }
 
         for (final Pair<Integer, Listener> monitor : _hostMonitors) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
-            }
+            logger.debug("Sending Disconnect to listener: {}", monitor.second().getClass().getName());
             monitor.second().processDisconnect(hostId, nextState);
         }
     }
@@ -550,9 +536,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     @Override
     public void notifyMonitorsOfNewlyAddedHost(long hostId) {
         for (final Pair<Integer, Listener> monitor : _hostMonitors) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName());
-            }
+            logger.debug("Sending host added to listener: {}", monitor.second().getClass().getSimpleName());
 
             monitor.second().processHostAdded(hostId);
         }
@@ -562,9 +546,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         final long hostId = attache.getId();
         final HostVO host = _hostDao.findById(hostId);
         for (final Pair<Integer, Listener> monitor : _hostMonitors) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName());
-            }
+            logger.debug("Sending Connect to listener: {}", monitor.second().getClass().getSimpleName());
             for (int i = 0; i < cmd.length; i++) {
                 try {
                     monitor.second().processConnect(host, cmd[i], forRebalance);
@@ -584,7 +566,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                         handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
                         throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
                     } else {
-                        logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e);
+                        logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e);
                         handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
                         throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
                     }
@@ -607,7 +589,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             Map<String, String> detailsMap = readyAnswer.getDetailsMap();
             if (detailsMap != null) {
                 String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE);
-                logger.debug(String.format("Got HOST_UEFI_ENABLE [%s] for hostId [%s]:", uefiEnabled, host.getUuid()));
+                logger.debug("Got HOST_UEFI_ENABLE [{}] for hostId [{}]:", uefiEnabled, host.getUuid());
                 if (uefiEnabled != null) {
                     _hostDao.loadDetails(host);
                     if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) {
@@ -701,7 +683,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             try {
                 resource.configure(host.getName(), params);
             } catch (final ConfigurationException e) {
-                logger.warn("Unable to configure resource due to " + e.getMessage());
+                logger.warn("Unable to configure resource due to {}", e.getMessage());
                 return null;
             }
 
@@ -724,14 +706,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             // load the respective discoverer
             final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType());
             if (discoverer == null) {
-                logger.info("Could not to find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType());
+                logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host.getId(), host.getHypervisorType());
                 resource = loadResourcesWithoutHypervisor(host);
             } else {
                 resource = discoverer.reloadResource(host);
             }
 
             if (resource == null) {
-                logger.warn("Unable to load the resource: " + host.getId());
+                logger.warn("Unable to load the resource: {}", host.getId());
                 return false;
             }
 
@@ -757,7 +739,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     }
 
     protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException {
-        logger.debug("create DirectAgentAttache for " + host.getId());
+        logger.debug("create DirectAgentAttache for {}", host.getId());
         final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates());
 
         AgentAttache old = null;
@@ -778,14 +760,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             _connection.stop();
         }
 
-        logger.info("Disconnecting agents: " + _agents.size());
+        logger.info("Disconnecting agents: {}", _agents.size());
         synchronized (_agents) {
             for (final AgentAttache agent : _agents.values()) {
                 final HostVO host = _hostDao.findById(agent.getId());
                 if (host == null) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Cant not find host " + agent.getId());
-                    }
+                    logger.debug("Cant not find host {}", agent.getId());
                 } else {
                     if (!agent.forForward()) {
                         agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId);
@@ -803,9 +783,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         final Status currentStatus = host.getStatus();
         Status nextStatus;
         if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("Host %s is already %s", host.getUuid(), currentStatus));
-            }
+            logger.debug("Host {} is already {}", host.getUuid(), currentStatus);
             nextStatus = currentStatus;
         } else {
             try {
@@ -815,10 +793,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                 logger.debug(err);
                 throw new CloudRuntimeException(err);
             }
-
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("The next status of agent %s is %s, current status is %s", host.getUuid(), nextStatus, currentStatus));
-            }
+            logger.debug("The next status of agent {} is {}, current status is {}", host.getUuid(), nextStatus, currentStatus);
         }
         return nextStatus;
     }
@@ -830,20 +805,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         GlobalLock joinLock = getHostJoinLock(hostId);
         if (joinLock.lock(60)) {
             try {
-                logger.info(String.format("Host %d is disconnecting with event %s", hostId, event));
+                logger.info("Host {} is disconnecting with event {}", hostId, event);
                 Status nextStatus = null;
                 final HostVO host = _hostDao.findById(hostId);
                 if (host == null) {
-                    logger.warn(String.format("Can't find host with %d", hostId));
+                    logger.warn("Can't find host with {}", hostId);
                     nextStatus = Status.Removed;
                 } else {
                     nextStatus = getNextStatusOnDisconnection(host, event);
                     caService.purgeHostCertificate(host);
                 }
-
-                if (logger.isDebugEnabled()) {
-                    logger.debug(String.format("Deregistering link for %d with state %s", hostId, nextStatus));
-                }
+                logger.debug("Deregistering link for {} with state {}", hostId, nextStatus);
 
                 removeAgent(attache, nextStatus);
 
@@ -878,22 +850,22 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 
             if (nextStatus == Status.Alert) {
                 /* OK, we are going to the bad status, let's see what happened */
-                logger.info("Investigating why host " + hostId + " has disconnected with event " + event);
+                logger.info("Investigating why host {} has disconnected with event", hostId, event);
 
                 Status determinedState = investigate(attache);
                 // if state cannot be determined do nothing and bail out
                 if (determinedState == null) {
                     if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) {
-                        logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state");
+                        logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", hostId, AlertWait, AlertWait.value());
                         determinedState = Status.Alert;
                     } else {
-                        logger.warn("Agent " + hostId + " state cannot be determined, do nothing");
+                        logger.warn("Agent {} state cannot be determined, do nothing", hostId);
                         return false;
                     }
                 }
 
                 final Status currentStatus = host.getStatus();
-                logger.info("The agent from host " + hostId + " state determined is " + determinedState);
+                logger.info("The agent from host {} state determined is {}", hostId, determinedState);
 
                 if (determinedState == Status.Down) {
                     final String message = "Host is down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs";
@@ -908,15 +880,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                     agentStatusTransitTo(host, Status.Event.Ping, _nodeId);
                     return false;
                 } else if (determinedState == Status.Disconnected) {
-                    logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName() +
+                    logger.warn("Agent is disconnected but the host is still up: {}-{}", host.getId(), host.getName() +
                             '-' + host.getResourceState());
                     if (currentStatus == Status.Disconnected ||
                             (currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) {
                         if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) {
-                            logger.warn("Host " + host.getId() + " has been disconnected past the wait time it should be disconnected.");
+                            logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host.getId());
                             event = Status.Event.WaitedTooLong;
                         } else {
-                            logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet.");
+                            logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host.getId());
                             return false;
                         }
                     } else if (currentStatus == Status.Up) {
@@ -939,7 +911,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                             "In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName());
                 }
             } else {
-                logger.debug("The next status of agent " + host.getId() + " is not Alert, no need to investigate what happened");
+                logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host.getId());
             }
         }
         handleDisconnectWithoutInvestigation(attache, event, true, true);
@@ -980,12 +952,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         try {
             final Host h = _hostDao.findById(hostId);
             if (h == null || h.getRemoved() != null) {
-                logger.debug("Host with id " + hostId + " doesn't exist");
+                logger.debug("Host with id {} doesn't exist", hostId);
                 return null;
             }
             final Status status = h.getStatus();
             if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) {
-                logger.debug("Can not send command " + cmd + " due to Host " + hostId + " is not up");
+                logger.debug("Can not send command {} due to Host {} not being up", cmd, hostId);
                 return null;
             }
             final Answer answer = send(hostId, cmd);
@@ -995,7 +967,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             }
 
             if (logger.isDebugEnabled() && answer.getDetails() != null) {
-                logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails());
+                logger.debug("Details from executing {}: {}", cmd.getClass(), answer.getDetails());
             }
 
             return answer;
@@ -1004,7 +976,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             logger.warn(e.getMessage());
             return null;
         } catch (final OperationTimedoutException e) {
-            logger.warn("Operation timed out: " + e.getMessage());
+            logger.warn("Operation timed out: {}", e.getMessage());
             return null;
         } catch (final Exception e) {
             logger.warn("Exception while sending", e);
@@ -1035,7 +1007,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         }
 
         if (host.getStatus() == Status.Disconnected) {
-            logger.debug("Host is already disconnected, no work to be done: " + hostId);
+            logger.debug("Host is already disconnected, no work to be done: {}", hostId);
             return;
         }
 
@@ -1053,9 +1025,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     @Override
     public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) {
         for (final Pair<Integer, Listener> monitor : _hostMonitors) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName());
-            }
+            logger.debug("Sending host about to be removed to listener: {}", monitor.second().getClass().getSimpleName());
 
             monitor.second().processHostAboutToBeRemoved(hostId);
         }
@@ -1064,9 +1034,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     @Override
     public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) {
         for (final Pair<Integer, Listener> monitor : _hostMonitors) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName());
-            }
+            logger.debug("Sending host removed to listener: {}", monitor.second().getClass().getSimpleName());
 
             monitor.second().processHostRemoved(hostId, clusterId);
         }
@@ -1074,9 +1042,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 
     public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
         if (event == Event.AgentDisconnected) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Received agent disconnect event for host " + hostId);
-            }
+            logger.debug("Received agent disconnect event for host {}", hostId);
             AgentAttache attache = null;
             attache = findAttache(hostId);
             if (attache != null) {
@@ -1088,7 +1054,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             try {
                 reconnect(hostId);
             } catch (CloudRuntimeException e) {
-                logger.debug("Error on shutdown request for hostID: " + hostId, e);
+                logger.debug("Error on shutdown request for hostID: {}", hostId, e);
                 return false;
             }
             return true;
@@ -1103,7 +1069,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     }
 
     protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException {
-        logger.debug("create ConnectedAgentAttache for " + host.getId());
+        logger.debug("create ConnectedAgentAttache for {}", host.getId());
         final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
         link.attach(attache);
 
@@ -1141,7 +1107,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                     ready.setMsHostList(newMSList);
                     ready.setLbAlgorithm(indirectAgentLB.getLBAlgorithmName());
                     ready.setLbCheckInterval(indirectAgentLB.getLBPreferredHostCheckInterval(host.getClusterId()));
-                    logger.debug("Agent's management server host list is not up to date, sending list update:" + newMSList);
+                    logger.debug("Agent's management server host list is not up to date, sending list update: {}", newMSList);
                 }
 
                 attache = createAttacheForConnect(host, link);
@@ -1202,28 +1168,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         @Override
         protected void runInContext() {
             try {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Simulating start for resource " + resource.getName() + " id " + id);
-                }
+                logger.debug("Simulating start for resource {} id {}", resource.getName(), id);
 
                 if (tapLoadingAgents(id, TapAgentsAction.Add)) {
                     try {
                         final AgentAttache agentattache = findAttache(id);
                         if (agentattache == null) {
-                            logger.debug("Creating agent for host " + id);
+                            logger.debug("Creating agent for host {}", id);
                             _resourceMgr.createHostAndAgent(id, resource, details, false, null, false);
-                            logger.debug("Completed creating agent for host " + id);
+                            logger.debug("Completed creating agent for host {}", id);
                         } else {
-                            logger.debug("Agent already created in another thread for host " + id + ", ignore this");
+                            logger.debug("Agent already created in another thread for host {}, ignore this", id);
                         }
                     } finally {
                         tapLoadingAgents(id, TapAgentsAction.Del);
                     }
                 } else {
-                    logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this");
+                    logger.debug("Agent creation already getting processed in another thread for host {}, ignore this", id);
                 }
             } catch (final Exception e) {
-                logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e);
+                logger.warn("Unable to simulate start on resource {} name {}", id, resource.getName(), e);
             }
         }
     }
@@ -1249,7 +1213,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 
             final AgentAttache attache = handleConnectedAgent(_link, startups, _request);
             if (attache == null) {
-                logger.warn("Unable to create attache for agent: " + _request);
+                logger.warn("Unable to create attache for agent: {}", _request);
             }
         }
     }
@@ -1272,7 +1236,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         try {
             link.send(response.toBytes());
         } catch (final ClosedChannelException e) {
-            logger.debug("Failed to send startupanswer: " + e.toString());
+            logger.debug("Failed to send startupanswer: {}", e.toString());
         }
         _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request));
     }
@@ -1288,25 +1252,25 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             }
             HostVO host = _hostDao.findById(hostId);
             if (host == null) {
-                logger.error(String.format("Unable to find host with ID: %s", hostId));
+                logger.error("Unable to find host with ID: {}", hostId);
                 return;
             }
             if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) {
-                logger.debug(String.format("%s is disabled for the cluster %s, cannot process the health check result " +
-                        "received for the host %s", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName()));
+                logger.debug("{} is disabled for the cluster {}, cannot process the health check result " +
+                        "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName());
                 return;
             }
 
             ResourceState.Event resourceEvent = hostHealthCheckResult ? ResourceState.Event.Enable : ResourceState.Event.Disable;
 
             try {
-                logger.info(String.format("Host health check %s, auto %s KVM host: %s",
+                logger.info("Host health check {}, auto {} KVM host: {}",
                         hostHealthCheckResult ? "succeeds" : "fails",
                         hostHealthCheckResult ? "enabling" : "disabling",
-                        host.getName()));
+                        host.getName());
                 _resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent);
             } catch (NoTransitionException e) {
-                logger.error(String.format("Cannot Auto %s host: %s", resourceEvent, host.getName()), e);
+                logger.error("Cannot Auto {} host: {}", resourceEvent, host.getName(), e);
             }
         }
 
@@ -1336,7 +1300,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 
             if (attache == null) {
                 if (!(cmd instanceof StartupCommand)) {
-                    logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
+                    logger.warn("Throwing away a request because it came through as the first command on a connect: {}", request);
                 } else {
                     // submit the task for execution
                     request.logD("Scheduling the first command ");
@@ -1353,14 +1317,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             if (logger.isDebugEnabled()) {
                 if (cmd instanceof PingRoutingCommand) {
                     logD = false;
-                    logger.debug("Ping from Routing host " + hostId + "(" + hostName + ")");
-                    logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
+                    logger.debug("Ping from Routing host {}({})", hostId, hostName);
+                    logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request);
                 } else if (cmd instanceof PingCommand) {
                     logD = false;
-                    logger.debug("Ping from " + hostId + "(" + hostName + ")");
-                    logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
+                    logger.debug("Ping from {}({})", hostId, hostName);
+                    logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request);
                 } else {
-                    logger.debug("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
+                    logger.debug("SeqA {}-{}: {}", hostId, request.getSequence(), request);
                 }
             }
 
@@ -1385,7 +1349,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                     } else if (cmd instanceof ShutdownCommand) {
                         final ShutdownCommand shutdown = (ShutdownCommand)cmd;
                         final String reason = shutdown.getReason();
-                        logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + shutdown.getDetail());
+                        logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache.getId(), reason, shutdown.getDetail());
                         if (reason.equals(ShutdownCommand.Update)) {
                             // disconnectWithoutInvestigation(attache, Event.UpdateNeeded);
                             throw new CloudRuntimeException("Agent update not implemented");
@@ -1423,7 +1387,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                                         _alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId());
                                     }
                                 } else {
-                                    logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + "; can't find the host in the DB");
+                                    logger.debug("Not processing {} for agent id={}; can't find the host in the DB", PingRoutingCommand.class.getSimpleName(), cmdHostId);
                                 }
                             }
                             if (host!= null && host.getStatus() != Status.Up && gatewayAccessible) {
@@ -1433,9 +1397,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                         } else if (cmd instanceof ReadyAnswer) {
                             final HostVO host = _hostDao.findById(attache.getId());
                             if (host == null) {
-                                if (logger.isDebugEnabled()) {
-                                    logger.debug("Cant not find host " + attache.getId());
-                                }
+                                logger.debug("Cant not find host {}", attache.getId());
                             }
                             answer = new Answer(cmd);
                         } else {
@@ -1450,26 +1412,24 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             }
 
             final Response response = new Response(request, answers, _nodeId, attache.getId());
-            if (logger.isDebugEnabled()) {
-                if (logD) {
-                    logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
-                } else {
-                    logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
-                }
+            if (logD) {
+                logger.debug("SeqA {}-: Sending {}", attache.getId(), response.getSequence(), response);
+            } else {
+                logger.trace("SeqA {}-: Sending {}" + attache.getId(), response.getSequence(), response);
             }
             try {
                 link.send(response.toBytes());
             } catch (final ClosedChannelException e) {
-                logger.warn("Unable to send response because connection is closed: " + response);
+                logger.warn("Unable to send response because connection is closed: {}", response);
             }
         }
 
         protected void processResponse(final Link link, final Response response) {
             final AgentAttache attache = (AgentAttache)link.attachment();
             if (attache == null) {
-                logger.warn("Unable to process: " + response);
+                logger.warn("Unable to process: {}", response);
             } else if (!attache.processAnswers(response.getSequence(), response)) {
-                logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response);
+                logger.info("Host {} - Seq {}: Response is not processed: {}", attache.getId(), response.getSequence(), response);
             }
         }
 
@@ -1502,7 +1462,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                     if (attache != null) {
                         disconnectWithInvestigation(attache, Event.AgentDisconnected);
                     } else {
-                        logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done.");
+                        logger.info("Connection from {} closed but no cleanup was done.", link.getIpAddress());
                         link.close();
                         link.terminated();
                     }
@@ -1539,20 +1499,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) {
         try {
             _agentStatusLock.lock();
-            if (logger.isDebugEnabled()) {
-                final ResourceState state = host.getResourceState();
-                final StringBuilder msg = new StringBuilder("Transition:");
-                msg.append("[Resource state = ").append(state);
-                msg.append(", Agent event = ").append(e.toString());
-                msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]");
-                logger.debug(msg);
-            }
+            logger.debug("[Resource state = {}, Agent event = , Host id = {}, name = {}]", host.getResourceState(), e.toString(), host.getId(), host.getName());
 
             host.setManagementServerId(msId);
             try {
                 return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao);
             } catch (final NoTransitionException e1) {
-                logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + ", management server id is " + msId);
+                logger.debug("Cannot transit agent status with event {} for host {}, name={}, management server id is {}", e, host.getId(), host.getName(), msId);
                 throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage());
             }
         } finally {
@@ -1581,7 +1534,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
     protected boolean isHostOwnerSwitched(final long hostId) {
         final HostVO host = _hostDao.findById(hostId);
         if (host == null) {
-            logger.warn("Can't find the host " + hostId);
+            logger.warn("Can't find the host {}", hostId);
             return false;
         }
         return isHostOwnerSwitched(host);
@@ -1606,7 +1559,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
         } else {
             /* Agent is still in connecting process, don't allow to disconnect right away */
             if (tapLoadingAgents(hostId, TapAgentsAction.Contains)) {
-                logger.info("Host " + hostId + " is being loaded so no disconnects needed.");
+                logger.info("Host {} is being loaded no disconnects needed.", hostId);
                 return;
             }
 
@@ -1705,17 +1658,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                             /*
                              * Host is in non-operation state, so no investigation and direct put agent to Disconnected
                              */
-                            logger.debug("Ping timeout but agent " + agentId + " is in resource state of " + resourceState + ", so no investigation");
+                            logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", agentId, resourceState);
                             disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
                         } else {
                             final HostVO host = _hostDao.findById(agentId);
                             if (host != null
                                     && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) {
 
-                                logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId());
+                                logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host.getId());
                                 disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
                             } else {
-                                logger.debug("Ping timeout for agent " + agentId + ", do invstigation");
+                                logger.debug("Ping timeout for agent {}, do investigation", agentId);
                                 disconnectWithInvestigation(agentId, Event.PingTimeout);
                             }
                         }
@@ -1754,7 +1707,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             }
 
             if (agentsBehind.size() > 0) {
-                logger.info("Found the following agents behind on ping: " + agentsBehind);
+                logger.info("Found the following agents behind on ping: {}", agentsBehind);
             }
 
             return agentsBehind;
@@ -1937,7 +1890,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             for (Long hostId : hostIds) {
                 Answer answer = easySend(hostId, cmds);
                 if (answer == null || !answer.getResult()) {
-                    logger.error("Error sending parameters to agent " + hostId);
+                    logger.error("Error sending parameters to agent {}", hostId);
                 }
             }
         }
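
Several call sites above (for example the "Unable to simulate start" warning)
rely on a Log4j2 convention: when the argument list holds exactly one more
entry than there are {} placeholders and that final argument is a Throwable,
it is logged as the attached exception with its stack trace rather than being
substituted into the message. A minimal sketch, with illustrative names:

    // two placeholders, three arguments: 'e' is logged as the exception
    logger.warn("Unable to simulate start on resource {} name {}", id, name, e);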
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java
index beafb4da8eb..285ba4ffe60 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java
@@ -90,7 +90,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
                 String peerName = synchronous.getPeer();
                 if (peerName != null) {
                     if (s_clusteredAgentMgr != null) {
-                        logger.debug(log(seq, "Forwarding to peer to cancel due to timeout"));
+                        logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Forwarding to peer to cancel due to timeout");
                         s_clusteredAgentMgr.cancel(peerName, _id, seq, "Timed Out");
                     } else {
                         logger.error("Unable to forward cancel, ClusteredAgentAttache is not properly initialized");
@@ -105,29 +105,21 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
 
     @Override
     public void routeToAgent(final byte[] data) throws AgentUnavailableException {
-        if (logger.isDebugEnabled()) {
-            logger.debug(log(Request.getSequence(data), "Routing from " + Request.getManagementServerId(data)));
-        }
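+        // LOG_SEQ_FORMATTED_STRING takes two "{}" arguments here: the sequence number
+        // and the message; note the message argument is still concatenated eagerly.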
+        logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Routing from " + Request.getManagementServerId(data));
 
         if (_link == null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(log(Request.getSequence(data), "Link is closed"));
-            }
+            logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Link is closed");
             throw new AgentUnavailableException("Link is closed", _id);
         }
 
         try {
             _link.send(data);
         } catch (ClosedChannelException e) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(log(Request.getSequence(data), "Channel is closed"));
-            }
+            logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Channel is closed");
 
             throw new AgentUnavailableException("Channel to agent is closed", _id);
         } catch (NullPointerException e) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(log(Request.getSequence(data), "Link is closed"));
-            }
+            logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Link is closed");
             // Note: since this block is not in synchronized.  It is possible for _link to become null.
             throw new AgentUnavailableException("Channel to agent is null", _id);
         }
@@ -148,9 +140,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
 
         if (_transferMode) {
 
-            if (logger.isDebugEnabled()) {
-                logger.debug(log(seq, "Holding request as the corresponding agent is in transfer mode: "));
-            }
+            logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Holding request as the corresponding agent is in transfer mode: ");
 
             synchronized (this) {
                 addRequestToTransfer(req);
@@ -174,9 +164,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
 
                 ch = s_clusteredAgentMgr.connectToPeer(peerName, ch);
                 if (ch == null) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(log(seq, "Unable to forward " + req.toString()));
-                    }
+                    logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Unable to forward " + req.toString());
                     continue;
                 }
 
@@ -187,7 +175,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
 
                 try {
                     if (logger.isDebugEnabled()) {
-                        logger.debug(log(seq, "Forwarding " + req.toString() + " to " + peerName));
+                        logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Forwarding " + req.toString() + " to " + peerName);
                     }
                     if (req.executeInSequence() && listener != null && listener instanceof SynchronousListener) {
                         SynchronousListener synchronous = (SynchronousListener)listener;
@@ -198,7 +186,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
                     return;
                 } catch (IOException e) {
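+                    // The isDebugEnabled() guard still pays off here, since the message
+                    // argument below is built with string concatenation before the call.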
                     if (logger.isDebugEnabled()) {
-                        logger.debug(log(seq, "Error on connecting to management node: " + req.toString() + " try = " + i));
+                        logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Error on connecting to management node: " + req.toString() + " try = " + i);
                     }
 
                     if (logger.isInfoEnabled()) {
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
index 1fe6b19ab68..a7fea0f2533 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
@@ -152,7 +152,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         _sslEngines = new HashMap<String, SSLEngine>(7);
         _nodeId = ManagementServerNode.getManagementServerId();
 
-        logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId);
+        logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): {}", _nodeId);
 
         ClusteredAgentAttache.initialize(this);
 
@@ -170,9 +170,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             return false;
         }
         _timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value());
-        if (logger.isDebugEnabled()) {
-            logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds");
-        }
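+        // The explicit isDebugEnabled() guard can be dropped: Log4j2 formats the
+        // parameterized message only after its own level check passes.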
+        logger.debug("Scheduled direct agent scan task to run at an interval of {} seconds", ScanInterval.value());
 
         // Schedule tasks for agent rebalancing
         if (isAgentRebalanceEnabled()) {
@@ -186,9 +184,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
     public void scheduleHostScanTask() {
         _timer.schedule(new DirectAgentScanTimerTask(), 0);
-        if (logger.isDebugEnabled()) {
-            logger.debug("Scheduled a direct agent scan task");
-        }
+        logger.debug("Scheduled a direct agent scan task");
     }
 
     private void runDirectAgentScanTimerTask() {
@@ -196,9 +192,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     }
 
     private void scanDirectAgentToLoad() {
-        if (logger.isTraceEnabled()) {
-            logger.trace("Begin scanning directly connected hosts");
-        }
+        logger.trace("Begin scanning directly connected hosts");
 
         // for agents that are self-managed, threshold to be considered as disconnected after pingtimeout
         final long cutSeconds = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout();
@@ -208,35 +202,29 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         if (hosts != null) {
             hosts.addAll(appliances);
             if (hosts.size() > 0) {
-                logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them...");
+                logger.debug("Found {} unmanaged direct hosts, processing connect for them...", hosts.size());
                 for (final HostVO host : hosts) {
                     try {
                         final AgentAttache agentattache = findAttache(host.getId());
                         if (agentattache != null) {
                             // already loaded, skip
                             if (agentattache.forForward()) {
-                                if (logger.isInfoEnabled()) {
-                                    logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host");
-                                }
+                                logger.info("{} is detected down, but we have a forward attache running, disconnect this one before launching the host", host);
                                 removeAgent(agentattache, Status.Disconnected);
                             } else {
                                 continue;
                             }
                         }
 
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")");
-                        }
+                        logger.debug("Loading directly connected host {}({})", host.getId(), host.getName());
                         loadDirectlyConnectedHost(host, false);
                     } catch (final Throwable e) {
-                        logger.warn(" can not load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e);
+                        logger.warn(" can not load directly connected host {}({}) due to ", host.getId(), host.getName(), e);
                     }
                 }
             }
         }
-        if (logger.isTraceEnabled()) {
-            logger.trace("End scanning directly connected hosts");
-        }
+        logger.trace("End scanning directly connected hosts");
     }
 
     private class DirectAgentScanTimerTask extends ManagedContextTimerTask {
@@ -245,7 +233,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             try {
                 runDirectAgentScanTimerTask();
             } catch (final Throwable e) {
-                logger.error("Unexpected exception " + e.getMessage(), e);
+                logger.error("Unexpected exception {}", e.getMessage(), e);
             }
         }
     }
@@ -256,7 +244,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     }
 
     protected AgentAttache createAttache(final long id) {
-        logger.debug("create forwarding ClusteredAgentAttache for " + id);
+        logger.debug("create forwarding ClusteredAgentAttache for {}", id);
         final HostVO host = _hostDao.findById(id);
         final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName());
         AgentAttache old = null;
@@ -265,9 +253,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             _agents.put(id, attache);
         }
         if (old != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Remove stale agent attache from current management server");
-            }
+            logger.debug("Remove stale agent attache from current management server");
             removeAgent(old, Status.Removed);
         }
         return attache;
@@ -275,7 +261,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
     @Override
     protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) {
-        logger.debug("create ClusteredAgentAttache for " + host.getId());
+        logger.debug("create ClusteredAgentAttache for {}",  host.getId());
         final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
         link.attach(attache);
         AgentAttache old = null;
@@ -291,7 +277,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
     @Override
     protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) {
-        logger.debug(String.format("Create ClusteredDirectAgentAttache for %s.", host));
+        logger.debug("Create ClusteredDirectAgentAttache for {}.", host);
         final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates());
         AgentAttache old = null;
         synchronized (_agents) {
@@ -335,9 +321,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     @Override
     public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
         if (event == Event.AgentDisconnected) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Received agent disconnect event for host " + hostId);
-            }
+            logger.debug("Received agent disconnect event for host {}",  hostId);
             final AgentAttache attache = findAttache(hostId);
             if (attache != null) {
                 // don't process disconnect if the host is being rebalanced
@@ -345,7 +329,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                     final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId);
                     if (transferVO != null) {
                         if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) {
-                            logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " + _nodeId);
+                            logger.debug("Not processing {} event for the host id={} as the host is being connected to {}",Event.AgentDisconnected, hostId, _nodeId);
                             return true;
                         }
                     }
@@ -354,7 +338,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 // don't process disconnect if the disconnect came for the host via delayed cluster notification,
                 // but the host has already reconnected to the current management server
                 if (!attache.forForward()) {
-                    logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is directly connected to the current management server " + _nodeId);
+                    logger.debug("Not processing {} event for the host id={} as the host is directly connected to the current management server {}", Event.AgentDisconnected, hostId, _nodeId);
                     return true;
                 }
 
@@ -387,26 +371,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
     // notifies MS peers to schedule a host scan task immediately, triggered during addHost operation
     public void notifyNodesInClusterToScheduleHostScanTask() {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Notifying other MS nodes to run host scan task");
-        }
+        logger.debug("Notifying other MS nodes to run host scan task");
         final Command[] cmds = new Command[] {new ScheduleHostScanTaskCommand()};
         _clusterMgr.broadcast(0, _gson.toJson(cmds));
     }
 
     protected void logT(final byte[] bytes, final String msg) {
-        logger.trace("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": "
-                + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
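+        // logT/logD/logI below share the same parameterized pattern; only the level differs.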
+        logger.trace("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes),
+                Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
     }
 
     protected void logD(final byte[] bytes, final String msg) {
-        logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": "
-                + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
+        logger.debug("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes),
+                Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
     }
 
     protected void logI(final byte[] bytes, final String msg) {
-        logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": "
-                + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
+        logger.info("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes),
+                Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
     }
 
     public boolean routeToPeer(final String peer, final byte[] bytes) {
@@ -430,9 +412,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 return false;
             }
             try {
-                if (logger.isDebugEnabled()) {
-                    logD(bytes, "Routing to peer");
-                }
+                logD(bytes, "Routing to peer");
                 Link.write(ch, new ByteBuffer[] {ByteBuffer.wrap(bytes)}, sslEngine);
                 return true;
             } catch (final IOException e) {
@@ -469,7 +449,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 try {
                     ch.close();
                 } catch (final IOException e) {
-                    logger.warn("Unable to close peer socket connection to " + peerName);
+                    logger.warn("Unable to close peer socket connection to {}", peerName);
                 }
             }
             _peers.remove(peerName);
@@ -485,13 +465,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 try {
                     prevCh.close();
                 } catch (final Exception e) {
-                    logger.info("[ignored]" + "failed to get close resource for previous channel Socket: " + e.getLocalizedMessage());
+                    logger.info("[ignored] failed to get close resource for previous channel Socket: {}", e.getLocalizedMessage());
                 }
             }
             if (ch == null || ch == prevCh) {
                 final ManagementServerHost ms = _clusterMgr.getPeer(peerName);
                 if (ms == null) {
-                    logger.info("Unable to find peer: " + peerName);
+                    logger.info("Unable to find peer: {}",  peerName);
                     return null;
                 }
                 final String ip = ms.getServiceIP();
@@ -518,14 +498,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                             ch1.close();
                             throw new IOException(String.format("SSL: Handshake failed with peer management server '%s' on %s:%d ", peerName, ip, port));
                         }
-                        logger.info(String.format("SSL: Handshake done with peer management server '%s' on %s:%d ", peerName, ip, port));
+                        logger.info("SSL: Handshake done with peer management server '{}' on {}:{} ", peerName, ip, port);
                     } catch (final Exception e) {
                         ch1.close();
                         throw new IOException("SSL: Fail to init SSL! " + e);
                     }
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip);
-                    }
+                    logger.debug("Connection to peer opened: {}, ip: {}", peerName, ip);
                     _peers.put(peerName, ch1);
                     _sslEngines.put(peerName, sslEngine);
                     return ch1;
@@ -534,17 +512,15 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                         try {
                             ch1.close();
                         } catch (final IOException ex) {
-                            logger.error("failed to close failed peer socket: " + ex);
+                            logger.error("failed to close failed peer socket: {}",  ex);
                         }
                     }
-                    logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e);
+                    logger.warn("Unable to connect to peer management server: {}, ip {} due to {}", peerName, ip, e.getMessage(), e);
                     return null;
                 }
             }
 
-            if (logger.isTraceEnabled()) {
-                logger.trace("Found open channel for peer: " + peerName);
-            }
+            logger.trace("Found open channel for peer: {}",  peerName);
             return ch;
         }
     }
@@ -569,9 +545,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         AgentAttache agent = findAttache(hostId);
         if (agent == null || !agent.forForward()) {
             if (isHostOwnerSwitched(host)) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache");
-                }
+                logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache",  hostId);
                 agent = createAttache(hostId);
             }
         }
@@ -589,10 +563,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         if (_peers != null) {
             for (final SocketChannel ch : _peers.values()) {
                 try {
-                    logger.info("Closing: " + ch.toString());
+                    logger.info("Closing: {}",  ch.toString());
                     ch.close();
                 } catch (final IOException e) {
-                    logger.info("[ignored] error on closing channel: " + ch.toString(), e);
+                    logger.info("[ignored] error on closing channel: {}",  ch.toString(), e);
                 }
             }
         }
@@ -649,9 +623,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                         final Request req = Request.parse(data);
                         final Command[] cmds = req.getCommands();
                         final CancelCommand cancel = (CancelCommand)cmds[0];
-                        if (logger.isDebugEnabled()) {
-                            logD(data, "Cancel request received");
-                        }
+                        logD(data, "Cancel request received");
                         agent.cancel(cancel.getSequence());
                         final Long current = agent._currentSequence;
                         // if the request is the current request, always have to trigger sending next request in
@@ -697,9 +669,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                             final AgentAttache attache = (AgentAttache)link.attachment();
                             if (attache != null) {
                                 attache.sendNext(Request.getSequence(data));
-                            } else if (logger.isDebugEnabled()) {
-                                logD(data, "No attache to process " + Request.parse(data).toString());
+                            } else {
+                                logD(data, "No attache to process " + Request.parse(data).toString());
                             }
                         }
                         return;
                     } else {
@@ -710,11 +681,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                             final Response response = Response.parse(data);
                             final AgentAttache attache = findAttache(response.getAgentId());
                             if (attache == null) {
-                                logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString());
+                                logger.info("SeqA {}-{} Unable to find attache to forward {}", response.getAgentId(), response.getSequence(), response.toString());
                                 return;
                             }
                             if (!attache.processAnswers(response.getSequence(), response)) {
-                                logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString());
+                                logger.info("SeqA {}-{}: Response is not processed: {}", attache.getId(), response.getSequence(), response.toString());
                             }
                         }
                         return;
@@ -741,12 +712,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     @Override
     public void onManagementNodeLeft(final List<? extends ManagementServerHost> nodeList, final long selfNodeId) {
         for (final ManagementServerHost vo : nodeList) {
-            logger.info("Marking hosts as disconnected on Management server" + vo.getMsid());
+            logger.info("Marking hosts as disconnected on Management server {}",  vo.getMsid());
             final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout();
             _hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing);
             outOfBandManagementDao.expireServerOwnership(vo.getMsid());
             haConfigDao.expireServerOwnership(vo.getMsid());
-            logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid());
+            logger.info("Deleting entries from op_host_transfer table for Management server {}",  vo.getMsid());
             cleanupTransferMap(vo.getMsid());
         }
     }
@@ -773,7 +744,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             try {
                 result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
             } catch (final Exception e) {
-                logger.warn("Unable to rebalance host id=" + agentId, e);
+                logger.warn("Unable to rebalance host id={}",  agentId, e);
             }
         }
         return result;
@@ -806,19 +777,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             try {
                 if (!cancelled) {
                     startRebalanceAgents();
-                    if (logger.isInfoEnabled()) {
-                        logger.info("The agent load balancer task is now being cancelled");
-                    }
+                    logger.info("The agent load balancer task is now being cancelled");
                     cancelled = true;
                 }
             } catch (final Throwable e) {
-                logger.error("Unexpected exception " + e.toString(), e);
+                logger.error("Unexpected exception {}",  e.toString(), e);
             }
         }
     }
 
     public void startRebalanceAgents() {
-        logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents");
+        logger.debug("Management server {} is asking other peers to rebalance their agents", _nodeId);
         final List<ManagementServerHostVO> allMS = _mshostDao.listBy(ManagementServerHost.State.Up);
         final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
         sc.and(sc.entity().getManagementServerId(), Op.NNULL);
@@ -830,17 +799,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) {
             avLoad = allManagedAgents.size() / allMS.size();
         } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is "
-                        + allManagedAgents.size());
-            }
+            logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is {};" +
+                    "number of managed agents is {}", allMS.size(), allManagedAgents.size());
             return;
         }
 
         if (avLoad == 0L) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("As calculated average load is less than 1, rounding it to 1");
-            }
+            logger.debug("As calculated average load is less than 1, rounding it to 1");
             avLoad = 1;
         }
 
@@ -852,20 +817,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                     hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad);
                     if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
                         break;
-                    } else {
-                        logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
                     }
+                    logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
                 }
 
                 if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
-                    logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid());
+                    logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid());
                     for (final HostVO host : hostsToRebalance) {
                         final long hostId = host.getId();
-                        logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId);
+                        logger.debug("Asking management server {} to give away host id={}", node.getMsid(), hostId);
                         boolean result = true;
 
                         if (_hostTransferDao.findById(hostId) != null) {
-                            logger.warn("Somebody else is already rebalancing host id: " + hostId);
+                            logger.warn("Somebody else is already rebalancing host id: {}", hostId);
                             continue;
                         }
 
@@ -874,19 +838,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                             transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId);
                             final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance);
                             if (answer == null) {
-                                logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid());
+                                logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid());
                                 result = false;
                             }
                         } catch (final Exception ex) {
-                            logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex);
+                            logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid(), ex);
                             result = false;
                         } finally {
                             if (transfer != null) {
                                 final HostTransferMapVO transferState = _hostTransferDao.findByIdAndFutureOwnerId(transfer.getId(), _nodeId);
                                 if (!result && transferState != null && transferState.getState() == HostTransferState.TransferRequested) {
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode");
-                                    }
+                                    logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode");
                                     // just remove the mapping (if exists) as nothing was done on the peer management
                                     // server yet
                                     _hostTransferDao.remove(transfer.getId());
@@ -895,7 +857,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                         }
                     }
                 } else {
-                    logger.debug("Found no hosts to rebalance from the management server " + node.getMsid());
+                    logger.debug("Found no hosts to rebalance from the management server {}",  node.getMsid());
                 }
             }
         }
@@ -909,16 +871,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         final Command[] cmds = commands.toCommands();
 
         try {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Forwarding " + cmds[0].toString() + " to " + peer);
-            }
+            logger.debug("Forwarding {} to {}", cmds[0].toString(), peer);
             final String peerName = Long.toString(peer);
             final String cmdStr = _gson.toJson(cmds);
             final String ansStr = _clusterMgr.execute(peerName, agentId, cmdStr, true);
             final Answer[] answers = _gson.fromJson(ansStr, Answer[].class);
             return answers;
         } catch (final Exception e) {
-            logger.warn("Caught exception while talking to " + currentOwnerId, e);
+            logger.warn("Caught exception while talking to {}",  currentOwnerId, e);
             return null;
         }
     }
@@ -942,9 +902,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             return null;
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId);
-        }
+        logger.debug("Propagating agent change request event: {} to agent: {}", event.toString(), agentId);
         final Command[] cmds = new Command[1];
         cmds[0] = new ChangeAgentCommand(agentId, event);
 
@@ -955,9 +913,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
         final Answer[] answers = _gson.fromJson(ansStr, Answer[].class);
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Result for agent change is " + answers[0].getResult());
-        }
+        logger.debug("Result for agent change is {}",  answers[0].getResult());
 
         return answers[0].getResult();
     }
@@ -967,12 +923,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             @Override
             protected void runInContext() {
                 try {
-                    if (logger.isTraceEnabled()) {
-                        logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId);
-                    }
+                    logger.trace("Clustered agent transfer scan check, management server id: {}",  _nodeId);
                     synchronized (_agentToTransferIds) {
                         if (_agentToTransferIds.size() > 0) {
-                            logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer");
+                            logger.debug("Found {} agents to transfer", _agentToTransferIds.size());
                             // for (Long hostId : _agentToTransferIds) {
                             for (final Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext();) {
                                 final Long hostId = iterator.next();
@@ -988,14 +942,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                                 final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut));
 
                                 if (transferMap == null) {
-                                    logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host");
+                                    logger.debug("Timed out waiting for the host id={} to be ready to transfer, skipping rebalance for the host" + hostId);
                                     iterator.remove();
                                     _hostTransferDao.completeAgentTransfer(hostId);
                                     continue;
                                 }
 
                                 if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) {
-                                    logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host");
+                                    logger.debug("Management server {} doesn't own host id={} any more, skipping rebalance for the host", _nodeId, hostId);
                                     iterator.remove();
                                     _hostTransferDao.completeAgentTransfer(hostId);
                                     continue;
@@ -1003,7 +957,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
                                 final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner());
                                 if (ms != null && ms.getState() != ManagementServerHost.State.Up) {
-                                    logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host");
+                                    logger.debug("Can't transfer host {} as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, ms);
                                     iterator.remove();
                                     _hostTransferDao.completeAgentTransfer(hostId);
                                     continue;
@@ -1014,19 +968,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                                     try {
                                         _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner()));
                                     } catch (final RejectedExecutionException ex) {
-                                        logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution");
+                                        logger.warn("Failed to submit rebalance task for host id={}; postponing the execution", hostId);
                                         continue;
                                     }
 
                                 } else {
-                                    logger.debug("Agent " + hostId + " can't be transferred yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is "
-                                            + attache.getNonRecurringListenersSize());
+                                    logger.debug("Agent {} can't be transferred yet as its request queue size is {} and listener queue size is {}",
+                                            hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize());
                                 }
                             }
                         } else {
-                            if (logger.isTraceEnabled()) {
-                                logger.trace("Found no agents to be transferred by the management server " + _nodeId);
-                            }
+                            logger.trace("Found no agents to be transferred by the management server {}",  _nodeId);
                         }
                     }
 
@@ -1038,7 +990,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     }
 
     private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) {
-        logger.debug("Adding agent " + hostId + " to the list of agents to transfer");
+        logger.debug("Adding agent {} to the list of agents to transfer", hostId);
         synchronized (_agentToTransferIds) {
             return _agentToTransferIds.add(hostId);
         }
@@ -1060,24 +1012,22 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 }
 
             } catch (final Exception ex) {
-                logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex);
+                logger.warn("Host {} failed to connect to the management server {} as a part of rebalance process", hostId, futureOwnerId, ex);
                 result = false;
             }
 
             if (result) {
-                logger.debug("Successfully transferred host id=" + hostId + " to management server " + futureOwnerId);
+                logger.debug("Successfully transferred host id={} to management server {}", hostId, futureOwnerId);
                 finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted);
             } else {
-                logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId);
+                logger.warn("Failed to transfer host id={} to management server {}", hostId, futureOwnerId);
                 finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed);
             }
 
         } else if (futureOwnerId == _nodeId) {
             final HostVO host = _hostDao.findById(hostId);
             try {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
-                }
+                logger.debug("Disconnecting host {}({}) as a part of rebalance process without notification", host.getId(), host.getName());
 
                 final AgentAttache attache = findAttache(hostId);
                 if (attache != null) {
@@ -1085,24 +1035,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 }
 
                 if (result) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
-                    }
+                    logger.debug("Loading directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId);
                     result = loadDirectlyConnectedHost(host, true);
                 } else {
-                    logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
+                    logger.warn("Failed to disconnect {}({}) as a part of rebalance process without notification" + host.getId(), host.getName());
                 }
 
             } catch (final Exception ex) {
-                logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:",
-                        ex);
+                logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId, ex);
                 result = false;
             }
 
             if (result) {
-                logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
+                logger.debug("Successfully loaded directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId);
             } else {
-                logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
+                logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId);
             }
         }
 
@@ -1112,13 +1059,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) {
 
         final boolean success = event == Event.RebalanceCompleted ? true : false;
-        if (logger.isDebugEnabled()) {
-            logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event);
-        }
+        logger.debug("Finishing rebalancing for the agent {} with event {}", hostId, event);
 
         final AgentAttache attache = findAttache(hostId);
         if (attache == null || !(attache instanceof ClusteredAgentAttache)) {
-            logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already");
+            logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId);
             _hostTransferDao.completeAgentTransfer(hostId);
             return;
         }
@@ -1133,7 +1078,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             // 2) Get all transfer requests and route them to peer
             Request requestToTransfer = forwardAttache.getRequestToTransfer();
             while (requestToTransfer != null) {
-                logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " + _nodeId + " to " + futureOwnerId);
+                logger.debug("Forwarding request {} held in transfer attache {} from the management server {} to {}", requestToTransfer.getSequence(), hostId, _nodeId, futureOwnerId);
                 final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes());
                 if (!routeResult) {
                     logD(requestToTransfer.getBytes(), "Failed to route request to peer");
@@ -1142,23 +1087,23 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 requestToTransfer = forwardAttache.getRequestToTransfer();
             }
 
-            logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId);
+            logger.debug("Management server {} completed agent {} rebalance to {}", _nodeId, hostId, futureOwnerId);
 
         } else {
             failRebalance(hostId);
         }
 
-        logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance");
+        logger.debug("Management server {} completed agent {} rebalance", _nodeId, hostId);
         _hostTransferDao.completeAgentTransfer(hostId);
     }
 
     protected void failRebalance(final long hostId) {
         try {
-            logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId);
+            logger.debug("Management server {} failed to rebalance agent {}", _nodeId, hostId);
             _hostTransferDao.completeAgentTransfer(hostId);
             handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true);
         } catch (final Exception ex) {
-            logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup");
+            logger.warn("Failed to reconnect host id={} as a part of failed rebalance task cleanup", hostId);
         }
     }
 
@@ -1176,18 +1121,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true);
                 final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId);
                 if (forwardAttache == null) {
-                    logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process");
+                    logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId);
                     return false;
                 }
-                logger.debug("Putting agent id=" + hostId + " to transfer mode");
+                logger.debug("Putting agent id={} to transfer mode", hostId);
                 forwardAttache.setTransferMode(true);
                 _agents.put(hostId, forwardAttache);
             } else {
                 if (attache == null) {
-                    logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing");
+                    logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", hostId, _nodeId);
                 } else {
-                    logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " + attache.getNonRecurringListenersSize()
-                    + ", can't start host rebalancing");
+                    logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing",
+                            hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize());
                 }
                 return false;
             }
@@ -1223,29 +1168,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         @Override
         protected void runInContext() {
             try {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Rebalancing host id=" + hostId);
-                }
+                logger.debug("Rebalancing host id={}", hostId);
                 rebalanceHost(hostId, currentOwnerId, futureOwnerId);
             } catch (final Exception e) {
-                logger.warn("Unable to rebalance host id=" + hostId, e);
+                logger.warn("Unable to rebalance host id={}", hostId, e);
             }
         }
     }
 
     private String handleScheduleHostScanTaskCommand(final ScheduleHostScanTaskCommand cmd) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd));
-        }
+        logger.debug("Intercepting resource manager command: {}", _gson.toJson(cmd));
 
         try {
             scheduleHostScanTask();
         } catch (final Exception e) {
             // Scheduling host scan task in peer MS is a best effort operation during host add, regular host scan
             // happens at fixed intervals anyways. So handling any exceptions that may be thrown
-            logger.warn(
-                    "Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() + ", ignoring as regular host scan happens at fixed interval anyways",
-                    e);
+            logger.warn("Exception happened while trying to schedule host scan task on mgmt server {}, ignoring as regular host scan happens at fixed " +
+                            "interval anyways", _clusterMgr.getSelfPeerName(), e);
             return null;
         }
 
@@ -1271,9 +1211,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         @Override
         public String dispatch(final ClusterServicePdu pdu) {
 
-            if (logger.isDebugEnabled()) {
-                logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
-            }
+            logger.debug("Dispatch ->{}, json: {}", pdu.getAgentId(), pdu.getJsonPackage());
 
             Command[] cmds = null;
             try {
@@ -1286,15 +1224,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted
                 final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0];
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
-                }
+                logger.debug("Intercepting command for agent change: agent {} event: {}", cmd.getAgentId(), cmd.getEvent());
                 boolean result = false;
                 try {
                     result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent());
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Result is " + result);
-                    }
+                    logger.debug("Result is {}", result);
 
                 } catch (final AgentUnavailableException e) {
                     logger.warn("Agent is unavailable", e);
@@ -1307,15 +1241,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             } else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) {
                 final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0];
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
-                }
+                logger.debug("Intercepting command for agent rebalancing: agent {} event: {}", cmd.getAgentId(), cmd.getEvent());
                 boolean result = false;
                 try {
                     result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner());
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Result is " + result);
-                    }
+                    logger.debug("Result is {}", result);
 
                 } catch (final AgentUnavailableException e) {
                     logger.warn("Agent is unavailable", e);
@@ -1330,12 +1260,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) {
                 final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0];
 
-                logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
+                logger.debug("Intercepting command to propagate event {} for host {}", cmd.getEvent().name(), cmd.getHostId());
 
                 boolean result = false;
                 try {
                     result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent());
-                    logger.debug("Result is " + result);
+                    logger.debug("Result is {}", result);
                 } catch (final AgentUnavailableException ex) {
                     logger.warn("Agent is unavailable", ex);
                     return null;
@@ -1354,25 +1284,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
             try {
                 final long startTick = System.currentTimeMillis();
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
-                }
+                logger.debug("Dispatch -> {}, json: {}", pdu.getAgentId(), pdu.getJsonPackage());
 
                 final Answer[] answers = sendToAgent(pdu.getAgentId(), cmds, pdu.isStopOnError());
                 if (answers != null) {
                     final String jsonReturn = _gson.toJson(answers);
 
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return result: "
-                                + jsonReturn);
-                    }
+                    logger.debug("Completed dispatching -> {}, json: {} in {} ms, return result: {}", pdu.getAgentId(),
+                            pdu.getJsonPackage(), (System.currentTimeMillis() - startTick), jsonReturn);
 
                     return jsonReturn;
                 } else {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(
-                                "Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return null result");
-                    }
+                    logger.debug("Completed dispatching -> {}, json: {} in {} ms, return null result", pdu.getAgentId(),
+                            pdu.getJsonPackage(), (System.currentTimeMillis() - startTick));
                 }
             } catch (final AgentUnavailableException e) {
                 logger.warn("Agent is unavailable", e);
@@ -1432,9 +1356,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             @Override
             protected void runInContext() {
                 try {
-                    if (logger.isTraceEnabled()) {
-                        logger.trace("Agent rebalance task check, management server id:" + _nodeId);
-                    }
+                    logger.trace("Agent rebalance task check, management server id:{}", _nodeId);
                     // initiate agent lb task will be scheduled and executed only once, and only when number of agents
                     // loaded exceeds _connectedAgentsThreshold
                     if (!_agentLbHappened) {
@@ -1451,11 +1373,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                         if (allHostsCount > 0.0) {
                             final double load = managedHostsCount / allHostsCount;
                             if (load > ConnectedAgentThreshold.value()) {
-                                logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value());
+                                logger.debug("Scheduling agent rebalancing task as the average agent load {} is more than the threshold {}", load, ConnectedAgentThreshold.value());
                                 scheduleRebalanceAgents();
                                 _agentLbHappened = true;
                             } else {
-                                logger.debug("Not scheduling agent rebalancing task as the average load " + load + " has not crossed the threshold " + ConnectedAgentThreshold.value());
+                                logger.debug("Not scheduling agent rebalancing task as the average load {} has not crossed the threshold {}", load, ConnectedAgentThreshold.value());
                             }
                         }
                     }
@@ -1469,14 +1391,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     @Override
     public void rescan() {
         // schedule a scan task immediately
-        if (logger.isDebugEnabled()) {
-            logger.debug("Scheduling a host scan task");
-        }
+        logger.debug("Scheduling a host scan task");
         // schedule host scan task on current MS
         scheduleHostScanTask();
-        if (logger.isDebugEnabled()) {
-            logger.debug("Notifying all peer MS to schedule host scan task");
-        }
+        logger.debug("Notifying all peer MS to schedule host scan task");
     }
 
     @Override
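
For reference, a minimal sketch of the pattern this commit applies throughout (the
class and values below are illustrative, not part of the patch). Log4j2 substitutes
the {} placeholders only when the level is enabled, so both the isDebugEnabled()
guard and the eager string concatenation can be dropped. One caveat, relevant to a
couple of hunks above: each trailing argument needs its own {}; arguments without a
matching placeholder are silently ignored unless the last one is a Throwable.

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class ParameterizedLoggingSketch {
        private static final Logger logger = LogManager.getLogger(ParameterizedLoggingSketch.class);

        void before(boolean result) {
            // Old style: the guard avoids building the message when DEBUG is off.
            if (logger.isDebugEnabled()) {
                logger.debug("Result is " + result);
            }
        }

        void after(boolean result) {
            // New style: {} is substituted only if DEBUG is enabled; no guard needed.
            logger.debug("Result is {}", result);
        }
    }
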
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java
index 81c026348c8..c8e24301b29 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java
@@ -98,7 +98,7 @@ public class ConnectedAgentAttache extends AgentAttache {
             assert _link == null : "Duh...Says you....Forgot to call disconnect()!";
             synchronized (this) {
                 if (_link != null) {
-                    logger.warn("Lost attache " + _id + "(" + _name + ")");
+                    logger.warn("Lost attache {} ({})", _id, _name);
                     disconnect(Status.Alert);
                 }
             }
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java
index 969af3d1ba6..927da34104f 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java
@@ -60,9 +60,7 @@ public class DirectAgentAttache extends AgentAttache {
 
     @Override
     public void disconnect(Status state) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Processing disconnect " + _id + "(" + _name + ")");
-        }
+        logger.debug("Processing disconnect {}({})", _id, _name);
 
         for (ScheduledFuture<?> future : _futures) {
             future.cancel(false);
@@ -117,7 +115,7 @@ public class DirectAgentAttache extends AgentAttache {
         if (answers != null && answers[0] instanceof StartupAnswer) {
             StartupAnswer startup = (StartupAnswer)answers[0];
             int interval = startup.getPingInterval();
-            logger.info("StartupAnswer received " + startup.getHostId() + " Interval = " + interval);
+            logger.info("StartupAnswer received {} Interval = {}", startup.getHostId(), interval);
             _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS));
         }
     }
@@ -128,7 +126,7 @@ public class DirectAgentAttache extends AgentAttache {
             assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. Ever considered why there's a method called disconnect()?";
             synchronized (this) {
                 if (_resource != null) {
-                    logger.warn("Lost attache for " + _id + "(" + _name + ")");
+                    logger.warn("Lost attache for {}({})", _id, _name);
                     disconnect(Status.Alert);
                 }
             }
@@ -142,9 +140,7 @@ public class DirectAgentAttache extends AgentAttache {
     }
 
     private synchronized void scheduleFromQueue() {
-        if (logger.isTraceEnabled()) {
-            logger.trace("Agent attache=" + _id + ", task queue size=" + tasks.size() + ", outstanding tasks=" + _outstandingTaskCount.get());
-        }
+        logger.trace("Agent attache={}, task queue size={}, outstanding tasks={}", _id, tasks.size(), _outstandingTaskCount.get());
         while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) {
             _outstandingTaskCount.incrementAndGet();
             _agentMgr.getDirectAgentPool().execute(tasks.remove());
@@ -156,7 +152,7 @@ public class DirectAgentAttache extends AgentAttache {
         protected synchronized void runInContext() {
             try {
                 if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) {
-                    logger.warn("PingTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out");
+                    logger.warn("PingTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap());
                     return;
                 }
 
@@ -171,25 +167,21 @@ public class DirectAgentAttache extends AgentAttache {
                     }
 
                     if (cmd == null) {
-                        logger.warn("Unable to get current status on " + _id + "(" + _name + ")");
+                        logger.warn("Unable to get current status on {}({})", _id, _name);
                         return;
                     }
 
                     if (cmd.getContextParam("logid") != null) {
                         ThreadContext.put("logcontextid", cmd.getContextParam("logid"));
                     }
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Ping from " + _id + "(" + _name + ")");
-                    }
+                    logger.debug("Ping from {}({})", _id, _name);
                     long seq = _seq++;
 
-                    if (logger.isTraceEnabled()) {
-                        logger.trace("SeqA " + _id + "-" + seq + ": " + new Request(_id, -1, cmd, false).toString());
-                    }
+                    logger.trace("SeqA {}-{}: {}", _id, seq, new Request(_id, -1, cmd, false).toString());
 
                     _agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd});
                 } else {
-                    logger.debug("Unable to send ping because agent is disconnected " + _id + "(" + _name + ")");
+                    logger.debug("Unable to send ping because agent is disconnected {}({})", _id, _name);
                 }
             } catch (Exception e) {
                 logger.warn("Unable to complete the ping task", e);
@@ -218,7 +210,7 @@ public class DirectAgentAttache extends AgentAttache {
                 Response resp = new Response(_req, answers.toArray(new Answer[answers.size()]));
                 processAnswers(seq, resp);
             } catch (Exception e) {
-                logger.warn(log(seq, "Exception caught in bailout "), e);
+                logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception caught in bailout ", e);
             }
         }
 
@@ -227,7 +219,7 @@ public class DirectAgentAttache extends AgentAttache {
             long seq = _req.getSequence();
             try {
                 if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) {
-                    logger.warn("CronTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out");
+                    logger.warn("CronTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap());
                     bailout();
                     return;
                 }
@@ -236,9 +228,7 @@ public class DirectAgentAttache extends AgentAttache {
                 Command[] cmds = _req.getCommands();
                 boolean stopOnError = _req.stopOnError();
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug(log(seq, "Executing request"));
-                }
+                logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Executing request");
                 ArrayList<Answer> answers = new ArrayList<Answer>(cmds.length);
                 for (int i = 0; i < cmds.length; i++) {
                     Answer answer = null;
@@ -257,26 +247,24 @@ public class DirectAgentAttache extends AgentAttache {
                             answer = new Answer(cmds[i], false, "Agent is disconnected");
                         }
                     } catch (Exception e) {
-                        logger.warn(log(seq, "Exception Caught while executing command"), e);
+                        logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception Caught while executing command", e);
                         answer = new Answer(cmds[i], false, e.toString());
                     }
                     answers.add(answer);
                     if (!answer.getResult() && stopOnError) {
                         if (i < cmds.length - 1 && logger.isDebugEnabled()) {
-                            logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error."));
+                            logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling because one of the answers is false and it is stop on error.");
                         }
                         break;
                     }
                 }
 
                 Response resp = new Response(_req, answers.toArray(new Answer[answers.size()]));
-                if (logger.isDebugEnabled()) {
-                    logger.debug(log(seq, "Response Received: "));
-                }
+                logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Response Received: ");
 
                 processAnswers(seq, resp);
             } catch (Exception e) {
-                logger.warn(log(seq, "Exception caught "), e);
+                logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception caught ", e);
             } finally {
                 _outstandingCronTaskCount.decrementAndGet();
             }
@@ -298,9 +286,7 @@ public class DirectAgentAttache extends AgentAttache {
                 Command[] cmds = _req.getCommands();
                 boolean stopOnError = _req.stopOnError();
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug(log(seq, "Executing request"));
-                }
+                logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Executing request");
                 ArrayList<Answer> answers = new ArrayList<Answer>(cmds.length);
                 for (int i = 0; i < cmds.length; i++) {
                     Answer answer = null;
@@ -320,27 +306,25 @@ public class DirectAgentAttache extends AgentAttache {
                         }
                     } catch (Throwable t) {
                         // Catch Throwable as all exceptions will otherwise be eaten by the executor framework
-                        logger.warn(log(seq, "Throwable caught while executing command"), t);
+                        logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Throwable caught while executing command", t);
                         answer = new Answer(cmds[i], false, t.toString());
                     }
                     answers.add(answer);
                     if (!answer.getResult() && stopOnError) {
                         if (i < cmds.length - 1 && logger.isDebugEnabled()) {
-                            logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error."));
+                            logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling because one of the answers is false and it is stop on error.");
                         }
                         break;
                     }
                 }
 
                 Response resp = new Response(_req, answers.toArray(new Answer[answers.size()]));
-                if (logger.isDebugEnabled()) {
-                    logger.debug(log(seq, "Response Received: "));
-                }
+                logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Response Received: ");
 
                 processAnswers(seq, resp);
             } catch (Throwable t) {
                 // This is pretty serious as processAnswers might not be called and the calling process is stuck waiting for the full timeout
-                logger.error(log(seq, "Throwable caught in runInContext, this will cause the management to become unpredictable"), t);
+                logger.error(LOG_SEQ_FORMATTED_STRING, seq, "Throwable caught in runInContext, this will cause the management to become unpredictable", t);
             } finally {
                 _outstandingTaskCount.decrementAndGet();
                 scheduleFromQueue();
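
The LOG_SEQ_FORMATTED_STRING calls above combine two Log4j2 behaviors worth noting:
the attache id is resolved up front via String.format (the %d), leaving the two {}
placeholders for Log4j2 to fill per call, and when the argument list has one more
entry than there are placeholders and that last entry is a Throwable, Log4j2 logs
its stack trace as the event's exception. A minimal sketch (the id and message are
illustrative, not from the patch):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class SeqLoggingSketch {
        private static final Logger logger = LogManager.getLogger(SeqLoggingSketch.class);
        // %d is resolved now; the two {} are left for Log4j2 at call time.
        private static final String LOG_SEQ = String.format("Seq %d-{}: {}", 42L);

        void demo(long seq) {
            try {
                throw new IllegalStateException("boom");
            } catch (Exception e) {
                // Two placeholders, three arguments: seq and the message fill the
                // {}s; the surplus Throwable is logged with its stack trace.
                logger.warn(LOG_SEQ, seq, "Exception caught in bailout", e);
            }
        }
    }
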
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java
index b5687e2636a..0e8aff2bccf 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java
@@ -71,8 +71,7 @@ public class SynchronousListener implements Listener {
 
     @Override
     public synchronized boolean processDisconnect(long agentId, Status state) {
-        if (logger.isTraceEnabled())
-            logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters");
+        logger.trace("Agent disconnected, agent id: {}, state: {}. Will notify waiters", agentId, state);
 
         _disconnected = true;
         notifyAll();
@@ -128,10 +127,8 @@ public class SynchronousListener implements Listener {
         }
         profiler.stop();
 
-        if (logger.isTraceEnabled()) {
-            logger.trace("Synchronized command - sending completed, time: " + profiler.getDurationInMillis() + ", answer: " +
-                (_answers != null ? _answers[0].toString() : "null"));
-        }
+        logger.trace("Synchronized command - sending completed, time: {}, answer: {}", profiler.getDurationInMillis(),
+            (_answers != null ? _answers[0].toString() : "null"));
         return _answers;
     }
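
One limitation of plain parameterized calls like the trace above: the arguments
themselves (profiler.getDurationInMillis(), _answers[0].toString()) are still
evaluated eagerly even when the level is disabled; only the message formatting is
deferred. For genuinely expensive arguments Log4j2 also accepts Supplier lambdas,
as a few warn() calls in the VirtualMachineManagerImpl hunks below do. A sketch
(names are illustrative):

    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class LazyArgumentSketch {
        private static final Logger logger = LogManager.getLogger(LazyArgumentSketch.class);

        void demo(List<String> targets, long hostId) {
            // The lambdas run only if WARN is enabled, so the stream/join cost
            // is skipped entirely when the level is filtered out.
            logger.warn("Unable to modify targets [{}] on the host [{}].",
                    () -> targets.stream().collect(Collectors.joining(", ")),
                    () -> hostId);
        }
    }
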
 
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index 9f743668cd5..824b9f5f45d 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -475,15 +475,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map<String, Map<Integer, String>> extraDhcpOptions, final Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap)
                     throws InsufficientCapacityException {
 
-        logger.info(String.format("allocating virtual machine from template:%s with hostname:%s and %d networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size()));
+        logger.info("allocating virtual machine from template:{} with hostname:{} and {} networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size());
         VMInstanceVO persistedVm = null;
         try {
             final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName);
             final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
 
-            if (logger.isDebugEnabled()) {
-                logger.debug("Allocating entries for VM: " + vm);
-            }
+            logger.debug("Allocating entries for VM: {}", vm);
 
             vm.setDataCenterId(plan.getDataCenterId());
             if (plan.getPodId() != null) {
@@ -500,9 +498,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             }
             final Long rootDiskSizeFinal = rootDiskSize;
 
-            if (logger.isDebugEnabled()) {
-                logger.debug("Allocating nics for " + persistedVm);
-            }
+            logger.debug("Allocating nics for {}", persistedVm);
 
             try {
                 if (!vmProfile.getBootArgs().contains("ExternalLoadBalancerVm")) {
@@ -512,9 +508,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e);
             }
 
-            if (logger.isDebugEnabled()) {
-                logger.debug("Allocating disks for " + persistedVm);
-            }
+            logger.debug("Allocating disks for {}", persistedVm);
 
             allocateRootVolume(persistedVm, template, rootDiskOfferingInfo, owner, rootDiskSizeFinal);
 
@@ -543,9 +537,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 CallContext.unregister();
             }
 
-            if (logger.isDebugEnabled()) {
-                logger.debug("Allocation completed for VM: " + persistedVm);
-            }
+            logger.debug("Allocation completed for VM: {}", persistedVm);
         } catch (InsufficientCapacityException | CloudRuntimeException e) {
             // Failed VM will be in Stopped. Transition it to Error, so it can be expunged by ExpungeTask or similar
             try {
@@ -553,7 +545,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     stateTransitTo(persistedVm, VirtualMachine.Event.OperationFailedToError, null);
                 }
             } catch (NoTransitionException nte) {
-                logger.error(String.format("Failed to transition %s in %s state to Error state", persistedVm, persistedVm.getState().toString()));
+                logger.error("Failed to transition {} in {} state to Error state", persistedVm, persistedVm.getState().toString());
             }
             throw e;
         }
@@ -568,7 +560,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 volumeMgr.allocateRawVolume(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
                         rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vm, template, owner, null);
             } else if (template.getFormat() == ImageFormat.BAREMETAL) {
-                logger.debug(String.format("%s has format [%s]. Skipping ROOT volume [%s] allocation.", template.toString(), ImageFormat.BAREMETAL, rootVolumeName));
+                logger.debug("{} has format [{}]. Skipping ROOT volume [{}] allocation.", template.toString(), ImageFormat.BAREMETAL, rootVolumeName);
             } else {
                 volumeMgr.allocateTemplatedVolumes(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskSizeFinal,
                         rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vm, owner);
@@ -617,9 +609,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
     private boolean isVmDestroyed(VMInstanceVO vm) {
         if (vm == null || vm.getRemoved() != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Unable to find vm or vm is expunged: " + vm);
-            }
+            logger.debug("Unable to find vm or vm is expunged: {}", vm);
             return true;
         }
         return false;
@@ -644,20 +634,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             throw new CloudRuntimeException("Unable to expunge " + vm, e);
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Expunging vm " + vm);
-        }
+        logger.debug("Expunging vm {}", vm);
 
         final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
 
         final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
 
         List<NicProfile> vmNics = profile.getNics();
-        logger.debug(String.format("Cleaning up NICS [%s] of %s.", vmNics.stream().map(nic -> nic.toString()).collect(Collectors.joining(", ")),vm.toString()));
+        logger.debug("Cleaning up NICS [{}] of {}.", vmNics.stream().map(nic -> nic.toString()).collect(Collectors.joining(", ")), vm.toString());
         final List<Command> nicExpungeCommands = hvGuru.finalizeExpungeNics(vm, profile.getNics());
         _networkMgr.cleanupNics(profile);
 
-        logger.debug(String.format("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage. Data from %s.", vm.toString()));
+        logger.debug("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage. Data from {}.", vm.toString());
 
         final List<Command> volumeExpungeCommands = hvGuru.finalizeExpungeVolumes(vm);
 
@@ -714,7 +702,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             if (!cmds.isSuccessful()) {
                 for (final Answer answer : cmds.getAnswers()) {
                     if (!answer.getResult()) {
-                        logger.warn("Failed to expunge vm due to: " + answer.getDetails());
+                        logger.warn("Failed to expunge vm due to: {}", answer.getDetails());
                         throw new CloudRuntimeException(String.format("Unable to expunge %s due to %s", vm, answer.getDetails()));
                     }
                 }
@@ -727,15 +715,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         String vmToString = vm.toString();
 
         if (cmds.isSuccessful()) {
-            logger.debug(String.format("The commands [%s] to %s were successful.", cmdsStr, vmToString));
+            logger.debug("The commands [{}] to {} were successful.", cmdsStr, vmToString);
             return;
         }
 
-        logger.info(String.format("The commands [%s] to %s were unsuccessful. Handling answers.", cmdsStr, vmToString));
+        logger.info("The commands [{}] to {} were unsuccessful. Handling answers.", cmdsStr, vmToString);
 
         Answer[] answers = cmds.getAnswers();
         if (answers == null) {
-            logger.debug(String.format("There are no answers to commands [%s] to %s.", cmdsStr, vmToString));
+            logger.debug("There are no answers to commands [{}] to {}.", cmdsStr, vmToString);
             return;
         }
 
@@ -747,7 +735,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 throw new CloudRuntimeException(message);
             }
 
-            logger.debug(String.format("Commands [%s] to %s got answer [%s].", cmdsStr, vmToString, details));
+            logger.debug("Commands [{}] to {} got answer [{}].", cmdsStr, vmToString, details);
         }
     }
 
@@ -757,9 +745,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         }
         for (final Command command : cmdList) {
             command.setBypassHostMaintenance(isValidSystemVMType(vm));
-            if (logger.isTraceEnabled()) {
-                logger.trace(String.format("Adding expunge command [%s] for VM [%s]", command.toString(), vm.toString()));
-            }
+            logger.trace("Adding expunge command [{}] for VM [{}]", command.toString(), vm.toString());
             cmds.addCommand(command);
         }
     }
@@ -811,12 +797,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         Answer answer = _agentMgr.easySend(hostId, cmd);
 
         if (answer == null) {
-            logger.warn(String.format("Unable to get an answer to the modify targets command. Targets [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", "))));
+            logger.warn("Unable to get an answer to the modify targets command. Targets [{}].",
+                    () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")));
             return;
         }
 
         if (!answer.getResult()) {
-            logger.warn(String.format("Unable to modify targets [%s] on the host [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), hostId));
+            logger.warn("Unable to modify targets [{}] on the host [{}].",
+                    () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), () -> hostId);
         }
     }
 
@@ -879,39 +867,33 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         while (true) {
             final ItWorkVO vo = _workDao.findByOutstandingWork(vm.getId(), state);
             if (vo == null) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Unable to find work for VM: " + vm + " and state: " + state);
-                }
+                logger.debug("Unable to find work for VM: {} and state: {}", vm, state);
                 return true;
             }
 
             if (vo.getStep() == Step.Done) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Work for " + vm + " is " + vo.getStep());
-                }
+                logger.debug("Work for {} is {}", vm, vo.getStep());
                 return true;
             }
 
             final VMInstanceVO instance = _vmDao.findById(vm.getId());
             if (instance != null && instance.getState() == State.Running) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("VM is already started in DB: " + vm);
-                }
+                logger.debug("VM is already started in DB: {}", vm);
                 return true;
             }
 
             if (vo.getSecondsTaskIsInactive() > VmOpCancelInterval.value()) {
-                logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive());
+                logger.warn("The task item for vm {} has been inactive for {}", vm, vo.getSecondsTaskIsInactive());
                 return false;
             }
 
             try {
                 Thread.sleep(VmOpWaitInterval.value()*1000);
             } catch (final InterruptedException e) {
-                logger.info("Waiting for " + vm + " but is interrupted");
+                logger.info("Waiting for {} but is interrupted", vm);
                 throw new ConcurrentOperationException("Waiting for " + vm + " but is interrupted");
             }
-            logger.debug("Waiting some more to make sure there's no activity on " + vm);
+            logger.debug("Waited some more to make sure there's no activity on {}", vm);
         }
 
     }
@@ -935,9 +917,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                                 final ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account);
 
                                 if (stateTransitTo(vm, Event.StartRequested, null, work.getId())) {
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId());
-                                    }
+                                    logger.debug("Successfully transitioned to start state for {} reservation id = {}", vm, work.getId());
                                     if (VirtualMachine.Type.User.equals(vm.type) && ResourceCountRunningVMsonly.value()) {
                                         _resourceLimitMgr.incrementVmResourceCount(owner.getAccountId(), vm.isDisplay(), offering, template);
                                     }
@@ -953,9 +933,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     return result;
                 }
             } catch (final NoTransitionException e) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Unable to transition into Starting state due to " + e.getMessage());
-                }
+                logger.debug("Unable to transition into Starting state due to {}", e.getMessage());
             }
 
             final VMInstanceVO instance = _vmDao.findById(vmId);
@@ -963,15 +941,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 throw new ConcurrentOperationException("Unable to acquire lock on " + vm);
             }
 
-            if (logger.isDebugEnabled()) {
-                logger.debug("Determining why we're unable to update the state to Starting for " + instance + ".  Retry=" + retry);
-            }
+            logger.debug("Determining why we're unable to update the state to Starting for {}. Retry={}", instance, retry);
 
             final State state = instance.getState();
             if (state == State.Running) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("VM is already started: " + vm);
-                }
+                logger.debug("VM is already started: {}", vm);
                 return null;
             }
 
@@ -1028,12 +1002,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
 
         final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
-        if ( jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("start parameter value of %s == %s during dispatching",
-                        VirtualMachineProfile.Param.BootIntoSetup.getName(),
-                        (params == null?"<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup))));
-            }
+        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
+            logger.debug("start parameter value of {} == {} during dispatching",
+                    VirtualMachineProfile.Param.BootIntoSetup.getName(),
+                    (params == null?"<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup)));
 
             final VirtualMachine vm = _vmDao.findByUuid(vmUuid);
             VmWorkJobVO placeHolder = createPlaceHolderWork(vm.getId());
@@ -1045,11 +1017,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 }
             }
         } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("start parameter value of %s == %s during processing of queued job",
-                        VirtualMachineProfile.Param.BootIntoSetup.getName(),
-                        (params == null?"<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup))));
-            }
+            logger.debug("start parameter value of {} == {} during processing of queued job",
+                    VirtualMachineProfile.Param.BootIntoSetup.getName(),
+                    (params == null?"<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup)));
+
             final Outcome<VirtualMachine> outcome = startVmThroughJobQueue(vmUuid, params, planToDeploy, planner);
 
             retrieveVmFromJobOutcome(outcome, vmUuid, "startVm");
@@ -1089,13 +1060,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         final VMTemplateVO template = _templateDao.findById(vm.getTemplateId());
         if (template == null) {
             String msg = "Template for the VM instance can not be found, VM instance configuration needs to be updated";
-            logger.error(String.format("%s. Template ID: %d seems to be removed", msg, vm.getTemplateId()));
+            logger.error("{}. Template ID: {} seems to be removed", msg, vm.getTemplateId());
             throw new CloudRuntimeException(msg);
         }
         final VMTemplateZoneVO templateZoneVO = templateZoneDao.findByZoneTemplate(vm.getDataCenterId(), template.getId());
         if (templateZoneVO == null) {
             String msg = "Template for the VM instance can not be found in the zone ID: %s, VM instance configuration needs to be updated";
-            logger.error(String.format("%s. %s", msg, template));
+            logger.error("{}. {}", msg, template);
             throw new CloudRuntimeException(msg);
         }
     }
@@ -1171,6 +1142,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             if (avoids == null) {
                 avoids = new ExcludeList();
             }
+            logger.debug("Deploy avoids pods: {}, clusters: {}, hosts: {}", avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid());
 
             boolean planChangedByVolume = false;
             boolean reuseVolume = true;
@@ -1180,24 +1152,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
             int retry = StartRetry.value();
             while (retry-- != 0) {
-                logger.debug("VM start attempt #" + (StartRetry.value() - retry));
+                logger.debug("VM start attempt #{}", (StartRetry.value() - retry));
 
                 if (reuseVolume) {
                     final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
                     for (final VolumeVO vol : vols) {
                         final Long volTemplateId = vol.getTemplateId();
                         if (volTemplateId != null && volTemplateId != template.getId()) {
-                            if (logger.isDebugEnabled()) {
-                                logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
-                            }
+                            logger.debug("{} of {} is READY, but template ids don't match, let the planner reassign a new pool", vol, vm);
                             continue;
                         }
 
                         final StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
                         if (!pool.isInMaintenance()) {
-                            if (logger.isDebugEnabled()) {
-                                logger.debug("Root volume is ready, need to place VM in volume's cluster");
-                            }
+                            logger.debug("Root volume is ready, need to place VM in volume's cluster");
                             final long rootVolDcId = pool.getDataCenterId();
                             final Long rootVolPodId = pool.getPodId();
                             final Long rootVolClusterId = pool.getClusterId();
@@ -1205,10 +1173,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                                 final Long clusterIdSpecified = planToDeploy.getClusterId();
                                 if (clusterIdSpecified != null && rootVolClusterId != null) {
                                     if (!rootVolClusterId.equals(clusterIdSpecified)) {
-                                        if (logger.isDebugEnabled()) {
-                                            logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " +
-                                                    rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
-                                        }
+                                        logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: {}, cluster specified: {}",
+                                                rootVolClusterId, clusterIdSpecified);
                                         throw new ResourceUnavailableException(
                                                 "Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " +
                                                         vm, Cluster.class, clusterIdSpecified);
@@ -1219,10 +1185,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                                                 planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
                             } else {
                                 plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
-                                if (logger.isDebugEnabled()) {
-                                    logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId +
-                                            " , and clusterId: " + rootVolClusterId);
-                                }
+                                logger.debug("{} is READY, changing deployment plan to use this pool's dcId: {}, podId: {}, and clusterId: {}", vol, rootVolDcId,
+                                        rootVolPodId, rootVolClusterId);
                                 planChangedByVolume = true;
                             }
                         }
@@ -1344,7 +1308,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                             syncDiskChainChange(startAnswer);
 
                             if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
-                                logger.error("Unable to transition to a new state. VM uuid: "+vm.getUuid()+    "VM oldstate:"+vm.getState()+"Event:"+Event.OperationSucceeded);
+                                logger.error("Unable to transition to a new state. VM uuid: {}, VM oldstate: {}, Event: {}", vm.getUuid(), vm.getState(), Event.OperationSucceeded);
                                 throw new ConcurrentOperationException("Failed to deploy VM"+ vm.getUuid());
                             }
 
@@ -1358,9 +1322,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                             }
 
                             startedVm = vm;
-                            if (logger.isDebugEnabled()) {
-                                logger.debug("Start completed for VM " + vm);
-                            }
+                            logger.debug("Start completed for VM {}", vm);
                             final Host vmHost = _hostDao.findById(destHostId);
                             if (vmHost != null && (VirtualMachine.Type.ConsoleProxy.equals(vm.getType()) ||
                                     VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType())) && caManager.canProvisionCertificates()) {
@@ -1370,20 +1332,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                                                 new ArrayList<>(ipAddressDetails.values()), CAManager.CertValidityPeriod.value(), null);
                                         final boolean result = caManager.deployCertificate(vmHost, certificate, false, sshAccessDetails);
                                         if (!result) {
-                                            logger.error("Failed to setup certificate for system vm: " + vm.getInstanceName());
+                                            logger.error("Failed to setup certificate for system vm: {}", vm.getInstanceName());
                                         }
                                         return;
                                     } catch (final Exception e) {
-                                        logger.error("Retrying after catching exception while trying to secure agent for systemvm id=" + vm.getId(), e);
+                                        logger.error("Retrying after catching exception while trying to secure agent for systemvm id={}", vm.getId(), e);
                                     }
                                 }
                                 throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId());
                             }
                             return;
                         } else {
-                            if (logger.isDebugEnabled()) {
-                                logger.info("The guru did not like the answers so stopping " + vm);
-                            }
+                            logger.info("The guru did not like the answers so stopping {}", vm);
                             StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
                             stopCmd.setControlIp(getControlNicIpForVM(vm));
                             Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId());
@@ -1405,20 +1365,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                             }
 
                             if (answer == null || !answer.getResult()) {
-                                logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
+                                logger.warn("Unable to stop {} due to {}", vm, (answer != null ? answer.getDetails() : "no answers"));
                                 _haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
-                                throw new ExecutionException("Unable to stop this VM, "+vm.getUuid()+" so we are unable to retry the start operation");
+                                throw new ExecutionException("Unable to stop this VM, " + vm.getUuid() + " so we are unable to retry the start operation");
                             }
-                            throw new ExecutionException("Unable to start  VM:"+vm.getUuid()+" due to error in finalizeStart, not retrying");
+                            throw new ExecutionException("Unable to start VM: " + vm.getUuid() + " due to error in finalizeStart, not retrying");
                         }
                     }
-                    logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
+                    logger.info("Unable to start VM on {} due to {}", dest.getHost(), (startAnswer == null ? "no start answer" : startAnswer.getDetails()));
                     if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
                         break;
                     }
 
                 } catch (OperationTimedoutException e) {
-                    logger.debug("Unable to send the start command to host " + dest.getHost()+" failed to start VM: "+vm.getUuid());
+                    logger.debug("Unable to send the start command to host {}, failed to start VM: {}", dest.getHost(), vm.getUuid());
                     if (e.isActive()) {
                         _haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
                     }
@@ -1430,7 +1390,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                         if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                             throw e;
                         } else {
-                            logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
+                            logger.warn("Unexpected ResourceUnavailableException: {}", e.getScope().getName(), e);
                             throw e;
                         }
                     }
@@ -1440,11 +1400,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                         if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
                             throw e;
                         } else {
-                            logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
+                            logger.warn("Unexpected InsufficientCapacityException: {}", e.getScope().getName(), e);
                         }
                     }
                 } catch (ExecutionException | NoTransitionException e) {
-                    logger.error("Failed to start instance " + vm, e);
+                    logger.error("Failed to start instance {}", vm, e);
                     throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
                 } catch (final StorageAccessException e) {
                     logger.warn("Unable to access storage on host", e);
@@ -1761,9 +1721,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             @Override
             public Boolean doInTransaction(TransactionStatus status) {
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Unmanaging vm " + vm);
-                }
+                logger.debug("Unmanaging VM {}", vm);
 
                 final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
                 final VirtualMachineGuru guru = getVmGuru(vm);
@@ -1775,7 +1733,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
                     guru.finalizeUnmanage(vm);
                 } catch (Exception e) {
-                    logger.error("Error while unmanaging VM " + vm, e);
+                    logger.error("Error while unmanaging VM {}", vm, e);
                     return false;
                 }
 
@@ -1815,7 +1773,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
      * - If 'unmanage.vm.preserve.nics' = false: then the NICs are removed while unmanaging
      */
     private void unmanageVMNics(VirtualMachineProfile profile, VMInstanceVO vm) {
-        logger.debug(String.format("Cleaning up NICs of %s.", vm.toString()));
+        logger.debug("Cleaning up NICs of {}.", vm.toString());
         Boolean preserveNics = UnmanagedVMsManager.UnmanageVMPreserveNic.valueIn(vm.getDataCenterId());
         if (BooleanUtils.isTrue(preserveNics)) {
             logger.debug("Preserve NICs configuration enabled");
@@ -1884,7 +1842,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 }
                 if (!answer.getResult()) {
                     final String details = answer.getDetails();
-                    logger.debug("Unable to stop VM due to " + details);
+                    logger.debug("Unable to stop VM due to {}", details);
                     return false;
                 }
 
@@ -1898,12 +1856,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     }
                 }
             } else {
-                logger.error("Invalid answer received in response to a StopCommand for " + vm.getInstanceName());
+                logger.error("Invalid answer received in response to a StopCommand for {}", vm.getInstanceName());
                 return false;
             }
 
         } catch (final AgentUnavailableException | OperationTimedoutException e) {
-            logger.warn(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e);
+            logger.warn("Unable to stop {} due to [{}].", vm.toString(), e.getMessage(), e);
             if (!force) {
                 return false;
             }
@@ -1915,33 +1873,33 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     protected boolean cleanup(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final ItWorkVO work, final Event event, final boolean cleanUpEvenIfUnableToStop) {
         final VirtualMachine vm = profile.getVirtualMachine();
         final State state = vm.getState();
-        logger.debug("Cleaning up resources for the vm " + vm + " in " + state + " state");
+        logger.debug("Cleaning up resources for the vm {} in {} state", vm, state);
         try {
             if (state == State.Starting) {
                 if (work != null) {
                     final Step step = work.getStep();
                     if (step == Step.Starting && !cleanUpEvenIfUnableToStop) {
-                        logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step);
+                        logger.warn("Unable to cleanup vm {}; work state is incorrect: {}", vm, step);
                         return false;
                     }
 
                     if (step == Step.Started || step == Step.Starting || step == Step.Release) {
                         if (vm.getHostId() != null) {
                             if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
-                                logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process");
+                                logger.warn("Failed to stop vm {} in {} state as a part of cleanup process", vm, State.Starting);
                                 return false;
                             }
                         }
                     }
 
                     if (step != Step.Release && step != Step.Prepare && step != Step.Started && step != Step.Starting) {
-                        logger.debug("Cleanup is not needed for vm " + vm + "; work state is incorrect: " + step);
+                        logger.debug("Cleanup is not needed for vm {}; work state is incorrect: {}", vm, step);
                         return true;
                     }
                 } else {
                     if (vm.getHostId() != null) {
                         if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
-                            logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process");
+                            logger.warn("Failed to stop vm {} in {} state as a part of cleanup process", vm, State.Starting);
                             return false;
                         }
                     }
@@ -1950,26 +1908,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             } else if (state == State.Stopping) {
                 if (vm.getHostId() != null) {
                     if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
-                        logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process");
+                        logger.warn("Failed to stop vm {} in {} state as a part of cleanup process", vm, State.Stopping);
                         return false;
                     }
                 }
             } else if (state == State.Migrating) {
-                if (vm.getHostId() != null) {
+                if (vm.getHostId() != null || vm.getLastHostId() != null) {
                     if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
-                        logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
-                        return false;
-                    }
-                }
-                if (vm.getLastHostId() != null) {
-                    if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
-                        logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
+                        logger.warn("Failed to stop vm {} in {} state as a part of cleanup process", vm, State.Migrating);
                         return false;
                     }
                 }
             } else if (state == State.Running) {
                 if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) {
-                    logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process");
+                    logger.warn("Failed to stop vm {} in {} state as a part of cleanup process", vm, State.Running);
                     return false;
                 }
             }
@@ -1985,21 +1937,21 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         final State state = vm.getState();
         try {
             _networkMgr.release(profile, forced);
-            logger.debug(String.format("Successfully released network resources for the VM %s in %s state", vm, state));
+            logger.debug("Successfully released network resources for the VM {} in {} state", vm, state);
         } catch (final Exception e) {
-            logger.warn(String.format("Unable to release some network resources for the VM %s in %s state", vm, state), e);
+            logger.warn("Unable to release some network resources for the VM {} in {} state", vm, state, e);
         }
 
         try {
             if (vm.getHypervisorType() != HypervisorType.BareMetal) {
                 volumeMgr.release(profile);
-                logger.debug(String.format("Successfully released storage resources for the VM %s in %s state", vm, state));
+                logger.debug("Successfully released storage resources for the VM {} in {} state", vm, state);
             }
         } catch (final Exception e) {
-            logger.warn(String.format("Unable to release storage resources for the VM %s in %s state", vm, state), e);
+            logger.warn("Unable to release storage resources for the VM {} in {} state", vm, state, e);
         }
 
-        logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state));
+        logger.debug("Successfully cleaned up resources for the VM {} in {} state", vm, state);
     }
 
     @Override
@@ -2101,31 +2053,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     ConcurrentOperationException {
         final State state = vm.getState();
         if (state == State.Stopped) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("VM is already stopped: " + vm);
-            }
+            logger.debug("VM is already stopped: {}", vm);
             return;
         }
 
         if (state == State.Destroyed || state == State.Expunging || state == State.Error) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Stopped called on " + vm + " but the state is " + state);
-            }
+            logger.debug("Stopped called on {} but the state is {}", vm, state);
             return;
         }
 
         final ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState());
         if (work != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId());
-            }
+            logger.debug("Found an outstanding work item for this vm {} with state: {}, work id: {}", vm, vm.getState(), work.getId());
         }
         final Long hostId = vm.getHostId();
         if (hostId == null) {
             if (!cleanUpEvenIfUnableToStop) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("HostId is null but this is not a forced stop, cannot stop vm " + vm + " with state:" + vm.getState());
-                }
+                logger.debug("HostId is null but this is not a forced stop, cannot stop vm {} with state: {}", vm, vm.getState());
                 throw new CloudRuntimeException("Unable to stop " + vm);
             }
             try {
@@ -2135,9 +2079,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             }
 
             if (work != null) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Updating work item to Done, id:" + work.getId());
-                }
+                logger.debug("Updating work item to Done, id: {}", work.getId());
                 work.setStep(Step.Done);
                 _workDao.update(work.getId(), work);
             }
@@ -2145,7 +2087,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         } else {
             HostVO host = _hostDao.findById(hostId);
             if (!cleanUpEvenIfUnableToStop && vm.getState() == State.Running && host.getResourceState() == ResourceState.PrepareForMaintenance) {
-                logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: " + vm.getId() + " is not allowed");
+                logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: {} is not allowed", vm.getId());
                 throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed as host is preparing for maintenance mode");
             }
         }
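Note that the CloudRuntimeException messages in this hunk keep plain string concatenation. That is consistent with the refactor: an exception message is always built at throw time, so there is no log-level check to defer behind and {} placeholders would not apply. For example:

    // Built unconditionally; parameterized logging offers no saving here.
    throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed");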
@@ -2162,28 +2104,24 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 throw new CloudRuntimeException("We cannot stop " + vm + " when it is in state " + vm.getState());
             }
             final boolean doCleanup = true;
-            if (logger.isDebugEnabled()) {
-                logger.warn("Unable to transition the state but we're moving on because it's forced stop", e1);
-            }
+            logger.warn("Unable to transition the state but we're moving on because it's forced stop", e1);
 
             if (doCleanup) {
                 if (cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.StopRequested, cleanUpEvenIfUnableToStop)) {
                     try {
-                        if (logger.isDebugEnabled() && work != null) {
-                            logger.debug("Updating work item to Done, id:" + work.getId());
+                        if (work != null) {
+                            logger.debug("Updating work item to Done, id: {}", work.getId());
                         }
                         if (!changeState(vm, Event.AgentReportStopped, null, work, Step.Done)) {
                             throw new CloudRuntimeException("Unable to stop " + vm);
                         }
 
                     } catch (final NoTransitionException e) {
-                        logger.warn("Unable to cleanup " + vm);
+                        logger.warn("Unable to cleanup {}", vm);
                         throw new CloudRuntimeException("Unable to stop " + vm, e);
                     }
                 } else {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Failed to cleanup VM: " + vm);
-                    }
+                    logger.debug("Failed to cleanup VM: {}", vm);
                     throw new CloudRuntimeException("Failed to cleanup " + vm + " , current state " + vm.getState());
                 }
             }
@@ -2233,11 +2171,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             }
 
         } catch (AgentUnavailableException | OperationTimedoutException e) {
-            logger.warn(String.format("Unable to stop %s due to [%s].", profile.toString(), e.toString()), e);
+            logger.warn("Unable to stop {} due to [{}].", profile.toString(), e.toString(), e);
         } finally {
             if (!stopped) {
                 if (!cleanUpEvenIfUnableToStop) {
-                    logger.warn("Unable to stop vm " + vm);
+                    logger.warn("Unable to stop vm {}", vm);
                     try {
                         stateTransitTo(vm, Event.OperationFailed, vm.getHostId());
                     } catch (final NoTransitionException e) {
@@ -2245,7 +2183,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     }
                     throw new CloudRuntimeException("Unable to stop " + vm);
                 } else {
-                    logger.warn("Unable to actually stop " + vm + " but continue with release because it's a force stop");
+                    logger.warn("Unable to actually stop {} but continue with release because it's a force stop", vm);
                     vmGuru.finalizeStop(profile, answer);
                 }
             } else {
@@ -2259,17 +2197,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             }
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug(vm + " is stopped on the host.  Proceeding to release resource held.");
-        }
+        logger.debug("{} is stopped on the host. Proceeding to release resources held.", vm);
 
         releaseVmResources(profile, cleanUpEvenIfUnableToStop);
 
         try {
             if (work != null) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Updating the outstanding work item to Done, id:" + work.getId());
-                }
+                logger.debug("Updating the outstanding work item to Done, id: {}", work.getId());
                 work.setStep(Step.Done);
                 _workDao.update(work.getId(), work);
             }
@@ -2334,15 +2268,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     public void destroy(final String vmUuid, final boolean expunge) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException {
         VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
         if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Unable to find vm or vm is destroyed: " + vm);
-            }
+            logger.debug("Unable to find vm or vm is destroyed: {}", vm);
             return;
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Destroying vm " + vm + ", expunge flag " + (expunge ? "on" : "off"));
-        }
+        logger.debug("Destroying vm {}, expunge flag {}", vm, (expunge ? "on" : "off"));
 
         advanceStop(vmUuid, VmDestroyForcestop.value());
 
@@ -2354,12 +2284,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
                 try {
                     if (!stateTransitTo(vm, VirtualMachine.Event.DestroyRequested, vm.getHostId())) {
-                        logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
+                        logger.debug("Unable to destroy the vm because it is not in the correct state: {}", vm);
                         throw new CloudRuntimeException("Unable to destroy " + vm);
                     } else {
                         if (expunge) {
                             if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) {
-                                logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm);
+                                logger.debug("Unable to expunge the vm because it is not in the correct state: {}", vm);
                                 throw new CloudRuntimeException("Unable to expunge " + vm);
                             }
                         }
@@ -2383,7 +2313,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     private void deleteVMSnapshots(VMInstanceVO vm, boolean expunge) {
         if (! vm.getHypervisorType().equals(HypervisorType.VMware)) {
             if (!_vmSnapshotMgr.deleteAllVMSnapshots(vm.getId(), null)) {
-                logger.debug("Unable to delete all snapshots for " + vm);
+                logger.debug("Unable to delete all snapshots for {}", vm);
                 throw new CloudRuntimeException("Unable to delete vm snapshots for " + vm);
             }
         }
@@ -2413,7 +2343,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             if (command != null) {
                 RestoreVMSnapshotAnswer restoreVMSnapshotAnswer = (RestoreVMSnapshotAnswer) _agentMgr.send(hostId, command);
                 if (restoreVMSnapshotAnswer == null || !restoreVMSnapshotAnswer.getResult()) {
-                    logger.warn("Unable to restore the vm snapshot from image file after live migration of vm with vmsnapshots: " + restoreVMSnapshotAnswer == null ? "null answer" : restoreVMSnapshotAnswer.getDetails());
+                    logger.warn("Unable to restore the vm snapshot from image file after live migration of vm with vmsnapshots: {}", restoreVMSnapshotAnswer == null ? "null answer" : restoreVMSnapshotAnswer.getDetails());
                 }
             }
         }
@@ -2454,11 +2384,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         Map<Volume, StoragePool> volumeToPoolMap = prepareVmStorageMigration(vm, volumeToPool);
 
         try {
-            if(logger.isDebugEnabled()) {
-                logger.debug(String.format("Offline migration of %s vm %s with volumes",
-                                vm.getHypervisorType().toString(),
-                                vm.getInstanceName()));
-            }
+            logger.debug("Offline migration of {} vm {} with volumes",
+                            vm.getHypervisorType().toString(),
+                            vm.getInstanceName());
 
             migrateThroughHypervisorOrStorage(vm, volumeToPoolMap);
 
@@ -2494,18 +2422,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             try {
                 return  _agentMgr.send(hostId, commandsContainer);
             } catch (AgentUnavailableException | OperationTimedoutException e) {
-                logger.warn(String.format("Hypervisor migration failed for the VM: %s", vm), e);
+                logger.warn("Hypervisor migration failed for the VM: {}", vm, e);
             }
         }
         return null;
     }
 
     private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map<Volume, StoragePool> volumeToPool, Long sourceClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException {
-        boolean isDebugEnabled = logger.isDebugEnabled();
-        if(isDebugEnabled) {
-            String msg = String.format("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid());
-            logger.debug(msg);
-        }
+        logger.debug("Cleaning up after hypervisor pool migration volumes for VM {}({})", vm.getInstanceName(), vm.getUuid());
 
         StoragePool rootVolumePool = null;
         if (MapUtils.isNotEmpty(volumeToPool)) {
@@ -2519,10 +2443,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         setDestinationPoolAndReallocateNetwork(rootVolumePool, vm);
         Long destClusterId = rootVolumePool != null ? rootVolumePool.getClusterId() : null;
         if (destClusterId != null && !destClusterId.equals(sourceClusterId)) {
-            if(isDebugEnabled) {
-                String msg = String.format("Resetting lastHost for VM %s(%s)", vm.getInstanceName(), vm.getUuid());
-                logger.debug(msg);
-            }
+            logger.debug("Resetting lastHost for VM {}({})", vm.getInstanceName(), vm.getUuid());
             vm.setLastHostId(null);
             vm.setPodIdToDeployIn(rootVolumePool.getPodId());
         }
@@ -2536,9 +2457,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             throw new CloudRuntimeException(String.format("VM ID: %s migration failed. %s", vm.getUuid(), hypervisorMigrationResults[0].getDetails()));
         }
         for (Answer answer : hypervisorMigrationResults) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("Received an %s: %s", answer.getClass().getSimpleName(), answer));
-            }
+            logger.debug("Received an {}: {}", answer.getClass().getSimpleName(), answer);
             if (answer instanceof MigrateVmToPoolAnswer) {
                 relevantAnswer = (MigrateVmToPoolAnswer) answer;
             }
@@ -2551,14 +2470,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             results = new ArrayList<>();
         }
         List<VolumeVO> volumes = _volsDao.findUsableVolumesForInstance(vm.getId());
-        if(logger.isDebugEnabled()) {
-            String msg = String.format("Found %d volumes for VM %s(uuid:%s, id:%d)", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId());
-            logger.debug(msg);
-        }
+        logger.debug("Found {} volumes for VM {}(uuid:{}, id:{})", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId());
         for (VolumeObjectTO result : results ) {
-            if(logger.isDebugEnabled()) {
-                logger.debug(String.format("Updating volume (%d) with path '%s' on pool '%s'", result.getId(), result.getPath(), result.getDataStoreUuid()));
-            }
+            logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getId(), result.getPath(), result.getDataStoreUuid());
             VolumeVO volume = _volsDao.findById(result.getId());
             StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid());
             if (volume == null || pool == null) {
@@ -2689,10 +2603,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
     private void setDestinationPoolAndReallocateNetwork(StoragePool destPool, VMInstanceVO vm) throws InsufficientCapacityException {
         if (destPool != null && destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) {
-            if (logger.isDebugEnabled()) {
-                String msg = String.format("as the pod for vm %s has changed we are reallocating its network", vm.getInstanceName());
-                logger.debug(msg);
-            }
+            logger.debug("As the pod for vm {} has changed, we are reallocating its network", vm.getInstanceName());
             final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null);
             final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null);
             _networkMgr.reallocate(vmProfile, plan);
@@ -2711,8 +2622,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     }
 
     private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) {
-        logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() +
-                " from source host: " + srcHost.getId());
+        logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: {} from source host: {}",
+                vm.getInstanceName(), srcHost.getId());
         final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName());
         uvc.setCleanupVmFiles(true);
         try {
@@ -2754,20 +2665,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     private void orchestrateMigrate(final String vmUuid, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException {
         final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
         if (vm == null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Unable to find the vm " + vmUuid);
-            }
+            logger.debug("Unable to find the vm {}", vmUuid);
             throw new CloudRuntimeException("Unable to find a virtual machine with id " + vmUuid);
         }
         migrate(vm, srcHostId, dest);
     }
 
     protected void migrate(final VMInstanceVO vm, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException {
-        logger.info("Migrating " + vm + " to " + dest);
+        logger.info("Migrating {} to {}", vm, dest);
         final long dstHostId = dest.getHost().getId();
         final Host fromHost = _hostDao.findById(srcHostId);
         if (fromHost == null) {
-            logger.info("Unable to find the host to migrate from: " + srcHostId);
+            logger.info("Unable to find the host to migrate from: {}", srcHostId);
             throw new CloudRuntimeException("Unable to find the host to migrate from: " + srcHostId);
         }
 
@@ -2775,8 +2684,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             final List<VolumeVO> volumes = _volsDao.findCreatedByInstance(vm.getId());
             for (final VolumeVO volume : volumes) {
                 if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) {
-                    logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: "
-                            + dest.getHost().getId());
+                    logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: {}",
+                            dest.getHost().getId());
                     throw new CloudRuntimeException(
                             "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: "
                                     + dest.getHost().getId());
@@ -2787,9 +2696,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         final VirtualMachineGuru vmGuru = getVmGuru(vm);
 
         if (vm.getState() != State.Running) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("VM is not Running, unable to migrate the vm " + vm);
-            }
+            logger.debug("VM is not Running, unable to migrate the vm {}", vm);
             throw new CloudRuntimeException("VM is not Running, unable to migrate the vm currently " + vm + " , current state: " + vm.getState().toString());
         }
 
@@ -2852,18 +2759,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     volumeMgr.release(vm.getId(), dstHostId);
                 }
 
-                logger.info("Migration cancelled because state has changed: " + vm);
+                logger.info("Migration cancelled because state has changed: {}", vm);
                 throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm);
             }
         } catch (final NoTransitionException e1) {
             _networkMgr.rollbackNicForMigration(vmSrc, profile);
             volumeMgr.release(vm.getId(), dstHostId);
-            logger.info("Migration cancelled because " + e1.getMessage());
+            logger.info("Migration cancelled because {}", e1.getMessage());
             throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage());
         } catch (final CloudRuntimeException e2) {
             _networkMgr.rollbackNicForMigration(vmSrc, profile);
             volumeMgr.release(vm.getId(), dstHostId);
-            logger.info("Migration cancelled because " + e2.getMessage());
+            logger.info("Migration cancelled because {}", e2.getMessage());
             work.setStep(Step.Done);
             _workDao.update(work.getId(), work);
             try {
@@ -2887,7 +2794,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 }
             } catch (final OperationTimedoutException e) {
                 if (e.isActive()) {
-                    logger.warn("Active migration command so scheduling a restart for " + vm, e);
+                    logger.warn("Active migration command so scheduling a restart for {}", vm, e);
                     _haMgr.scheduleRestart(vm, true);
                 }
                 throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId);
@@ -2903,22 +2810,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
             try {
                 if (!checkVmOnHost(vm, dstHostId)) {
-                    logger.error("Unable to complete migration for " + vm);
+                    logger.error("Unable to complete migration for {}", vm);
                     try {
                         _agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null);
                     } catch (final AgentUnavailableException e) {
-                        logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e);
+                        logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e);
                     }
                     cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
                     throw new CloudRuntimeException("Unable to complete migration for " + vm);
                 }
             } catch (final OperationTimedoutException e) {
-                logger.warn("Error while checking the vm " + vm + " on host " + dstHostId, e);
+                logger.warn("Error while checking the vm {} on host {}", vm, dstHostId, e);
             }
             migrated = true;
         } finally {
             if (!migrated) {
-                logger.info("Migration was unsuccessful.  Cleaning up: " + vm);
+                logger.info("Migration was unsuccessful.  Cleaning up: {}", vm);
                 _networkMgr.rollbackNicForMigration(vmSrc, profile);
                 volumeMgr.release(vm.getId(), dstHostId);
 
@@ -2960,7 +2867,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
         Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vmInstance.getId());
         if (MapUtils.isNotEmpty(vlanToPersistenceMap)) {
-            logger.debug(String.format("Setting VLAN persistence to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO));
+            logger.debug("Setting VLAN persistence to [{}] as part of migrate command for VM [{}].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO);
             migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap);
         }
 
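One caveat to dropping the guards, relevant to the Gson serialization in the hunk above: placeholder substitution is lazy, but argument expressions are still evaluated eagerly, so new Gson().toJson(...) runs even when DEBUG is disabled. Where that cost matters, Log4j2 (2.4 and later) also accepts Supplier arguments that are invoked only if the level is enabled. A sketch under that assumption, with hypothetical names map and vmTO:

    // Eager: toJson(map) executes on every call, whether or not DEBUG is on.
    logger.debug("Setting VLAN persistence to [{}] for VM [{}].", new Gson().toJson(map), vmTO);

    // Lazy: the lambdas run only when the DEBUG level is enabled.
    logger.debug("Setting VLAN persistence to [{}] for VM [{}].", () -> new Gson().toJson(map), () -> vmTO);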
@@ -2971,15 +2878,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
         Map<String, DpdkTO> answerDpdkInterfaceMapping = prepareForMigrationAnswer.getDpdkInterfaceMapping();
         if (MapUtils.isNotEmpty(answerDpdkInterfaceMapping) && dpdkInterfaceMapping != null) {
-            logger.debug(String.format("Setting DPDK interface mapping to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap),
-                    virtualMachineTO));
+            logger.debug("Setting DPDK interface mapping to [{}] as part of migrate command for VM [{}].", new Gson().toJson(answerDpdkInterfaceMapping),
+                    virtualMachineTO);
             dpdkInterfaceMapping.putAll(answerDpdkInterfaceMapping);
             migrateCommand.setDpdkInterfaceMapping(dpdkInterfaceMapping);
         }
 
         Integer newVmCpuShares = prepareForMigrationAnswer.getNewVmCpuShares();
         if (newVmCpuShares != null) {
-            logger.debug(String.format("Setting CPU shares to [%d] as part of migrate command for VM [%s].", newVmCpuShares, virtualMachineTO));
+            logger.debug("Setting CPU shares to [{}] as part of migrate command for VM [{}].", newVmCpuShares, virtualMachineTO);
             migrateCommand.setNewVmCpuShares(newVmCpuShares);
         }
 
@@ -3053,7 +2960,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                                 volume.getUuid(), targetPool.getUuid(), profile.getUuid(), targetHost.getUuid()));
             }
             if (currentPool.getId() == targetPool.getId()) {
-                logger.info(String.format("The volume [%s] is already allocated in storage pool [%s].", volume.getUuid(), targetPool.getUuid()));
+                logger.info("The volume [{}] is already allocated in storage pool [{}].", volume.getUuid(), targetPool.getUuid());
             }
             volumeToPoolObjectMap.put(volume, targetPool);
         }
@@ -3340,9 +3247,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     AttachOrDettachConfigDriveCommand dettachCommand = new AttachOrDettachConfigDriveCommand(vm.getInstanceName(), vmData, VmConfigDriveLabel.value(), false);
                     try {
                         _agentMgr.send(srcHost.getId(), dettachCommand);
-                        logger.debug("Deleted config drive ISO for  vm " + vm.getInstanceName() + " In host " + srcHost);
+                        logger.debug("Deleted config drive ISO for vm {} in host {}", vm.getInstanceName(), srcHost);
                     } catch (OperationTimedoutException e) {
-                        logger.error("TIme out occurred while exeuting command AttachOrDettachConfigDrive " + e.getMessage(), e);
+                        logger.error("Timeout occurred while executing command AttachOrDettachConfigDrive {}", e.getMessage(), e);
 
                     }
                 }
@@ -3354,22 +3261,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
             try {
                 if (!checkVmOnHost(vm, destHostId)) {
-                    logger.error("Vm not found on destination host. Unable to complete migration for " + vm);
+                    logger.error("Vm not found on destination host. Unable to complete migration for {}", vm);
                     try {
                         _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
                     } catch (final AgentUnavailableException e) {
-                        logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e);
+                        logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e);
                     }
                     cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
                     throw new CloudRuntimeException("VM not found on destination host. Unable to complete migration for " + vm);
                 }
             } catch (final OperationTimedoutException e) {
-                logger.error("Error while checking the vm " + vm + " is on host " + destHost, e);
+                logger.error("Error while checking the vm {} is on host {}", vm, destHost, e);
             }
             migrated = true;
         } finally {
             if (!migrated) {
-                logger.info("Migration was unsuccessful.  Cleaning up: " + vm);
+                logger.info("Migration was unsuccessful.  Cleaning up: {}", vm);
                 _networkMgr.rollbackNicForMigration(vmSrc, profile);
                 volumeMgr.release(vm.getId(), destHostId);
 
@@ -3412,7 +3319,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 try {
                     final List<ItWorkVO> works = _workDao.listWorkInProgressFor(nodeId);
                     for (final ItWorkVO work : works) {
-                        logger.info("Handling unfinished work item: " + work);
+                        logger.info("Handling unfinished work item: {}", work);
                         try {
                             final VMInstanceVO vm = _vmDao.findById(work.getInstanceId());
                             if (vm != null) {
@@ -3433,7 +3340,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                                 }
                             }
                         } catch (final Exception e) {
-                            logger.error("Error while handling " + work, e);
+                            logger.error("Error while handling {}", work, e);
                         }
                     }
                 } finally {
@@ -3455,7 +3362,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 try {
                     orchestrateMigrateAway(vmUuid, srcHostId, null);
                 } catch (final InsufficientServerCapacityException e) {
-                    logger.warn("Failed to deploy vm " + vmUuid + " with original planner, sending HAPlanner");
+                    logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner", vmUuid);
                     orchestrateMigrateAway(vmUuid, srcHostId, _haMgr.getHAPlanner());
                 }
             } finally {
@@ -3518,26 +3425,24 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 throw new CloudRuntimeException(message, e2);
             }
             if (dest == null) {
-                logger.warn("Unable to find destination for migrating the vm " + profile);
+                logger.warn("Unable to find destination for migrating the vm {}", profile);
                 throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", DataCenter.class, host.getDataCenterId());
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug("Found destination " + dest + " for migrating to.");
-            }
+            logger.debug("Found destination {} for migrating to.", dest);
 
             excludes.addHost(dest.getHost().getId());
             try {
                 migrate(vm, srcHostId, dest);
                 return;
             } catch (ResourceUnavailableException | ConcurrentOperationException e) {
-                logger.warn(String.format("Unable to migrate %s to %s due to [%s]", vm.toString(), dest.getHost().toString(), e.getMessage()), e);
+                logger.warn("Unable to migrate {} to {} due to [{}]", vm.toString(), dest.getHost().toString(), e.getMessage(), e);
             }
 
             try {
                 advanceStop(vmUuid, true);
                 throw new CloudRuntimeException("Unable to migrate " + vm);
             } catch (final ResourceUnavailableException | ConcurrentOperationException | OperationTimedoutException e) {
-                logger.error(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e);
+                logger.error("Unable to stop {} due to [{}].", vm.toString(), e.getMessage(), e);
                 throw new CloudRuntimeException("Unable to migrate " + vm);
             }
         }
@@ -3623,10 +3528,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             final VirtualMachine vm = _vmDao.findByUuid(vmUuid);
             VmWorkJobVO placeHolder = createPlaceHolderWork(vm.getId());
             try {
-                if (logger.isDebugEnabled()) {
-                    logger.debug(String.format("reboot parameter value of %s == %s at orchestration", VirtualMachineProfile.Param.BootIntoSetup.getName(),
-                            (params == null? "<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup))));
-                }
+                logger.debug("reboot parameter value of {} == {} at orchestration", VirtualMachineProfile.Param.BootIntoSetup.getName(),
+                        (params == null? "<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup)));
                 orchestrateReboot(vmUuid, params);
             } finally {
                 if (placeHolder != null) {
@@ -3634,10 +3537,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 }
             }
         } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("reboot parameter value of %s == %s through job-queue", VirtualMachineProfile.Param.BootIntoSetup.getName(),
-                        (params == null? "<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup))));
-            }
+            logger.debug("reboot parameter value of {} == {} through job-queue", VirtualMachineProfile.Param.BootIntoSetup.getName(),
+                    (params == null? "<very null>":params.get(VirtualMachineProfile.Param.BootIntoSetup)));
             final Outcome<VirtualMachine> outcome = rebootVmThroughJobQueue(vmUuid, params);
 
             retrieveVmFromJobOutcome(outcome, vmUuid, "rebootVm");
@@ -3650,7 +3551,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     ResourceUnavailableException {
         final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
         if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) {
-            logger.error("Unable to reboot VM " + vm + " due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
+            logger.error("Unable to reboot VM {} due to: {} has active VM snapshot tasks", vm, vm.getInstanceName());
             throw new CloudRuntimeException("Unable to reboot VM " + vm + " due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
         }
         final DataCenter dc = _entityMgr.findById(DataCenter.class, vm.getDataCenterId());
@@ -3686,7 +3587,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             logger.info(errorMsg);
             throw new CloudRuntimeException(errorMsg);
         } catch (final OperationTimedoutException e) {
-            logger.warn("Unable to send the reboot command to host " + dest.getHost() + " for the vm " + vm + " due to operation timeout", e);
+            logger.warn("Unable to send the reboot command to host {} for the vm {} due to operation timeout.", dest.getHost(), vm, e);
             throw new CloudRuntimeException("Failed to reboot the vm on host " + dest.getHost(), e);
         }
     }
@@ -3696,9 +3597,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         if (params != null) {
             enterSetup = (Boolean) params.get(VirtualMachineProfile.Param.BootIntoSetup);
         }
-        if (logger.isDebugEnabled()) {
-            logger.debug(String.format("orchestrating VM reboot for '%s' %s set to %s", vmTo.getName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup));
-        }
+        logger.debug("Orchestrating VM reboot for '{}' {} set to {}", vmTo.getName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup);
         vmTo.setEnterHardwareSetup(enterSetup == null ? false : enterSetup);
     }
 
@@ -3752,7 +3651,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             case DomainRouter:
                 return vm.getPrivateIpAddress();
             default:
-                logger.debug(String.format("%s is a [%s], returning null for control Nic IP.", vm.toString(), vm.getType()));
+                logger.debug("{} is a [{}], returning null for control Nic IP.", vm.toString(), vm.getType());
                 return null;
         }
     }
@@ -3898,14 +3797,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             return;
         }
 
-        if(logger.isDebugEnabled()) {
-            logger.debug("Received startup command from hypervisor host. host id: " + agent.getId());
-        }
+        logger.debug("Received startup command from hypervisor host. host id: {}", agent.getId());
 
         _syncMgr.resetHostSyncState(agent.getId());
 
         if (forRebalance) {
-            logger.debug("Not processing listener " + this + " as connect happens on rebalance process");
+            logger.debug("Not processing listener {} as connect happens on rebalance process", this);
             return;
         }
         final Long clusterId = agent.getClusterId();
@@ -3915,9 +3812,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             final ClusterVMMetaDataSyncCommand syncVMMetaDataCmd = new ClusterVMMetaDataSyncCommand(ClusterVMMetaDataSyncInterval.value(), clusterId);
             try {
                 final long seq_no = _agentMgr.send(agentId, new Commands(syncVMMetaDataCmd), this);
-                logger.debug("Cluster VM metadata sync started with jobid " + seq_no);
+                logger.debug("Cluster VM metadata sync started with jobid {}", seq_no);
             } catch (final AgentUnavailableException e) {
-                logger.fatal("The Cluster VM metadata sync process failed for cluster id " + clusterId + " with ", e);
+                logger.fatal("The Cluster VM metadata sync process failed for cluster id {}", clusterId, e);
             }
         }
     }
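The fatal call above is the flip side of the trailing-Throwable rule: when every {} is consumed by an argument, the Throwable is formatted as text and its stack trace is lost, which is why the message keeps only one placeholder. An illustrative contrast:

    // Two placeholders, two arguments: 'e' fills the second {} and the
    // stack trace is dropped.
    logger.fatal("Metadata sync failed for cluster id {} with {}", clusterId, e);

    // One placeholder, two arguments: 'e' becomes the event's Throwable.
    logger.fatal("Metadata sync failed for cluster id {}", clusterId, e);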
@@ -3971,16 +3868,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         }
 
         if (!(vmInstance.getState().equals(State.Stopped) || vmInstance.getState().equals(State.Running))) {
-            logger.warn("Unable to upgrade virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState());
+            logger.warn("Unable to upgrade virtual machine {} in state {}", vmInstance.toString(), vmInstance.getState());
             throw new InvalidParameterValueException("Unable to upgrade virtual machine " + vmInstance.toString() + " " + " in state " + vmInstance.getState() +
                     "; make sure the virtual machine is stopped/running");
         }
 
         if (!newServiceOffering.isDynamic() && vmInstance.getServiceOfferingId() == newServiceOffering.getId()) {
-            if (logger.isInfoEnabled()) {
-                logger.info("Not upgrading vm " + vmInstance.toString() + " since it already has the requested " + "service offering (" + newServiceOffering.getName() +
-                        ")");
-            }
+            logger.info("Not upgrading vm {} since it already has the requested service offering ({})", vmInstance.toString(), newServiceOffering.getName());
 
             throw new InvalidParameterValueException("Not upgrading vm " + vmInstance.toString() + " since it already " + "has the requested service offering (" +
                     newServiceOffering.getName() + ")");
@@ -4114,7 +4008,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         final CallContext cctx = CallContext.current();
 
         checkIfNetworkExistsForUserVM(vm, network);
-        logger.debug("Adding vm " + vm + " to network " + network + "; requested nic profile " + requested);
+        logger.debug("Adding vm {} to network {}; requested nic profile {}", vm, network, requested);
         final VMInstanceVO vmVO = _vmDao.findById(vm.getId());
         final ReservationContext context = new ReservationContextImpl(null, null, cctx.getCallingUser(), cctx.getCallingAccount());
 
@@ -4133,14 +4027,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             final NicTO nicTO = toNicTO(nic, vmProfile.getVirtualMachine().getHypervisorType());
 
             //4) plug the nic to the vm
-            logger.debug("Plugging nic for vm " + vm + " in network " + network);
+            logger.debug("Plugging nic for vm {} in network {}", vm, network);
 
             boolean result = false;
             try {
                 result = plugNic(network, nicTO, vmTO, context, dest);
                 if (result) {
                     _userVmMgr.setupVmForPvlan(true, vm.getHostId(), nic);
-                    logger.debug("Nic is plugged successfully for vm " + vm + " in network " + network + ". Vm  is a part of network now");
+                    logger.debug("Nic is plugged successfully for vm {} in network {}. VM is a part of network now.", vm, network);
                     final long isDefault = nic.isDefaultNic() ? 1 : 0;
 
                     if(VirtualMachine.Type.User.equals(vmVO.getType())) {
@@ -4149,19 +4043,19 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     }
                     return nic;
                 } else {
-                    logger.warn("Failed to plug nic to the vm " + vm + " in network " + network);
+                    logger.warn("Failed to plug nic to the vm {} in network {}", vm, network);
                     return null;
                 }
             } finally {
                 if (!result) {
-                    logger.debug("Removing nic " + nic + " from vm " + vmProfile.getVirtualMachine() + " as nic plug failed on the backend");
+                    logger.debug("Removing nic {} from vm {} as nic plug failed on the backend.", nic, vmProfile.getVirtualMachine());
                     _networkMgr.removeNic(vmProfile, _nicsDao.findById(nic.getId()));
                 }
             }
         } else if (vm.getState() == State.Stopped) {
             return _networkMgr.createNicForVm(network, requested, context, vmProfile, false);
         } else {
-            logger.warn("Unable to add vm " + vm + " to network  " + network);
+            logger.warn("Unable to add vm {} to network {}", vm, network);
             throw new ResourceUnavailableException("Unable to add vm " + vm + " to network, is not in the right state", DataCenter.class, vm.getDataCenterId());
         }
     }
@@ -4225,25 +4119,25 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
         if (vm.getState() == State.Running) {
             final NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType());
-            logger.debug("Un-plugging nic " + nic + " for vm " + vm + " from network " + network);
+            logger.debug("Un-plugging nic {} for vm {} from network {}.", nic, vm, network);
             final boolean result = unplugNic(network, nicTO, vmTO, context, dest);
             if (result) {
                 _userVmMgr.setupVmForPvlan(false, vm.getHostId(), nicProfile);
-                logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network);
+                logger.debug("Nic is unplugged successfully for vm {} in network {}.", vm, network);
                 final long isDefault = nic.isDefaultNic() ? 1 : 0;
                 UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(),
                         Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay());
             } else {
-                logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network);
+                logger.warn("Failed to unplug nic for the vm {} from network {}.", vm, network);
                 return false;
             }
         } else if (vm.getState() != State.Stopped) {
-            logger.warn("Unable to remove vm " + vm + " from network  " + network);
+            logger.warn("Unable to remove vm {} from network {}", vm, network);
             throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId());
         }
 
         _networkMgr.releaseNic(vmProfile, nic);
-        logger.debug("Successfully released nic " + nic + "for vm " + vm);
+        logger.debug("Successfully released nic {} for vm {}", nic, vm);
 
         _networkMgr.removeNic(vmProfile, nic);
         _nicsDao.remove(nic.getId());
@@ -4278,29 +4172,25 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         }
 
         if (nic == null) {
-            logger.warn("Could not get a nic with " + network);
+            logger.warn("Could not get a nic with {}", network);
             return false;
         }
 
         if (nic.isDefaultNic() && vm.getType() == VirtualMachine.Type.User) {
-            logger.warn("Failed to remove nic from " + vm + " in " + network + ", nic is default.");
+            logger.warn("Failed to remove nic from {} in {}, nic is default.", vm, network);
             throw new CloudRuntimeException("Failed to remove nic from " + vm + " in " + network + ", nic is default.");
         }
 
         final Nic lock = _nicsDao.acquireInLockTable(nic.getId());
         if (lock == null) {
             if (_nicsDao.findById(nic.getId()) == null) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Not need to remove the vm " + vm + " from network " + network + " as the vm doesn't have nic in this network");
-                }
+                logger.debug("No need to remove the vm {} from network {} as the vm doesn't have a nic in this network.", vm, network);
                 return true;
             }
             throw new ConcurrentOperationException("Unable to lock nic " + nic.getId());
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Lock is acquired for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network);
-        }
+        logger.debug("Lock is acquired for nic id {} as a part of removing vm {} from network {}", lock.getId(), vm, network);
 
         try {
             final NicProfile nicProfile =
@@ -4309,29 +4199,27 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
             if (vm.getState() == State.Running) {
                 final NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType());
-                logger.debug("Un-plugging nic for vm " + vm + " from network " + network);
+                logger.debug("Un-plugging nic for vm {} from network {}", vm, network);
                 final boolean result = unplugNic(network, nicTO, vmTO, context, dest);
                 if (result) {
-                    logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network);
+                    logger.debug("Nic is unplugged successfully for vm {} in network {}", vm, network);
                 } else {
-                    logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network);
+                    logger.warn("Failed to unplug nic for the vm {} from network {}", vm, network);
                     return false;
                 }
             } else if (vm.getState() != State.Stopped) {
-                logger.warn("Unable to remove vm " + vm + " from network  " + network);
+                logger.warn("Unable to remove vm {} from network {}", vm, network);
                 throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId());
             }
 
             _networkMgr.releaseNic(vmProfile, nic);
-            logger.debug("Successfully released nic " + nic + "for vm " + vm);
+            logger.debug("Successfully released nic {} for vm {}", nic, vm);
 
             _networkMgr.removeNic(vmProfile, nic);
             return true;
         } finally {
             _nicsDao.releaseFromLockTable(lock.getId());
-            if (logger.isDebugEnabled()) {
-                logger.debug("Lock is released for nic id " + lock.getId() + " as a part of remove vm " + vm + " from network " + network);
-            }
+            logger.debug("Lock is released for nic id {} as a part of removing vm {} from network {}", lock.getId(), vm, network);
         }
     }
 
@@ -4371,9 +4259,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         }
 
         if (dest != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(" Found " + dest + " for scaling the vm to.");
-            }
+            logger.debug("Found {} for scaling the vm to.", dest);
         }
 
         if (dest == null) {
@@ -4384,7 +4270,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         try {
             migrateForScale(vm.getUuid(), srcHostId, dest, oldSvcOfferingId);
         } catch (ResourceUnavailableException | ConcurrentOperationException e) {
-            logger.warn(String.format("Unable to migrate %s to %s due to [%s]", vm.toString(), dest.getHost().toString(), e.getMessage()), e);
+            logger.warn("Unable to migrate {} to {} due to [{}]", vm.toString(), dest.getHost().toString(), e.getMessage(), e);
             throw e;
         }
     }
@@ -4420,7 +4306,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             throws ResourceUnavailableException, ConcurrentOperationException {
 
         VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
-        logger.info(String.format("Migrating %s to %s", vm, dest));
+        logger.info("Migrating {} to {}", vm, dest);
 
         vm.getServiceOfferingId();
         final long dstHostId = dest.getHost().getId();
@@ -4520,7 +4406,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 }
             } catch (final OperationTimedoutException e) {
                 if (e.isActive()) {
-                    logger.warn("Active migration command so scheduling a restart for " + vm, e);
+                    logger.warn("Active migration command so scheduling a restart for {}", vm, e);
                     _haMgr.scheduleRestart(vm, true);
                 }
                 throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId, e);
@@ -4539,23 +4425,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
             try {
                 if (!checkVmOnHost(vm, dstHostId)) {
-                    logger.error("Unable to complete migration for " + vm);
+                    logger.error("Unable to complete migration for {}", vm);
                     try {
                         _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
                     } catch (final AgentUnavailableException e) {
-                        logger.error(String.format("Unable to cleanup source host [%s] due to [%s].", srcHostId, e.getMessage()), e);
+                        logger.error("Unable to cleanup source host [{}] due to [{}].", srcHostId, e.getMessage(), e);
                     }
                     cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
                     throw new CloudRuntimeException("Unable to complete migration for " + vm);
                 }
             } catch (final OperationTimedoutException e) {
-                logger.debug(String.format("Error while checking the %s on %s", vm, dstHost), e);
+                logger.debug("Error while checking the {} on {}", vm, dstHost, e);
             }
 
             migrated = true;
         } finally {
             if (!migrated) {
-                logger.info("Migration was unsuccessful.  Cleaning up: " + vm);
+                logger.info("Migration was unsuccessful.  Cleaning up: {}", vm);
 
                 String alertSubject = String.format("Unable to migrate %s from %s in Zone [%s] and Pod [%s].",
                         vm.getInstanceName(), fromHost, dest.getDataCenter().getName(), dest.getPod().getName());
@@ -4597,7 +4483,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 _agentMgr.send(host.getId(), cmds);
                 final ReplugNicAnswer replugNicAnswer = cmds.getAnswer(ReplugNicAnswer.class);
                 if (replugNicAnswer == null || !replugNicAnswer.getResult()) {
-                    logger.warn("Unable to replug nic for vm " + vm.getName());
+                    logger.warn("Unable to replug nic for vm {}", vm.getName());
                     result = false;
                 }
             } catch (final OperationTimedoutException e) {
@@ -4623,7 +4509,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 NetworkDetailVO pvlanTypeDetail = networkDetailsDao.findDetail(network.getId(), ApiConstants.ISOLATED_PVLAN_TYPE);
                 if (pvlanTypeDetail != null) {
                     Map<NetworkOffering.Detail, String> nicDetails = nic.getDetails() == null ? new HashMap<>() : nic.getDetails();
-                    logger.debug("Found PVLAN type: " + pvlanTypeDetail.getValue() + " on network details, adding it as part of the PlugNicCommand");
+                    logger.debug("Found PVLAN type: {} on network details, adding it as part of the PlugNicCommand", pvlanTypeDetail.getValue());
                     nicDetails.putIfAbsent(NetworkOffering.Detail.pvlanType, pvlanTypeDetail.getValue());
                     nic.setDetails(nicDetails);
                 }
@@ -4633,7 +4519,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 _agentMgr.send(dest.getHost().getId(), cmds);
                 final PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class);
                 if (plugNicAnswer == null || !plugNicAnswer.getResult()) {
-                    logger.warn("Unable to plug nic for vm " + vm.getName());
+                    logger.warn("Unable to plug nic for vm {}", vm.getName());
                     result = false;
                 }
             } catch (final OperationTimedoutException e) {
@@ -4673,14 +4559,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
                 final UnPlugNicAnswer unplugNicAnswer = cmds.getAnswer(UnPlugNicAnswer.class);
                 if (unplugNicAnswer == null || !unplugNicAnswer.getResult()) {
-                    logger.warn("Unable to unplug nic from router " + router);
+                    logger.warn("Unable to unplug nic from router {}", router);
                     result = false;
                 }
             } catch (final OperationTimedoutException e) {
                 throw new AgentUnavailableException("Unable to unplug nic from router " + router + " from network " + network, dest.getHost().getId(), e);
             }
         } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) {
-            logger.debug("Vm " + router.getInstanceName() + " is in " + router.getState() + ", so not sending unplug nic command to the backend");
+            logger.debug("Vm {} is in {}, so not sending unplug nic command to the backend", router.getInstanceName(), router.getState());
         } else {
             String message = String.format("Unable to apply unplug nic, VM [%s] is not in the right state (\"Running\"). VM state [%s].", router.toString(), router.getState());
             logger.warn(message);
@@ -4769,7 +4655,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             Answer reconfigureAnswer = _agentMgr.send(vm.getHostId(), scaleVmCommand);
 
             if (reconfigureAnswer == null || !reconfigureAnswer.getResult()) {
-                logger.error("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails()));
+                logger.error("Unable to scale vm due to {}", (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails()));
                 throw new CloudRuntimeException("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails()));
             }
 
@@ -4875,10 +4761,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     break;
                 }
             } else {
-                logger.warn("VM " + vmId + " no longer exists when processing VM state report");
+                logger.warn("VM {} no longer exists when processing VM state report.", vmId);
             }
         } else {
-            logger.info("There is pending job or HA tasks working on the VM. vm id: " + vmId + ", postpone power-change report by resetting power-change counters");
+            logger.info("There is pending job or HA tasks working on the VM. vm id: {}, postpone power-change report by resetting power-change counters.", vmId );
             _vmDao.resetVmPowerStateTracking(vmId);
         }
     }
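
The pattern in the hunks above repeats throughout this commit: Log4j2's parameterized messages defer string construction until the logger has checked the level, so the explicit isDebugEnabled()/isInfoEnabled() guards become redundant. A minimal sketch of the idea, assuming Log4j2 2.x (hypothetical class, not part of this change):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class PlaceholderSketch {
        private static final Logger logger = LogManager.getLogger(PlaceholderSketch.class);

        void report(long vmId, String state) {
            // Before: concatenation builds the full message even when INFO is suppressed.
            logger.info("VM " + vmId + " is at " + state);
            // After: {} placeholders are substituted only if INFO is enabled.
            logger.info("VM {} is at {}", vmId, state);
        }
    }
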
@@ -4889,7 +4775,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
         switch (vm.getState()) {
         case Starting:
-            logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it");
+            logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState());
 
             try {
                 stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId());
@@ -4897,7 +4783,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 logger.warn("Unexpected VM state transition exception, race-condition?", e);
             }
 
-            logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor");
+            logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName());
 
             _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(),
                     VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName()
@@ -4918,7 +4804,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
         case Stopping:
         case Stopped:
-            logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it");
+            logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState());
 
             try {
                 stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId());
@@ -4931,29 +4817,27 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
             ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, vm.getDomainId(),
                     EventTypes.EVENT_VM_START, "Out of band VM power on", vm.getId(), ApiCommandResourceType.VirtualMachine.toString());
-            logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor");
+            logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName());
             break;
 
         case Destroyed:
         case Expunging:
-            logger.info("Receive power on report when VM is in destroyed or expunging state. vm: "
-                    + vm.getId() + ", state: " + vm.getState());
+            logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm.getId(), vm.getState());
             break;
 
         case Migrating:
-            logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it");
+            logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState());
             try {
                 stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId());
             } catch (final NoTransitionException e) {
                 logger.warn("Unexpected VM state transition exception, race-condition?", e);
             }
-            logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor");
+            logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName());
             break;
 
         case Error:
         default:
-            logger.info("Receive power on report when VM is in error or unexpected state. vm: "
-                    + vm.getId() + ", state: " + vm.getState());
+            logger.info("Receive power on report when VM is in error or unexpected state. vm: {}, state: {}.", vm.getId(), vm.getState());
             break;
         }
     }
@@ -4967,20 +4851,17 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM,vm.getDomainId(),
                     EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), ApiCommandResourceType.VirtualMachine.toString());
         case Migrating:
-            if (logger.isInfoEnabled()) {
-                logger.info(
-                        String.format("VM %s is at %s and we received a %s report while there is no pending jobs on it"
-                                , vm.getInstanceName(), vm.getState(), vm.getPowerState()));
-            }
+            logger.info("VM {} is at {} and we received a {} report while there is no pending jobs on it"
+                            , vm.getInstanceName(), vm.getState(), vm.getPowerState());
             if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running
                     && HaVmRestartHostUp.value()
                     && vm.getHypervisorType() != HypervisorType.VMware
                     && vm.getHypervisorType() != HypervisorType.Hyperv) {
-                logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart");
+                logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm.getInstanceName());
                 if (!_haMgr.hasPendingHaWork(vm.getId())) {
                     _haMgr.scheduleRestart(vm, true);
                 } else {
-                    logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it");
+                    logger.info("VM {} already has a pending HA task working on it.", vm.getInstanceName());
                 }
                 return;
             }
@@ -5010,7 +4891,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState()
                     + " -> Stopped) from out-of-context transition.");
 
-            logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Stopped state according to power-off report from hypervisor");
+            logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm.getInstanceName());
 
             break;
 
@@ -5080,7 +4961,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     l.add(rs.getLong(1));
                 }
             } catch (SQLException e) {
-                logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\"} due to [%s].", sql, hostId, cutTimeStr, e.getMessage()), e);
+                logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\"} due to [{}].", sql, hostId, cutTimeStr, e.getMessage(), e);
             }
         }
         return l;
@@ -5109,7 +4990,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     l.add(rs.getLong(1));
                 }
             } catch (final SQLException e) {
-                logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage()), e);
+                logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage(), e);
             }
             return l;
         }
@@ -5137,7 +5018,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     l.add(rs.getLong(1));
                 }
             } catch (final SQLException e) {
-                logger.error(String.format("Unable to execute SQL [%s] with params {\"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, cutTimeStr, jobStatusInProgress, e.getMessage()), e);
+                logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e);
             }
             return l;
         }
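
Two Log4j2 details make the SQL error logs above safe: only a bare "{}" pair is parsed as a placeholder, so the literal JSON-style braces in the message print as-is; and once every placeholder has consumed a parameter, a leftover final Throwable argument is logged with its full stack trace. A sketch under those assumptions (hypothetical values):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class ThrowableTailSketch {
        private static final Logger logger = LogManager.getLogger(ThrowableTailSketch.class);

        void demo() {
            String sql = "SELECT 1";   // hypothetical
            long hostId = 42L;         // hypothetical
            Exception e = new IllegalStateException("boom");
            // Three {} placeholders consume sql, hostId and e.getMessage();
            // e is left over and is therefore logged as the Throwable,
            // stack trace included. {"h.id": ...} is printed literally.
            logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}} due to [{}].",
                    sql, hostId, e.getMessage(), e);
        }
    }
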
@@ -5415,9 +5296,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             }
             workJob = pendingWorkJobs.get(0);
         } else {
-            if (logger.isTraceEnabled()) {
-                logger.trace(String.format("no jobs to add network %s for vm %s yet", network, vm));
-            }
+            logger.trace("no jobs to add network {} for vm {} yet", network, vm);
 
             workJob = createVmWorkJobToAddNetwork(vm, network, requested, context, user, account);
         }
@@ -5533,9 +5412,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         VMInstanceVO vm = findVmById(work.getVmId());
 
         Boolean enterSetup = (Boolean)work.getParams().get(VirtualMachineProfile.Param.BootIntoSetup);
-        if (logger.isDebugEnabled()) {
-            logger.debug(String.format("orchestrating VM start for '%s' %s set to %s", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup));
-        }
+        logger.debug("orchestrating VM start for '{}' {} set to {}", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup);
 
         try {
             orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner()));
@@ -5576,7 +5453,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         try {
             orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null);
         } catch (final InsufficientServerCapacityException e) {
-            logger.warn("Failed to deploy vm " + vm.getId() + " with original planner, sending HAPlanner", e);
+            logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm.getId(), e);
             orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner());
         }
 
@@ -5723,7 +5600,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     }
 
     private UserVm orchestrateRestoreVirtualMachine(final long vmId, final Long newTemplateId, final Long rootDiskOfferingId, final boolean expunge, final Map<String, String> details) throws ResourceUnavailableException, InsufficientCapacityException {
-        logger.debug("Restoring vm " + vmId + " with templateId : " + newTemplateId + " diskOfferingId : " + rootDiskOfferingId + " details : " + details);
+        logger.debug("Restoring vm {} with templateId: {}, diskOfferingId: {}, details: {}", vmId, newTemplateId, rootDiskOfferingId, details);
         final CallContext context = CallContext.current();
         final Account account = context.getCallingAccount();
         return _userVmService.restoreVirtualMachine(account, vmId, newTemplateId, rootDiskOfferingId, expunge, details);
@@ -5791,7 +5668,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
     private Boolean orchestrateUpdateDefaultNicForVM(final VirtualMachine vm, final Nic nic, final Nic defaultNic) {
 
-        logger.debug("Updating default nic of vm " + vm + " from nic " + defaultNic.getUuid() + " to nic " + nic.getUuid());
+        logger.debug("Updating default nic of vm {} from nic {} to nic {}", vm, defaultNic.getUuid(), nic.getUuid());
         Integer chosenID = nic.getDeviceId();
         Integer existingID = defaultNic.getDeviceId();
         NicVO nicVO = _nicsDao.findById(nic.getId());
@@ -5874,9 +5751,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         Long clusterId = null;
         if(hostId == null) {
             hostId = vm.getLastHostId();
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("host id is null, using last host id %d", hostId) );
-            }
+            logger.debug("host id is null, using last host id {}", hostId);
         }
         if (hostId == null) {
             return findClusterAndHostIdForVmFromVolumes(vm.getId());
@@ -6018,7 +5893,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, vmId);
 
         if (vm == null) {
-            logger.warn(String.format("Could not find VM [%s].", vmId));
+            logger.warn("Could not find VM [{}].", vmId);
         }
 
         assert vm != null;
@@ -6140,9 +6015,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         for (StoragePoolAllocator allocator : _storagePoolAllocators) {
             List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, 1);
             if (CollectionUtils.isNotEmpty(poolListFromAllocator)) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug(String.format("Found a suitable pool: %s for disk offering: %s", poolListFromAllocator.get(0).getName(), diskOffering.getName()));
-                }
+                logger.debug("Found a suitable pool: {} for disk offering: {}", poolListFromAllocator.get(0).getName(), diskOffering.getName());
                 return true;
             }
         }
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java
index bbd4510f6f5..4c89a75d215 100644
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java
@@ -47,13 +47,13 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
 
     @Override
     public void resetHostSyncState(long hostId) {
-        logger.info("Reset VM power state sync for host: " + hostId);
+        logger.info("Reset VM power state sync for host: {}.", hostId);
         _instanceDao.resetHostPowerStateTracking(hostId);
     }
 
     @Override
     public void processHostVmStateReport(long hostId, Map<String, HostVmStateReportEntry> report) {
-            logger.debug("Process host VM state report. host: " + hostId);
+        logger.debug("Process host VM state report. host: {}.", hostId);
 
         Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
         processReport(hostId, translatedInfo, false);
@@ -61,8 +61,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
 
     @Override
     public void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force) {
-        if (logger.isDebugEnabled())
-            logger.debug("Process host VM state report from ping process. host: " + hostId);
+        logger.debug("Process host VM state report from ping process. host: {}.", hostId);
 
         Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
         processReport(hostId, translatedInfo, force);
@@ -70,25 +69,18 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
 
     private void processReport(long hostId, Map<Long, VirtualMachine.PowerState> translatedInfo, boolean force) {
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size());
-        }
+        logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size());
 
         for (Map.Entry<Long, VirtualMachine.PowerState> entry : translatedInfo.entrySet()) {
 
-            if (logger.isDebugEnabled())
-                logger.debug("VM state report. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue());
+            logger.debug("VM state report. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue());
 
             if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) {
-                if (logger.isInfoEnabled()) {
-                    logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue());
-                }
+                logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue());
 
                 _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey());
             } else {
-                if (logger.isTraceEnabled()) {
-                    logger.trace("VM power state does not change, skip DB writing. vm id: " + entry.getKey());
-                }
+                logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey());
             }
         }
 
@@ -107,9 +99,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
         // here we need to be wary of out of band migration as opposed to other, more unexpected state changes
         if (vmsThatAreMissingReport.size() > 0) {
             Date currentTime = DateUtil.currentGMTTime();
-            if (logger.isDebugEnabled()) {
-                logger.debug("Run missing VM report. current time: " + currentTime.getTime());
-            }
+            logger.debug("Run missing VM report. current time: {}", currentTime.getTime());
 
             // twice the sync-update interval as a graceful period
             long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L;
@@ -119,7 +109,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
                 // Make sure powerState is up to date for missing VMs
                 try {
                     if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) {
-                        logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId());
+                        logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId());
                         _instanceDao.resetVmPowerStateTracking(instance.getId());
                         continue;
                     }
@@ -130,52 +120,44 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
 
                 Date vmStateUpdateTime = instance.getPowerStateUpdateTime();
                 if (vmStateUpdateTime == null) {
-                    logger.warn("VM power state update time is null, falling back to update time for vm id: " + instance.getId());
+                    logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId());
                     vmStateUpdateTime = instance.getUpdateTime();
                     if (vmStateUpdateTime == null) {
-                        logger.warn("VM update time is null, falling back to creation time for vm id: " + instance.getId());
+                        logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId());
                         vmStateUpdateTime = instance.getCreated();
                     }
                 }
 
-                if (logger.isInfoEnabled()) {
-                    String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime);
-                    logger.debug(
-                            String.format("Detected missing VM. host: %d, vm id: %d(%s), power state: %s, last state update: %s"
-                                    , hostId
-                                    , instance.getId()
-                                    , instance.getUuid()
-                                    , VirtualMachine.PowerState.PowerReportMissing
-                                    , lastTime));
-                }
+                String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime);
+                logger.debug("Detected missing VM. host: {}, vm id: {}({}), power state: {}, last state update: {}"
+                                , hostId
+                                , instance.getId()
+                                , instance.getUuid()
+                                , VirtualMachine.PowerState.PowerReportMissing
+                                , lastTime);
 
                 long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime();
 
                 if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) {
-                    logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period");
+                    logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate);
 
                     // this is where a race condition might have happened if we don't re-fetch the instance;
                     // between the start time of this job and the currentTime of this missing-branch
                     // an update might have occurred that we should not override in case of out of band migration
                     if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + instance.getId() + ", power state: PowerReportMissing ");
-                        }
+                        logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId());
 
                         _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId());
                     } else {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("VM power state does not change, skip DB writing. vm id: " + instance.getId());
-                        }
+                        logger.debug("VM power state does not change, skip DB writing. vm id: {}", instance.getId());
                     }
                 } else {
-                    logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has not passed graceful period yet");
+                    logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate);
                 }
             }
         }
 
-        if (logger.isDebugEnabled())
-            logger.debug("Done with process of VM state report. host: " + hostId);
+        logger.debug("Done with process of VM state report. host: {}", hostId);
     }
 
     @Override
@@ -190,7 +172,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
             if (vm != null) {
                 map.put(vm.getId(), entry.getValue().getState());
             } else {
-                logger.debug("Unable to find matched VM in CloudStack DB. name: " + entry.getKey());
+                logger.debug("Unable to find matched VM in CloudStack DB. name: {}", entry.getKey());
             }
         }
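
One caveat behind the guard removals in this file: a parameterized call skips message formatting when the level is disabled, but the argument expressions are still evaluated eagerly, so the SimpleDateFormat formatting above still runs with DEBUG off. Where that cost matters, Log4j2's Supplier overloads (available since 2.4) defer the work entirely; a sketch, assuming that API:

    import java.text.SimpleDateFormat;
    import java.util.Date;

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class LazyArgSketch {
        private static final Logger logger = LogManager.getLogger(LazyArgSketch.class);

        void demo(Date updateTime) {
            // format(...) runs even when DEBUG is disabled:
            logger.debug("last state update: {}",
                    new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(updateTime));
            // The lambda is invoked only if DEBUG is enabled:
            logger.debug("last state update: {}",
                    () -> new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(updateTime));
        }
    }
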
 
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java
index c7e30e366a9..d6c55d7038b 100644
--- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java
@@ -63,24 +63,23 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
             try {
                 workClz = Class.forName(job.getCmd());
             } catch (ClassNotFoundException e) {
-                logger.error("VM work class " + cmd + " is not found" + ", job origin: " + job.getRelated(), e);
+                logger.error("VM work class {} is not found, job origin: {}", cmd, job.getRelated(), e);
                 _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, e.getMessage());
                 return;
             }
 
             work = VmWorkSerializer.deserialize(workClz, job.getCmdInfo());
             if(work == null) {
-                logger.error("Unable to deserialize VM work " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated());
+                logger.error("Unable to deserialize VM work {}, job info: {}, job origin: {}", job.getCmd(), job.getCmdInfo(), job.getRelated());
                 _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to deserialize VM work");
                 return;
             }
 
-            if (logger.isDebugEnabled())
-                logger.debug("Run VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
+            logger.debug("Run VM work job: {} for VM {}, job origin: {}", cmd, work.getVmId(), job.getRelated());
             try {
                 if (_handlers == null || _handlers.isEmpty()) {
-                    logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()
-                            + ", job origin: " + job.getRelated());
+                    logger.error("Invalid startup configuration, no work job handler is found. cmd: {}, job info: {}, job origin: {}", job.getCmd(), job.getCmdInfo(),
+                            job.getRelated());
                     _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Invalid startup configuration. no job handler is found");
                     return;
                 }
@@ -88,8 +87,8 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
                 VmWorkJobHandler handler = _handlers.get(work.getHandlerName());
 
                 if (handler == null) {
-                    logger.error("Unable to find work job handler. handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd()
-                            + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated());
+                    logger.error("Unable to find work job handler. handler name: {}, job cmd: {}, job info: {}, job origin: {}", work.getHandlerName(), job.getCmd(),
+                            job.getCmdInfo(), job.getRelated());
                     _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to find work job handler");
                     return;
                 }
@@ -103,14 +102,13 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
                     CallContext.unregister();
                 }
             } finally {
-                if (logger.isDebugEnabled())
-                    logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
+                logger.debug("Done with run of VM work job: {} for VM {}, job origin: {}", cmd, work.getVmId(), job.getRelated());
             }
         } catch(InvalidParameterValueException e) {
-            logger.error("Unable to complete " + job + ", job origin:" + job.getRelated());
+            logger.error("Unable to complete {}, job origin: {}", job, job.getRelated());
             _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e));
         } catch(Throwable e) {
-            logger.error("Unable to complete " + job + ", job origin:" + job.getRelated(), e);
+            logger.error("Unable to complete {}, job origin: {}", job, job.getRelated(), e);
 
             //RuntimeException ex = new RuntimeException("Job failed due to exception " + e.getMessage());
             _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e));
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java
index 8d4fa21754c..a94cbb2bf18 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java
@@ -214,9 +214,7 @@ public class VMEntityManagerImpl implements VMEntityManager {
                 if (reservationId != null) {
                     return reservationId;
                 } else {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Cannot finalize the VM reservation for this destination found, retrying");
-                    }
+                    logger.debug("Cannot finalize the VM reservation for this destination found, retrying");
                     exclude.addHost(dest.getHost().getId());
                     continue;
                 }
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java
index 31230442f17..c260f48dcf8 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java
@@ -97,15 +97,15 @@ public class DataMigrationUtility {
         boolean isReady = true;
         for (TemplateDataStoreVO template : templates) {
             isReady &= (Arrays.asList(validStates).contains(template.getState()));
-            logger.trace(String.format("template state: %s", template.getState()));
+            logger.trace("template state: {}", template.getState());
         }
         for (SnapshotDataStoreVO snapshot : snapshots) {
             isReady &= (Arrays.asList(validStates).contains(snapshot.getState()));
-            logger.trace(String.format("snapshot state: %s", snapshot.getState()));
+            logger.trace("snapshot state: {}", snapshot.getState());
         }
         for (VolumeDataStoreVO volume : volumes) {
             isReady &= (Arrays.asList(validStates).contains(volume.getState()));
-            logger.trace(String.format("volume state: %s", volume.getState()));
+            logger.trace("volume state: {}", volume.getState());
         }
         return isReady;
     }
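
The String.format removals above are not purely cosmetic: String.format builds the message before trace(...) is even entered, on every call, while the parameterized form formats only when TRACE is enabled. A minimal sketch of the difference (hypothetical loop):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class TraceCostSketch {
        private static final Logger logger = LogManager.getLogger(TraceCostSketch.class);

        void demo(Iterable<String> states) {
            for (String state : states) {
                // Formats on every iteration, regardless of level:
                logger.trace(String.format("template state: %s", state));
                // Formats only when TRACE is enabled:
                logger.trace("template state: {}", state);
            }
        }
    }
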
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
index 5e3c8cfaa57..ba9e5646bb5 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
@@ -701,7 +701,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
     @Override
     public boolean start() {
         final int netGcInterval = NumbersUtil.parseInt(_configDao.getValue(NetworkGcInterval.key()), 60);
-        logger.info("Network Manager will run the NetworkGarbageCollector every '" + netGcInterval + "' seconds.");
+        logger.info("Network Manager will run the NetworkGarbageCollector every '{}' seconds.", netGcInterval);
 
         _executor.scheduleWithFixedDelay(new NetworkGarbageCollector(), netGcInterval, netGcInterval, TimeUnit.SECONDS);
         return true;
@@ -815,7 +815,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
             return networks;
         } finally {
-            logger.debug("Releasing lock for " + locked);
+            logger.debug("Releasing lock for {}", locked);
             _accountDao.releaseFromLockTable(locked.getId());
         }
     }
@@ -836,9 +836,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
     private List<? extends Network> existingConfiguration(final NetworkOffering offering, List<NetworkVO> configs,
                                                           final boolean errorIfAlreadySetup) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0));
-        }
+        logger.debug("Found existing network configuration for offering {}: {}", offering, configs.get(0));
 
         if (errorIfAlreadySetup) {
             final InvalidParameterValueException ex = new InvalidParameterValueException(
@@ -856,9 +854,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
     public void allocate(final VirtualMachineProfile vm, final LinkedHashMap<? extends Network, List<? extends NicProfile>> networks, final Map<String, Map<Integer, String>> extraDhcpOptions) throws InsufficientCapacityException,
             ConcurrentOperationException {
 
-        if (logger.isTraceEnabled()) {
-            logger.trace(String.format("allocating networks for %s(template %s); %d networks", vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size()));
-        }
+        logger.trace("allocating networks for {}(template {}); {} networks", vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size());
         int deviceId = 0;
         int size;
         size = determineNumberOfNicsRequired(vm, networks);
@@ -1002,7 +998,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
      */
     private void createExtraNics(final VirtualMachineProfile vm, int size, List<NicProfile> nics, Network finalNetwork) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException {
         if (nics.size() != size) {
-            logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size);
+            logger.warn("Number of nics {} doesn't match number of requested nics {}", nics.size(), size);
             if (nics.size() > size) {
                 throw new CloudRuntimeException("Number of nics " + nics.size() + " doesn't match number of requested networks " + size);
             } else {
@@ -1041,7 +1037,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException {
 
         final NetworkVO ntwkVO = _networksDao.findById(network.getId());
-        logger.debug("Allocating nic for vm " + vm.getVirtualMachine() + " in network " + network + " with requested profile " + requested);
+        logger.debug("Allocating nic for vm {} in network {} with requested profile {}", vm.getVirtualMachine(), network, requested);
         final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, ntwkVO.getGuruName());
 
         if (requested != null && requested.getMode() == null) {
@@ -1431,17 +1427,17 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                 final SetupPersistentNetworkAnswer answer = (SetupPersistentNetworkAnswer) _agentMgr.send(host.getId(), cmd);
 
                 if (answer == null) {
-                    logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent:" + host.getId());
+                    logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host.getId());
                     clusterToHostsMap.get(host.getClusterId()).remove(host.getId());
                     continue;
                 }
 
                 if (!answer.getResult()) {
-                    logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails());
+                    logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails());
                     clusterToHostsMap.get(host.getClusterId()).remove(host.getId());
                 }
             } catch (Exception e) {
-                logger.warn("Failed to connect to host: " + host.getName());
+                logger.warn("Failed to connect to host: {}", host.getName());
             }
         }
         if (clusterToHostsMap.keySet().size() != clusterVOs.size()) {
@@ -1468,7 +1464,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         NetworkVO network = _networksDao.findById(networkId);
         final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName());
         if (isNetworkImplemented(network)) {
-            logger.debug("Network id=" + networkId + " is already implemented");
+            logger.debug("Network id={} is already implemented", networkId);
             implemented.set(guru, network);
             return implemented;
         }
@@ -1482,20 +1478,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             throw ex;
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Lock is acquired for network id " + networkId + " as a part of network implement");
-        }
+        logger.debug("Lock is acquired for network id {} as a part of network implement", networkId);
 
         try {
             if (isNetworkImplemented(network)) {
-                logger.debug("Network id=" + networkId + " is already implemented");
+                logger.debug("Network id={} is already implemented", networkId);
                 implemented.set(guru, network);
                 return implemented;
             }
 
-            if (logger.isDebugEnabled()) {
-                logger.debug("Asking " + guru.getName() + " to implement " + network);
-            }
+            logger.debug("Asking {} to implement {}", guru.getName(), network);
 
             final NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId());
 
@@ -1535,11 +1527,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             logger.error(e.getMessage());
             return new Pair<NetworkGuru, NetworkVO>(null, null);
         } catch (final CloudRuntimeException | OperationTimedoutException e) {
-            logger.error("Caught exception: " + e.getMessage());
+            logger.error("Caught exception: {}", e.getMessage());
             return new Pair<NetworkGuru, NetworkVO>(null, null);
         } finally {
             if (implemented.first() == null) {
-                logger.debug("Cleaning up because we're unable to implement the network " + network);
+                logger.debug("Cleaning up because we're unable to implement the network {}", network);
                 try {
                     if (isSharedNetworkWithServices(network)) {
                         network.setState(Network.State.Shutdown);
@@ -1560,9 +1552,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             }
 
             _networksDao.releaseFromLockTable(networkId);
-            if (logger.isDebugEnabled()) {
-                logger.debug("Lock is released for network id " + networkId + " as a part of network implement");
-            }
+            logger.debug("Lock is released for network id {} as a part of network implement", networkId);
         }
     }
 
@@ -1587,13 +1577,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                 ips = _ipAddressDao.listByAssociatedVpc(network.getVpcId(), true);
                 if (ips.isEmpty()) {
                     final Vpc vpc = _vpcMgr.getActiveVpc(network.getVpcId());
-                    logger.debug("Creating a source nat ip for vpc " + vpc);
+                    logger.debug("Creating a source nat ip for vpc {}", vpc);
                     _vpcMgr.assignSourceNatIpAddressToVpc(owner, vpc);
                 }
             } else {
                 ips = _ipAddressDao.listByAssociatedNetwork(network.getId(), true);
                 if (ips.isEmpty()) {
-                    logger.debug("Creating a source nat ip for network " + network);
+                    logger.debug("Creating a source nat ip for network {}", network);
                     _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network);
                 }
             }
@@ -1618,9 +1608,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         try {
             // reapply all the firewall/staticNat/lb rules
-            logger.debug("Reprogramming network " + network + " as a part of network implement");
+            logger.debug("Reprogramming network {} as a part of network implement", network);
             if (!reprogramNetworkRules(network.getId(), CallContext.current().getCallingAccount(), network)) {
-                logger.warn("Failed to re-program the network as a part of network " + network + " implement");
+                logger.warn("Failed to re-program the network as a part of network {} implement", network);
                 // see DataCenterVO.java
                 final ResourceUnavailableException ex = new ResourceUnavailableException("Unable to apply network rules as a part of network " + network + " implement", DataCenter.class,
                         network.getDataCenterId());
@@ -1630,7 +1620,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             for (final NetworkElement element : networkElements) {
                 if (element instanceof AggregatedCommandExecutor && providersToImplement.contains(element.getProvider())) {
                     if (!((AggregatedCommandExecutor) element).completeAggregatedExecution(network, dest)) {
-                        logger.warn("Failed to re-program the network as a part of network " + network + " implement due to aggregated commands execution failure!");
+                        logger.warn("Failed to re-program the network as a part of network {} implement due to aggregated commands execution failure!", network);
                         // see DataCenterVO.java
                         final ResourceUnavailableException ex = new ResourceUnavailableException("Unable to apply network rules as a part of network " + network + " implement", DataCenter.class,
                                 network.getDataCenterId());
@@ -1661,9 +1651,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                             + network.getPhysicalNetworkId());
                 }
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Asking " + element.getName() + " to implement " + network);
-                }
+                logger.debug("Asking {} to implement {}", element.getName(), network);
 
                 if (!element.implement(network, offering, dest, context)) {
                     CloudRuntimeException ex = new CloudRuntimeException("Failed to implement provider " + element.getProvider().getName() + " for network with specified id");
@@ -1688,50 +1676,50 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true);
         }
         if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) {
-            logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id=" + networkId + " restart");
+            logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id={} restart", networkId);
             success = false;
         }
 
         // associate all ip addresses
         if (!_ipAddrMgr.applyIpAssociations(network, false)) {
-            logger.warn("Failed to apply ip addresses as a part of network id" + networkId + " restart");
+            logger.warn("Failed to apply ip addresses as a part of network id {} restart", networkId);
             success = false;
         }
 
         // apply static nat
         if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) {
-            logger.warn("Failed to apply static nats a part of network id" + networkId + " restart");
+            logger.warn("Failed to apply static nats a part of network id {} restart", networkId);
             success = false;
         }
 
         // apply firewall rules
         final List<FirewallRuleVO> firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress);
         if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) {
-            logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id=" + networkId + " restart");
+            logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id={} restart", networkId);
             success = false;
         }
 
         // apply port forwarding rules
         if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) {
-            logger.warn("Failed to reapply port forwarding rule(s) as a part of network id=" + networkId + " restart");
+            logger.warn("Failed to reapply port forwarding rule(s) as a part of network id={} restart", networkId);
             success = false;
         }
 
         // apply static nat rules
         if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) {
-            logger.warn("Failed to reapply static nat rule(s) as a part of network id=" + networkId + " restart");
+            logger.warn("Failed to reapply static nat rule(s) as a part of network id={} restart", networkId);
             success = false;
         }
 
         // apply public load balancer rules
         if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) {
-            logger.warn("Failed to reapply Public load balancer rules as a part of network id=" + networkId + " restart");
+            logger.warn("Failed to reapply Public load balancer rules as a part of network id={} restart", networkId);
             success = false;
         }
 
         // apply internal load balancer rules
         if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) {
-            logger.warn("Failed to reapply internal load balancer rules as a part of network id=" + networkId + " restart");
+            logger.warn("Failed to reapply internal load balancer rules as a part of network id={} restart", networkId);
             success = false;
         }
 
@@ -1741,7 +1729,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             for (final RemoteAccessVpn vpn : vpnsToReapply) {
                 // Start remote access vpn per ip
                 if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) {
-                    logger.warn("Failed to reapply vpn rules as a part of network id=" + networkId + " restart");
+                    logger.warn("Failed to reapply vpn rules as a part of network id={} restart", networkId);
                     success = false;
                 }
             }
@@ -1749,7 +1737,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         //apply network ACLs
         if (!_networkACLMgr.applyACLToNetwork(networkId)) {
-            logger.warn("Failed to reapply network ACLs as a part of  of network id=" + networkId + " restart");
+            logger.warn("Failed to reapply network ACLs as a part of  of network id={}", networkId);
             success = false;
         }
 
@@ -1846,14 +1834,14 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         Account caller = _accountDao.findById(Account.ACCOUNT_ID_SYSTEM);
         long userId = User.UID_SYSTEM;
         //remove all PF/Static Nat rules for the network
-        logger.info("Services:" + services + " are no longer supported in network:" + network.getUuid() +
-                " after applying new network offering:" + network.getNetworkOfferingId() + " removing the related configuration");
+        logger.info("Services: {} are no longer supported in network: {} after applying new network offering: {} removing the related configuration",
+                services, network.getUuid(), network.getNetworkOfferingId());
         if (services.contains(Service.StaticNat.getName()) || services.contains(Service.PortForwarding.getName())) {
             try {
                 if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) {
-                    logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId);
+                    logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId);
                 } else {
-                    logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup");
+                    logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId);
                 }
                 if (services.contains(Service.StaticNat.getName())) {
                     //removing static nat configured on ips.
@@ -1872,7 +1860,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     });
                 }
             } catch (ResourceUnavailableException ex) {
-                logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex);
+                logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
             }
         }
         if (services.contains(Service.SourceNat.getName())) {
@@ -1891,9 +1879,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         if (services.contains(Service.Lb.getName())) {
             //remove all LB rules for the network
             if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) {
-                logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId);
+                logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId);
             } else {
-                logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup");
+                logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId);
             }
         }
 
@@ -1901,12 +1889,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             //revoke all firewall rules for the network
             try {
                 if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) {
-                    logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId);
+                    logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId);
                 } else {
-                    logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup");
+                    logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId);
                 }
             } catch (ResourceUnavailableException ex) {
-                logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex);
+                logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
             }
         }
 
@@ -1916,7 +1904,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             try {
                 _vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true);
             } catch (ResourceUnavailableException ex) {
-                logger.warn("Failed to cleanup remote access vpn resources of network:" + network.getUuid() + " due to Exception: ", ex);
+                logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network.getUuid(), ex);
             }
         }
     }
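
A pitfall when converting exception logs to placeholders: if the Throwable is given its own {}, it is rendered via toString() and the stack trace is lost; leaving it as a trailing argument with no matching placeholder preserves the trace. A sketch of the difference (hypothetical message and exception):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class ThrowablePlaceholderSketch {
        private static final Logger logger = LogManager.getLogger(ThrowablePlaceholderSketch.class);

        void demo(String networkUuid) {
            Exception ex = new IllegalStateException("unreachable");
            // ex fills the second {} -> only ex.toString() is printed:
            logger.warn("Failed to cleanup vpn of network: {} due to: {}", networkUuid, ex);
            // No placeholder left for ex -> logged as Throwable with stack trace:
            logger.warn("Failed to cleanup vpn of network: {}", networkUuid, ex);
        }
    }
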
@@ -2006,7 +1994,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                         logger.error(errorMsg);
                     }
                 } catch (ResourceUnavailableException e) {
-                    logger.error(String.format("%s, error states %s", errorMsg, e));
+                    logger.error("{}, error states {}", errorMsg, e);
                 }
             }
         }
@@ -2020,7 +2008,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                 _nicDao.update(nic.getId(), nic);
 
                 if (nic.getVmType() == VirtualMachine.Type.User) {
-                    logger.debug("Changing active number of nics for network id=" + networkId + " on " + count);
+                    logger.debug("Changing active number of nics for network id={} on {}", networkId, count);
                     _networksDao.changeActiveNicsBy(networkId, count);
                 }
 
@@ -2053,7 +2041,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         for (final NicVO nic : nics) {
             final Pair<NetworkGuru, NetworkVO> implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter);
             if (implemented == null || implemented.first() == null) {
-                logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId());
+                logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId());
                 throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId());
             }
 
@@ -2128,9 +2116,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: "
                             + network.getPhysicalNetworkId());
                 }
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Asking " + element.getName() + " to prepare for " + nic);
-                }
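+                // no isDebugEnabled() guard is needed: a parameterized message only formats its arguments when DEBUG is enabled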
+                logger.debug("Asking {} to prepare for {}", element.getName(), nic);
                 if (!prepareElement(element, network, profile, vmProfile, dest, context)) {
                     throw new InsufficientAddressCapacityException("unable to configure the dhcp service, due to insufficiant address capacity", Network.class, network.getId());
                 }
@@ -2170,7 +2156,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     _networkModel.getNetworkTag(vm.getHypervisorType(), network));
             if (guru instanceof NetworkMigrationResponder) {
                 if (!((NetworkMigrationResponder) guru).prepareMigration(profile, network, vm, dest, context)) {
-                    logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error
+                    logger.error("NetworkGuru {} prepareForMigration failed.", guru); // XXX: Transaction error
                 }
             }
 
@@ -2187,7 +2173,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     }
                     if (element instanceof NetworkMigrationResponder) {
                         if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) {
-                            logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error
+                            logger.error("NetworkElement {} prepareForMigration failed.", element); // XXX: Transaction error
                         }
                     }
                 }
@@ -2219,7 +2205,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vm.getHypervisorType(), network));
             if (guru instanceof NetworkMigrationResponder) {
                 if (!((NetworkMigrationResponder) guru).prepareMigration(profile, network, vm, dest, context)) {
-                    logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error
+                    logger.error("NetworkGuru {} prepareForMigration failed.", guru); // XXX: Transaction error
                 }
             }
             final List<Provider> providersToImplement = getNetworkProviders(network.getId());
@@ -2230,7 +2216,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     }
                     if (element instanceof NetworkMigrationResponder) {
                         if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) {
-                            logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error
+                            logger.error("NetworkElement {} prepareForMigration failed.", element); // XXX: Transaction error
                         }
                     }
                 }
@@ -2251,7 +2237,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                 if (nic == null && !addedURIs.contains(broadcastUri.toString())) {
                     //Nic details are not available in DB
                     //Create nic profile for migration
-                    logger.debug("Creating nic profile for migration. BroadcastUri: " + broadcastUri.toString() + " NetworkId: " + ntwkId + " Vm: " + vm.getId());
+                    logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm.getId());
                     final NetworkVO network = _networksDao.findById(ntwkId);
                     final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName());
                     final NicProfile profile = new NicProfile();
@@ -2390,9 +2376,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                         final NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), null, _networkModel
                                 .isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getHypervisorType(), network));
                         if (guru.release(profile, vmProfile, nic.getReservationId())) {
-                            if (logger.isDebugEnabled()) {
-                                logger.debug(String.format("The nic %s on %s was released according to %s by guru %s, now updating record.", nic, profile, vmProfile, guru));
-                            }
+                            logger.debug("The nic {} on {} was released according to {} by guru {}, now updating record.", nic, profile, vmProfile, guru);
                             applyProfileToNicForRelease(nic, profile);
                             nic.setState(Nic.State.Allocated);
                             if (originalState == Nic.State.Reserved) {
@@ -2431,9 +2415,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             final List<Provider> providersToImplement = getNetworkProviders(network.getId());
             for (final NetworkElement element : networkElements) {
                 if (providersToImplement.contains(element.getProvider())) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Asking " + element.getName() + " to release " + profile);
-                    }
+                    logger.debug("Asking {} to release {}", element.getName(), profile);
                     //NOTE: Context appear to never be used in release method
                     //implementations. Consider removing it from interface Element
                     element.release(network, profile, vmProfile, null);
@@ -2444,9 +2426,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
     @Override
     public void cleanupNics(final VirtualMachineProfile vm) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Cleaning network for vm: " + vm.getId());
-        }
+        logger.debug("Cleaning network for vm: {}", vm.getId());
 
         final List<NicVO> nics = _nicDao.listByVmId(vm.getId());
         for (final NicVO nic : nics) {
@@ -2467,7 +2447,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             try {
                 releaseNic(vm, nic.getId());
             } catch (final Exception ex) {
-                logger.warn("Failed to release nic: " + nic.toString() + " as part of remove operation due to", ex);
+                logger.warn("Failed to release nic: {} as part of remove operation due to", nic.toString(), ex);
             }
         }
 
@@ -2498,9 +2478,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             final List<Provider> providersToImplement = getNetworkProviders(network.getId());
             for (final NetworkElement element : networkElements) {
                 if (providersToImplement.contains(element.getProvider())) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(String.format("Asking %s to release %s, according to the reservation strategy %s", element.getName(), nic, nic.getReservationStrategy()));
-                    }
+                    logger.debug("Asking {} to release {}, according to the reservation strategy {}.", element.getName(), nic, nic.getReservationStrategy());
                     try {
                         element.release(network, profile, vm, null);
                     } catch (final ConcurrentOperationException ex) {
@@ -2545,7 +2523,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             _nicDao.remove(nic.getId());
         }
 
-        logger.debug("Removed nic id=" + nic.getId());
+        logger.debug("Removed nic id={}", nic.getId());
         // release assigned IPv6 for Isolated Network VR NIC
 
         if (Type.User.equals(vm.getType()) && GuestType.Isolated.equals(network.getGuestType())
@@ -2558,7 +2536,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         //remove the secondary ip addresses corresponding to this nic
         if (!removeVmSecondaryIpsOfNic(nic.getId())) {
-            logger.debug("Removing nic " + nic.getId() + " secondary ip addresses failed");
+            logger.debug("Removing nic {} secondary ip addresses failed", nic.getId());
         }
     }
 
@@ -2597,7 +2575,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     }
                 });
                 if (!dhcpServiceProvider.removeDhcpSupportForSubnet(network)) {
-                    logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip " + ipAlias.getIp4Address());
+                    logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip {}", ipAlias.getIp4Address());
                 }
             }
         } catch (final ResourceUnavailableException e) {
@@ -3082,12 +3060,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
     public boolean shutdownNetwork(final long networkId, final ReservationContext context, final boolean cleanupElements) {
         NetworkVO network = _networksDao.findById(networkId);
         if (network.getState() == Network.State.Allocated) {
-            logger.debug("Network is already shutdown: " + network);
+            logger.debug("Network is already shutdown: {}", network);
             return true;
         }
 
         if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) {
-            logger.debug("Network is not implemented: " + network);
+            logger.debug("Network is not implemented: {}", network);
             return false;
         }
 
@@ -3095,20 +3073,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             //do global lock for the network
             network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value());
             if (network == null) {
-                logger.warn("Unable to acquire lock for the network " + network + " as a part of network shutdown");
+                logger.warn("Unable to acquire lock for the network {} as a part of network shutdown", network);
                 return false;
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug("Lock is acquired for network " + network + " as a part of network shutdown");
-            }
+            logger.debug("Lock is acquired for network {} as a part of network shutdown", network);
 
             if (network.getState() == Network.State.Allocated) {
-                logger.debug("Network is already shutdown: " + network);
+                logger.debug("Network is already shutdown: {}", network);
                 return true;
             }
 
             if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) {
-                logger.debug("Network is not implemented: " + network);
+                logger.debug("Network is not implemented: {}", network);
                 return false;
             }
 
@@ -3133,9 +3109,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     boolean result = false;
 
                     if (success) {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now.");
-                        }
+                        logger.debug("Network id={} is shutdown successfully, cleaning up corresponding resources now.", networkId);
                         final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName());
                         final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId());
                         guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId()));
@@ -3173,9 +3147,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         } finally {
             if (network != null) {
                 _networksDao.releaseFromLockTable(network.getId());
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Lock is released for network " + network + " as a part of network shutdown");
-                }
+                logger.debug("Lock is released for network {} as a part of network shutdown", network);
             }
         }
     }
@@ -3201,11 +3173,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                 cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId());
             }
         } catch (final Exception ex) {
-            logger.warn("shutdownNetworkRules failed during the network " + network + " shutdown due to ", ex);
+            logger.warn("shutdownNetworkRules failed during the network {} shutdown due to", network, ex);
         } finally {
             // just warn the administrator that the network elements failed to shutdown
             if (!cleanupResult) {
-                logger.warn("Failed to cleanup network id=" + network.getId() + " resources as a part of shutdownNetwork");
+                logger.warn("Failed to cleanup network id={} resources as a part of shutdownNetwork", network.getId());
             }
         }
 
@@ -3214,21 +3186,19 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         for (final NetworkElement element : networkElements) {
             if (providersToShutdown.contains(element.getProvider())) {
                 try {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Sending network shutdown to " + element.getName());
-                    }
+                    logger.debug("Sending network shutdown to {}", element.getName());
                     if (!element.shutdown(network, context, cleanupElements)) {
-                        logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName());
+                        logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName());
                         success = false;
                     }
                 } catch (final ResourceUnavailableException e) {
-                    logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e);
+                    logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName(), e);
                     success = false;
                 } catch (final ConcurrentOperationException e) {
-                    logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e);
+                    logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName(), e);
                     success = false;
                 } catch (final Exception e) {
-                    logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e);
+                    logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName(), e);
                     success = false;
                 }
             }
@@ -3249,15 +3219,15 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                         CleanupPersistentNetworkResourceCommand cmd = new CleanupPersistentNetworkResourceCommand(to);
                         CleanupPersistentNetworkResourceAnswer answer = (CleanupPersistentNetworkResourceAnswer) _agentMgr.send(host.getId(), cmd);
                         if (answer == null) {
-                            logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent:" + host.getId());
+                            logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host.getId());
                             continue;
                         }
 
                         if (!answer.getResult()) {
-                            logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails());
+                            logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails());
                         }
                     } catch (Exception e) {
-                        logger.warn("Failed to cleanup network resources on host: " + host.getName());
+                        logger.warn("Failed to cleanup network resources on host: {}", host.getName());
                     }
                 }
             }
@@ -3271,7 +3241,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         NetworkVO network = _networksDao.findById(networkId);
         if (network == null) {
-            logger.debug("Unable to find network with id: " + networkId);
+            logger.debug("Unable to find network with id: {}", networkId);
             return false;
         }
         // Make sure that there are no user vms in the network that are not Expunged/Error
@@ -3279,7 +3249,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         for (final UserVmVO vm : userVms) {
             if (!(vm.getState() == VirtualMachine.State.Expunging && vm.getRemoved() != null)) {
-                logger.warn("Can't delete the network, not all user vms are expunged. Vm " + vm + " is in " + vm.getState() + " state");
+                logger.warn("Can't delete the network, not all user vms are expunged. Vm {} is in {} state", vm, vm.getState());
                 return false;
             }
         }
@@ -3287,7 +3257,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         // Don't allow to delete network via api call when it has vms assigned to it
         final int nicCount = getActiveNicsInNetwork(networkId);
         if (nicCount > 0) {
-            logger.debug("The network id=" + networkId + " has active Nics, but shouldn't.");
+            logger.debug("The network id={} has active Nics, but shouldn't.", networkId);
             // at this point we have already determined that there are no active user vms in network
             // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks
             _networksDao.changeActiveNicsBy(networkId, -1 * nicCount);
@@ -3311,13 +3281,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         // get updated state for the network
         network = _networksDao.findById(networkId);
         if (network.getState() != Network.State.Allocated && network.getState() != Network.State.Setup && !forced) {
-            logger.debug("Network is not in the correct state to be destroyed: " + network.getState());
+            logger.debug("Network is not in the correct state to be destroyed: {}", network.getState());
             return false;
         }
 
         boolean success = true;
         if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) {
-            logger.warn("Unable to delete network id=" + networkId + ": failed to cleanup network resources");
+            logger.warn("Unable to delete network id={}: failed to cleanup network resources", networkId);
             return false;
         }
 
@@ -3326,31 +3296,27 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         for (final NetworkElement element : networkElements) {
             if (providersToDestroy.contains(element.getProvider())) {
                 try {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Sending destroy to " + element);
-                    }
+                    logger.debug("Sending destroy to {}", element);
 
                     if (!element.destroy(network, context)) {
                         success = false;
-                        logger.warn("Unable to complete destroy of the network: failed to destroy network element " + element.getName());
+                        logger.warn("Unable to complete destroy of the network: failed to destroy network element {}", element.getName());
                     }
                 } catch (final ResourceUnavailableException e) {
-                    logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e);
+                    logger.warn("Unable to complete destroy of the network due to element: {}", element.getName(), e);
                     success = false;
                 } catch (final ConcurrentOperationException e) {
-                    logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e);
+                    logger.warn("Unable to complete destroy of the network due to element: {}", element.getName(), e);
                     success = false;
                 } catch (final Exception e) {
-                    logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e);
+                    logger.warn("Unable to complete destroy of the network due to element: {}", element.getName(), e);
                     success = false;
                 }
             }
         }
 
         if (success) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now.");
-            }
+            logger.debug("Network id={} is destroyed successfully, cleaning up corresponding resources now.", networkId);
 
             final NetworkVO networkFinal = network;
             try {
@@ -3364,7 +3330,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                         }
 
                         if (!deleteVlansInNetwork(networkFinal, context.getCaller().getId(), callerAccount)) {
-                            logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges");
+                            logger.warn("Failed to delete network {}; was unable to cleanup corresponding ip ranges", networkFinal);
                             throw new CloudRuntimeException("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges");
                         } else {
                             // commit transaction only when ips and vlans for the network are released successfully
@@ -3428,7 +3394,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         boolean result = true;
         for (final VlanVO vlan : publicVlans) {
             if (!_configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount)) {
-                logger.warn("Failed to delete vlan " + vlan.getId() + ");");
+                logger.warn("Failed to delete vlan {});", vlan.getId());
                 result = false;
             }
         }
@@ -3436,16 +3402,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         //cleanup private vlans
         final int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId);
         if (privateIpAllocCount > 0) {
-            logger.warn("Can't delete Private ip range for network " + networkId + " as it has allocated ip addresses");
+            logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", networkId);
             result = false;
         } else {
             _privateIpDao.deleteByNetworkId(networkId);
-            logger.debug("Deleted ip range for private network id=" + networkId);
+            logger.debug("Deleted ip range for private network id={}", networkId);
         }
 
         // release vlans of user-shared networks without specifyvlan
         if (isSharedNetworkWithoutSpecifyVlan(_networkOfferingDao.findById(network.getNetworkOfferingId()))) {
-            logger.debug("Releasing vnet for the network id=" + network.getId());
+            logger.debug("Releasing vnet for the network id={}", network.getId());
             _dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(),
                     network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId());
         }
@@ -3477,7 +3443,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
                 final List<Long> networkIds = _networksDao.findNetworksToGarbageCollect();
                 final int netGcWait = NumbersUtil.parseInt(_configDao.getValue(NetworkGcWait.key()), 60);
-                logger.info("NetworkGarbageCollector uses '" + netGcWait + "' seconds for GC interval.");
+                logger.info("NetworkGarbageCollector uses '{}' seconds for GC interval.", netGcWait);
 
                 for (final Long networkId : networkIds) {
                     if (!_networkModel.isNetworkReadyForGc(networkId)) {
@@ -3485,20 +3451,16 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     }
 
                     if (!networkDetailsDao.findDetails(Network.AssociatedNetworkId, String.valueOf(networkId), null).isEmpty()) {
-                        logger.debug(String.format("Network %s is associated to a shared network, skipping", networkId));
+                        logger.debug("Network {} is associated to a shared network, skipping", networkId);
                         continue;
                     }
 
                     final Long time = _lastNetworkIdsToFree.remove(networkId);
                     if (time == null) {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("We found network " + networkId + " to be free for the first time.  Adding it to the list: " + currentTime);
-                        }
+                        logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", networkId, currentTime);
                         stillFree.put(networkId, currentTime);
                     } else if (time > currentTime - netGcWait) {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time);
-                        }
+                        logger.debug("Network {} is still free but it's not time to shutdown yet: {}",networkId, time);
                         stillFree.put(networkId, time);
                     } else {
                         shutdownList.add(networkId);
@@ -3513,7 +3475,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
                     // If network is removed, unset gc flag for it
                     if (_networksDao.findById(networkId) == null) {
-                        logger.debug("Network id=" + networkId + " is removed, so clearing up corresponding gc check");
+                        logger.debug("Network id={} is removed, so clearing up corresponding gc check", networkId);
                         _networksDao.clearCheckForGc(networkId);
                     } else {
                         try {
@@ -3525,7 +3487,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
                             shutdownNetwork(networkId, context, false);
                         } catch (final Exception e) {
-                            logger.warn("Unable to shutdown network: " + networkId);
+                            logger.warn("Unable to shutdown network: {}", networkId);
                         }
                     }
                 }
@@ -3548,10 +3510,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         }
 
         // implement the network
-        logger.debug("Starting network " + network + "...");
+        logger.debug("Starting network {}...", network);
         final Pair<NetworkGuru, NetworkVO> implementedNetwork = implementNetwork(networkId, dest, context);
         if (implementedNetwork == null || implementedNetwork.first() == null) {
-            logger.warn("Failed to start the network " + network);
+            logger.warn("Failed to start the network {}", network);
             return false;
         } else {
             return true;
@@ -3565,7 +3527,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         boolean restartRequired = false;
         final NetworkVO network = _networksDao.findById(networkId);
 
-        logger.debug("Restarting network " + networkId + "...");
+        logger.debug("Restarting network {}...", networkId);
 
         final ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount);
         final NetworkOffering offering = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId());
@@ -3594,7 +3556,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     }
                     Pair<Boolean, String> patched = mgr.updateSystemVM(instanceVO, true);
                     if (patched.first()) {
-                        logger.info(String.format("Successfully patched router %s", router));
+                        logger.info("Successfully patched router {}", router);
                     }
                 } catch (CloudRuntimeException e) {
                     throw new CloudRuntimeException(String.format("Failed to live patch router: %s", router), e);
@@ -3603,13 +3565,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             }
         }
 
-        logger.debug("Implementing the network " + network + " elements and resources as a part of network restart without cleanup");
+        logger.debug("Implementing the network {} elements and resources as a part of network restart without cleanup", network);
         try {
             implementNetworkElementsAndResources(dest, context, network, offering);
             setRestartRequired(network, false);
             return true;
         } catch (final Exception ex) {
-            logger.warn("Failed to implement network " + network + " elements and resources as a part of network restart due to ", ex);
+            logger.warn("Failed to implement network {} elements and resources as a part of network restart due to ", network, ex);
             return false;
         }
     }
@@ -3622,7 +3584,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     router.getState() == VirtualMachine.State.Error ||
                     router.getState() == VirtualMachine.State.Shutdown ||
                     router.getState() == VirtualMachine.State.Unknown) {
-                logger.debug("Destroying old router " + router);
+                logger.debug("Destroying old router {}", router);
                 _routerService.destroyRouter(router.getId(), context.getAccount(), context.getCaller().getId());
             } else {
                 remainingRouters.add(router);
@@ -3651,7 +3613,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
     public boolean areRoutersRunning(final List<? extends VirtualRouter> routers) {
         for (final VirtualRouter router : routers) {
             if (router.getState() != VirtualMachine.State.Running) {
-                logger.debug("Found new router " + router.getInstanceName() + " to be in non-Running state: " + router.getState() + ". Please try restarting network again.");
+                logger.debug("Found new router {} to be in non-Running state: {}. Please try restarting network again." + router.getInstanceName(), router.getState());
                 return false;
             }
         }
@@ -3706,10 +3668,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                 implementNetworkElementsAndResources(dest, context, network, offering);
                 return true;
             }
-            logger.debug("Failed to shutdown the network elements and resources as a part of network restart: " + network.getState());
+            logger.debug("Failed to shutdown the network elements and resources as a part of network restart: {}", network.getState());
             return false;
         }
-        logger.debug("Performing rolling restart of routers of network " + network);
+        logger.debug("Performing rolling restart of routers of network {}", network);
         destroyExpendableRouters(routerDao.findByNetwork(network.getId()), context);
 
         final List<Provider> providersToImplement = getNetworkProviders(network.getId());
@@ -3750,7 +3712,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
     }
 
     private void setRestartRequired(final NetworkVO network, final boolean restartRequired) {
-        logger.debug("Marking network " + network + " with restartRequired=" + restartRequired);
+        logger.debug("Marking network {} with restartRequired={}", network, restartRequired);
         network.setRestartRequired(restartRequired);
         _networksDao.update(network.getId(), network);
     }
@@ -3774,7 +3736,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         final String passwordProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData);
 
         if (passwordProvider == null) {
-            logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName());
+            logger.debug("Network {} doesn't support service {}", network, Service.UserData.getName());
             return null;
         }
 
@@ -3786,7 +3748,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         final String SSHKeyProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData);
 
         if (SSHKeyProvider == null) {
-            logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName());
+            logger.debug("Network {} doesn't support service", network, Service.UserData.getName());
             return null;
         }
 
@@ -3798,7 +3760,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         final String DhcpProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
 
         if (DhcpProvider == null) {
-            logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName());
+            logger.debug("Network {} doesn't support service {}", network, Service.Dhcp.getName());
             return null;
         }
 
@@ -3815,7 +3777,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         final String dnsProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Dns);
 
         if (dnsProvider == null) {
-            logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName());
+            logger.debug("Network {} doesn't support service {}", network, Service.Dhcp.getName());
             return null;
         }
 
@@ -3912,51 +3874,51 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         //remove all PF/Static Nat rules for the network
         try {
             if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) {
-                logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId);
+                logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId);
             } else {
                 success = false;
-                logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup");
+                logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId);
             }
         } catch (final ResourceUnavailableException ex) {
             success = false;
             // shouldn't even come here as network is being cleaned up after all network elements are shutdown
-            logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex);
+            logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
         }
 
         //remove all LB rules for the network
         if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) {
-            logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId);
+            logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId);
         } else {
             // shouldn't even come here as network is being cleaned up after all network elements are shutdown
             success = false;
-            logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup");
+            logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId);
         }
 
         //revoke all firewall rules for the network
         try {
             if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) {
-                logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId);
+                logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId);
             } else {
                 success = false;
-                logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup");
+                logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId);
             }
         } catch (final ResourceUnavailableException ex) {
             success = false;
             // shouldn't even come here as network is being cleaned up after all network elements are shutdown
-            logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex);
+            logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex);
         }
 
         //revoke all network ACLs for network
         try {
             if (_networkACLMgr.revokeACLItemsForNetwork(networkId)) {
-                logger.debug("Successfully cleaned up NetworkACLs for network id=" + networkId);
+                logger.debug("Successfully cleaned up NetworkACLs for network id={}", networkId);
             } else {
                 success = false;
-                logger.warn("Failed to cleanup NetworkACLs as a part of network id=" + networkId + " cleanup");
+                logger.warn("Failed to cleanup NetworkACLs as a part of network id={} cleanup", networkId);
             }
         } catch (final ResourceUnavailableException ex) {
             success = false;
-            logger.warn("Failed to cleanup Network ACLs as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex);
+            logger.warn("Failed to cleanup Network ACLs as a part of network id={} cleanup due to resourceUnavailable ", networkId, ex);
         }
 
         //release all ip addresses
@@ -3971,7 +3933,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     // so as part of network clean up just break IP association with guest network
                     ipToRelease.setAssociatedWithNetworkId(null);
                     _ipAddressDao.update(ipToRelease.getId(), ipToRelease);
-                    logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any network");
+                    logger.debug("Portable IP address {} is no longer associated with any network", ipToRelease);
                 }
             } else {
                 _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId());
@@ -3980,7 +3942,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         try {
             if (!_ipAddrMgr.applyIpAssociations(network, true)) {
-                logger.warn("Unable to apply ip address associations for " + network);
+                logger.warn("Unable to apply ip address associations for {}", network);
                 success = false;
             }
         } catch (final ResourceUnavailableException e) {
@@ -3999,12 +3961,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         // Mark all PF rules as revoked and apply them on the backend (not in the DB)
         final List<PortForwardingRuleVO> pfRules = _portForwardingRulesDao.listByNetwork(networkId);
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules");
-        }
+        logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), networkId);
 
         for (final PortForwardingRuleVO pfRule : pfRules) {
-            logger.trace("Marking pf rule " + pfRule + " with Revoke state");
+            logger.trace("Marking pf rule {} with Revoke state", pfRule);
             pfRule.setState(FirewallRule.State.Revoke);
         }
 
@@ -4021,12 +3981,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         // Mark all static rules as revoked and apply them on the backend (not in the DB)
         final List<FirewallRuleVO> firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat);
         final List<StaticNatRule> staticNatRules = new ArrayList<StaticNatRule>();
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " as a part of shutdownNetworkRules");
-        }
+        logger.debug("Releasing {} static nat rules for network id={} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), networkId);
 
         for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) {
-            logger.trace("Marking static nat rule " + firewallStaticNatRule + " with Revoke state");
+            logger.trace("Marking static nat rule {} with Revoke state", firewallStaticNatRule);
             final IpAddress ip = _ipAddressDao.findById(firewallStaticNatRule.getSourceIpAddressId());
             final FirewallRuleVO ruleVO = _firewallDao.findById(firewallStaticNatRule.getId());
 
@@ -4071,12 +4029,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         // revoke all firewall rules for the network w/o applying them on the DB
         final List<FirewallRuleVO> firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress);
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules");
-        }
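+        // the rule count keeps its {} so that placeholder and argument counts stay in sync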
+        logger.debug("Releasing firewall ingress rules for network id={} as a part of shutdownNetworkRules", firewallRules.size(), networkId);
 
         for (final FirewallRuleVO firewallRule : firewallRules) {
-            logger.trace("Marking firewall ingress rule " + firewallRule + " with Revoke state");
+            logger.trace("Marking firewall ingress rule {} with Revoke state", firewallRule);
             firewallRule.setState(FirewallRule.State.Revoke);
         }
 
@@ -4091,9 +4047,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         }
 
         final List<FirewallRuleVO> firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress);
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules");
-        }
+        logger.debug("Releasing {} firewall egress rules for network id={} as a part of shutdownNetworkRules", firewallEgressRules.size(), networkId);
 
         try {
             // delete default egress rule
@@ -4110,7 +4064,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         }
 
         for (final FirewallRuleVO firewallRule : firewallEgressRules) {
-            logger.trace("Marking firewall egress rule " + firewallRule + " with Revoke state");
+            logger.trace("Marking firewall egress rule {} with Revoke state", firewallRule);
             firewallRule.setState(FirewallRule.State.Revoke);
         }
 
@@ -4125,9 +4079,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         }
 
         if (network.getVpcId() != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of shutdownNetworkRules");
-            }
+            logger.debug("Releasing Network ACL Items for network id={} as a part of shutdownNetworkRules", networkId);
 
             try {
                 //revoke all Network ACLs for the network w/o applying them in the DB
@@ -4144,7 +4096,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         //release all static nats for the network
         if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) {
-            logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id " + networkId);
+            logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id {}", networkId);
             success = false;
         }
 
@@ -4161,7 +4113,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
         try {
             if (!_ipAddrMgr.applyIpAssociations(network, true, true, publicIpsToRelease)) {
-                logger.warn("Unable to apply ip address associations for " + network + " as a part of shutdownNetworkRules");
+                logger.warn("Unable to apply ip address associations for {} as a part of shutdownNetworkRules", network);
                 success = false;
             }
         } catch (final ResourceUnavailableException e) {
@@ -4214,9 +4166,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         dcId = dc.getId();
         final HypervisorType hypervisorType = startup.getHypervisorType();
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Host's hypervisorType is: " + hypervisorType);
-        }
+        logger.debug("Host's hypervisorType is: {}", hypervisorType);
 
         final List<PhysicalNetworkSetupInfo> networkInfoList = new ArrayList<PhysicalNetworkSetupInfo>();
 
@@ -4243,20 +4193,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         }
 
         // send the names to the agent
-        if (logger.isDebugEnabled()) {
-            logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent");
-        }
+        logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent");
         final CheckNetworkCommand nwCmd = new CheckNetworkCommand(networkInfoList);
 
         final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd);
 
         if (answer == null) {
-            logger.warn("Unable to get an answer to the CheckNetworkCommand from agent:" + host.getId());
+            logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host.getId());
             throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId());
         }
 
         if (!answer.getResult()) {
-            logger.warn("Unable to setup agent " + hostId + " due to " + answer.getDetails());
+            logger.warn("Unable to setup agent {} due to {}", hostId, answer.getDetails());
             final String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails();
             _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg);
             throw new ConnectionException(true, msg);
@@ -4264,9 +4212,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             if (answer.needReconnect()) {
                 throw new ConnectionException(false, "Reinitialize agent after network setup.");
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug("Network setup is correct on Agent");
-            }
+            logger.debug("Network setup is correct on Agent");
             return;
         }
     }
@@ -4407,18 +4353,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                 final VMNetworkMapVO vno = new VMNetworkMapVO(vm.getId(), network.getId());
                 _vmNetworkMapDao.persist(vno);
             }
-            logger.debug("Nic is allocated successfully for vm " + vm + " in network " + network);
+            logger.debug("Nic is allocated successfully for vm {} in network {}", vm, network);
         }
 
         //2) prepare nic
         if (prepare) {
             final Pair<NetworkGuru, NetworkVO> implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter);
             if (implemented == null || implemented.first() == null) {
-                logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId());
+                logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId());
                 throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId());
             }
             nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second());
-            logger.debug("Nic is prepared successfully for vm " + vm + " in network " + network);
+            logger.debug("Nic is prepared successfully for vm {} in network {}", vm, network);
         }
 
         return nic;
@@ -4426,7 +4372,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
     private boolean getNicProfileDefaultNic(NicProfile nicProfile) {
         if (nicProfile != null) {
-            logger.debug(String.format("Using requested nic profile isDefaultNic value [%s].", nicProfile.isDefaultNic()));
+            logger.debug("Using requested nic profile isDefaultNic value [{}].", nicProfile.isDefaultNic());
             return nicProfile.isDefaultNic();
         }
 
@@ -4526,18 +4472,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         final List<Provider> providers = getProvidersForServiceInNetwork(network, service);
         //Only support one provider now
         if (providers == null) {
-            logger.error("Cannot find " + service.getName() + " provider for network " + network.getId());
+            logger.error("Cannot find {} provider for network {}", service.getName(), network.getId());
             return null;
         }
         if (providers.size() != 1 && service != Service.Lb) {
             //support more than one LB providers only
-            logger.error("Found " + providers.size() + " " + service.getName() + " providers for network!" + network.getId());
+            logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network.getId());
             return null;
         }
 
         for (final Provider provider : providers) {
             final NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName());
-            logger.info("Let " + element.getName() + " handle " + service.getName() + " in network " + network.getId());
+            logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network.getId());
             elements.add(element);
         }
         return elements;
@@ -4631,7 +4577,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
     @Override
     public Pair<NicProfile, Integer> importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter dataCenter, final boolean forced)
             throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException {
-        logger.debug("Allocating nic for vm " + vm.getUuid() + " in network " + network + " during import");
+        logger.debug("Allocating nic for vm {} in network {} during import", vm.getUuid(), network);
         String selectedIp = null;
         if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) {
             if (ipAddresses.getIp4Address().equals("auto")) {
@@ -4675,7 +4621,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
                 int count = 1;
                 if (vo.getVmType() == VirtualMachine.Type.User) {
-                    logger.debug("Changing active number of nics for network id=" + network.getUuid() + " on " + count);
+                    logger.debug("Changing active number of nics for network id={} on {}", network.getUuid(), count);
                     _networksDao.changeActiveNicsBy(network.getId(), count);
                 }
                 if (vo.getVmType() == VirtualMachine.Type.User
@@ -4743,9 +4689,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                     " and forced flag is disabled");
         }
         try {
-            logger.debug(String.format("Generating a new mac address on network %s as the mac address %s already exists", network.getName(), macAddress));
+            logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network.getName(), macAddress);
             String newMacAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId());
-            logger.debug(String.format("Successfully generated the mac address %s, using it instead of the conflicting address %s", newMacAddress, macAddress));
+            logger.debug("Successfully generated the mac address {}, using it instead of the conflicting address {}", newMacAddress, macAddress);
             return newMacAddress;
         } catch (InsufficientAddressCapacityException e) {
             String msg = String.format("Could not generate a new mac address on network %s", network.getName());
@@ -4756,9 +4702,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
     @Override
     public void unmanageNics(VirtualMachineProfile vm) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Unmanaging NICs for VM: " + vm.getId());
-        }
+        logger.debug("Unmanaging NICs for VM: {}", vm.getId());
 
         VirtualMachine virtualMachine = vm.getVirtualMachine();
         final List<NicVO> nics = _nicDao.listByVmId(vm.getId());
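
Note on the idiom these hunks apply: parameterized placeholders defer
message construction until Log4j2 has checked that the level is enabled,
which is why the surrounding isDebugEnabled() guards can be dropped when
the arguments are cheap to compute. A minimal, self-contained sketch of
the before/after shapes (the class and variable names are illustrative,
not taken from the CloudStack source):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class PlaceholderLoggingSketch {
        private static final Logger logger =
                LogManager.getLogger(PlaceholderLoggingSketch.class);

        public static void main(String[] args) {
            String serviceName = "Lb";
            long networkId = 42L;
            // Before: the message is concatenated eagerly, even when DEBUG
            // is disabled, so a guard was needed to avoid the wasted work.
            if (logger.isDebugEnabled()) {
                logger.debug("Cannot find " + serviceName + " provider for network " + networkId);
            }
            // After: {} placeholders are substituted only if DEBUG is
            // enabled, so the guard is redundant for cheap arguments.
            logger.debug("Cannot find {} provider for network {}", serviceName, networkId);
        }
    }
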
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java
index 1faf46361c6..ec5d5efb5cf 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java
@@ -159,7 +159,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
         }
         storageCapacities.put(srcDataStoreId, new Pair<>(null, null));
         if (migrationPolicy == MigrationPolicy.COMPLETE) {
-            logger.debug(String.format("Setting source image store: %s to read-only", srcDatastore.getId()));
+            logger.debug("Setting source image store: {} to read-only", srcDatastore.getId());
             storageService.updateImageStoreStatus(srcDataStoreId, true);
         }
 
@@ -200,7 +200,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
             }
 
             if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destDatastoreId).first()) {
-                logger.debug(String.format("%s: %s too large to be migrated to %s",  chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId));
+                logger.debug("{}: {} too large to be migrated to {}",  chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId);
                 skipped += 1;
                 continue;
             }
@@ -267,7 +267,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
             }
 
             if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destImgStoreId).first()) {
-                logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId));
+                logger.debug("{}: {} too large to be migrated to {}", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId);
                 continue;
             }
 
@@ -302,7 +302,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
         boolean success = true;
         if (destDatastoreId == srcDatastore.getId() && !files.isEmpty()) {
             if (migrationPolicy == MigrationPolicy.BALANCE) {
-                logger.debug("Migration completed : data stores have been balanced ");
+                logger.debug("Migration completed : data stores have been balanced");
                 if (destDatastoreId == srcDatastore.getId()) {
                     message = "Seems like source datastore has more free capacity than the destination(s)";
                 }
@@ -353,7 +353,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
             task.setTemplateChain(templateChains);
         }
         futures.add((executor.submit(task)));
-        logger.debug(String.format("Migration of %s: %s is initiated. ", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()));
+        logger.debug(String.format("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()));
         return storageCapacities;
     }
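
A detail worth calling out for the hunk above: String.format() and the
logger use different placeholder syntaxes, and they cannot be mixed.
String.format() substitutes %s itself before the logger ever sees the
message, while the logger substitutes {} lazily; wrapping a {}-style
message in String.format() would print the braces verbatim. A small
sketch of the distinction (the values are illustrative only):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class PlaceholderStyles {
        private static final Logger logger =
                LogManager.getLogger(PlaceholderStyles.class);

        public static void main(String[] args) {
            String type = "TEMPLATE";
            String uuid = "1234-abcd";
            // %s is resolved by String.format() before logging (eager).
            logger.debug(String.format("Migration of %s: %s is initiated.", type, uuid));
            // {} is resolved by the logger itself (lazy); no wrapper needed.
            logger.debug("Migration of {}: {} is initiated.", type, uuid);
            // Mixing them, e.g. String.format("Migration of {}: {}", ...),
            // leaves the literal text "Migration of {}: {}" in the log.
        }
    }
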
 
@@ -510,10 +510,10 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
         Pair<Long, Long> imageStoreCapacity = storageCapacities.get(destStoreId);
         long usedCapacity = imageStoreCapacity.second() - imageStoreCapacity.first();
         if (imageStoreCapacity != null && (usedCapacity / (imageStoreCapacity.second() * 1.0)) <= CapacityManager.SecondaryStorageCapacityThreshold.value()) {
-            logger.debug("image store: " + destStoreId + " has sufficient capacity to proceed with migration of file");
+            logger.debug("image store: {} has sufficient capacity to proceed with migration of file.", destStoreId);
             return true;
         }
-        logger.debug("Image store capacity threshold exceeded, migration not possible");
+        logger.debug("Image store capacity threshold exceeded, migration not possible.");
         return false;
     }
 
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index e7cc2f8b4d9..fd6a8d034c9 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -349,13 +349,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         if (storagePool.isPresent()) {
             String storagePoolToString = getReflectOnlySelectedFields(storagePool.get());
-            logger.debug(String.format("The storage pool [%s] was specified for this account [%s] and will be used for allocation.", storagePoolToString, vm.getAccountId()));
+            logger.debug("The storage pool [{}] was specified for this account [{}] and will be used for allocation.", storagePoolToString, vm.getAccountId());
 
         } else {
             String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value();
             storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList);
-            storagePool.ifPresent(pool -> logger.debug(String.format("The storage pool [%s] was specified in the Global Settings and will be used for allocation.",
-                    getReflectOnlySelectedFields(pool))));
+            storagePool.ifPresent(pool -> logger.debug("The storage pool [{}] was specified in the Global Settings and will be used for allocation.",
+                    getReflectOnlySelectedFields(pool)));
         }
         return storagePool;
     }
@@ -378,23 +378,21 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 StorageUtil.traceLogStoragePools(poolList, logger, "pools to choose from: ");
                 // Check if the preferred storage pool can be used. If yes, use it.
                 Optional<StoragePool> storagePool = getPreferredStoragePool(poolList, vm);
-                if (logger.isTraceEnabled()) {
-                    logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent()));
-                }
+                logger.trace("we have a preferred pool: {}", storagePool.isPresent());
 
                 StoragePool storage;
                 if (storagePool.isPresent()) {
                     storage = (StoragePool)this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary);
-                    logger.debug(String.format("VM [%s] has a preferred storage pool [%s]. Volume Orchestrator found this storage using Storage Pool Allocator [%s] and will"
-                            + " use it.", vm, storage, allocator.getClass().getSimpleName()));
+                    logger.debug("VM [{}] has a preferred storage pool [{}]. Volume Orchestrator found this storage using Storage Pool Allocator [{}] and will"
+                            + " use it.", vm, storage, allocator.getClass().getSimpleName());
                 } else {
                     storage = (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
-                    logger.debug(String.format("VM [%s] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool"
-                            + " [%s], which was discovered using Storage Pool Allocator [%s].", vm, storage, allocator.getClass().getSimpleName()));
+                    logger.debug("VM [{}] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool"
+                            + " [{}], which was discovered using Storage Pool Allocator [{}].", vm, storage, allocator.getClass().getSimpleName());
                 }
                 return storage;
             }
-            logger.debug(String.format("Could not find any available Storage Pool using Storage Pool Allocator [%s].", allocator.getClass().getSimpleName()));
+            logger.debug("Could not find any available Storage Pool using Storage Pool Allocator [{}].", allocator.getClass().getSimpleName());
         }
         logger.info("Volume Orchestrator could not find any available Storage Pool.");
         return null;
@@ -543,9 +541,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 if (pool != null) {
                     String poolToString = getReflectOnlySelectedFields(pool);
 
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(String.format("Found a suitable pool [%s] to create the volume [%s] in.", poolToString, volumeToString));
-                    }
+                    logger.debug("Found a suitable pool [{}] to create the volume [{}] in.", poolToString, volumeToString);
                     break;
                 }
             }
@@ -706,10 +702,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         String poolToString = getReflectOnlySelectedFields(pool);
 
-        if (logger.isDebugEnabled()) {
-            logger.debug(String.format("Trying to create volume [%s] on storage pool [%s].",
-                    volumeToString, poolToString));
-        }
+        logger.debug("Trying to create volume [{}] on storage pool [{}].",
+                volumeToString, poolToString);
         DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
         for (int i = 0; i < 2; i++) {
             // retry one more time in case of template reload is required for Vmware case
@@ -725,7 +719,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 VolumeApiResult result = future.get();
                 if (result.isFailed()) {
                     if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) {
-                        logger.debug(String.format("Retrying to deploy template [%s] for VMware, attempt 2/2. ", templateToString));
+                        logger.debug("Retrying to deploy template [{}] for VMware, attempt 2/2. ", templateToString);
                         continue;
                     } else {
                         String msg = String.format("Failed to create volume [%s] due to [%s].", volumeToString, result.getResult());
@@ -891,11 +885,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             } else {
                 rootDisksize = rootDisksize * 1024 * 1024 * 1024;
                 if (rootDisksize > size) {
-                    logger.debug(String.format("Using root disk size of [%s] bytes for the volume [%s].", toHumanReadableSize(rootDisksize), name));
+                    logger.debug("Using root disk size of [{}] bytes for the volume [{}].", toHumanReadableSize(rootDisksize), name);
                     size = rootDisksize;
                 } else {
-                    logger.debug(String.format("The specified root disk size of [%s] bytes is smaller than the template. Using root disk size of [%s] bytes for the volume [%s].",
-                            toHumanReadableSize(rootDisksize), size, name));
+                    logger.debug("The specified root disk size of [{}] bytes is smaller than the template. Using root disk size of [{}] bytes for the volume [{}].",
+                            toHumanReadableSize(rootDisksize), size, name);
                 }
             }
         }
@@ -985,8 +979,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         if (template.isDeployAsIs() && vm.getType() != VirtualMachine.Type.SecondaryStorageVm) {
             List<SecondaryStorageVmVO> runningSSVMs = secondaryStorageVmDao.getSecStorageVmListInStates(null, vm.getDataCenterId(), State.Running);
             if (CollectionUtils.isEmpty(runningSSVMs)) {
-                logger.info(String.format("Could not find a running SSVM in datacenter [%s] for deploying VM as is. Not deploying VM [%s] as is.",
-                        vm.getDataCenterId(), vm));
+                logger.info("Could not find a running SSVM in datacenter [{}] for deploying VM as is. Not deploying VM [{}] as is.",
+                        vm.getDataCenterId(), vm);
             } else {
                 UserVmDetailVO configurationDetail = userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.DEPLOY_AS_IS_CONFIGURATION);
                 if (configurationDetail != null) {
@@ -1020,7 +1014,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 volumeSize = templateAsIsDisks.get(number).getVirtualSize();
                 deviceId = templateAsIsDisks.get(number).getDiskNumber();
             }
-            logger.info(String.format("Adding disk object [%s] to VM [%s]", volumeName, vm));
+            logger.info("Adding disk object [{}] to VM [{}]", volumeName, vm);
             DiskProfile diskProfile = allocateTemplatedVolume(type, volumeName, offering, volumeSize, minIops, maxIops,
                     template, vm, owner, deviceId, configurationId);
             profiles.add(diskProfile);
@@ -1117,18 +1111,14 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId());
         DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId());
-        if (logger.isTraceEnabled()) {
-            logger.trace(String.format("storage-pool %s/%s is associated with pod %d",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId()));
-        }
+        logger.trace("storage-pool {}/{} is associated with pod {}",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId());
         Long podId = storagePool.getPodId() != null ? storagePool.getPodId() : vm.getPodIdToDeployIn();
         Pod pod = _entityMgr.findById(Pod.class, podId);
 
         ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId());
         DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volumeInfo.getDiskOfferingId());
         Long clusterId = storagePool.getClusterId();
-        if (logger.isTraceEnabled()) {
-            logger.trace(String.format("storage-pool %s/%s is associated with cluster %d",storagePool.getName(), storagePool.getUuid(), clusterId));
-        }
+        logger.trace("storage-pool {}/{} is associated with cluster {}",storagePool.getName(), storagePool.getUuid(), clusterId);
         Long hostId = vm.getHostId();
         if (hostId == null && storagePool.isLocal()) {
             List<StoragePoolHostVO> poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId());
@@ -1172,9 +1162,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         Long volTemplateId = existingVolume.getTemplateId();
         long vmTemplateId = vm.getTemplateId();
         if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) {
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("switchVolume: Old volume's templateId [%s] does not match the VM's templateId [%s]. Updating templateId in the new volume.", volTemplateId, vmTemplateId));
-            }
+            logger.debug("switchVolume: Old volume's templateId [{}] does not match the VM's templateId [{}]. Updating templateId in the new volume.", volTemplateId, vmTemplateId);
             templateIdToUse = vmTemplateId;
         }
 
@@ -1186,16 +1174,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 try {
                     stateTransitTo(existingVolume, Volume.Event.DestroyRequested);
                 } catch (NoTransitionException e) {
-                    logger.error(String.format("Unable to destroy existing volume [%s] due to [%s].", volumeToString, e.getMessage()));
+                    logger.error("Unable to destroy existing volume [{}] due to [{}].", volumeToString, e.getMessage());
                 }
                 // In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk
                 if (vm.getHypervisorType() == HypervisorType.VMware) {
-                    logger.info(String.format("Trying to expunge volume [%s] from primary data storage.", volumeToString));
+                    logger.info("Trying to expunge volume [{}] from primary data storage.", volumeToString);
                     AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId()));
                     try {
                         future.get();
                     } catch (Exception e) {
-                        logger.error(String.format("Failed to expunge volume [%s] from primary data storage due to [%s].", volumeToString, e.getMessage()));
+                        logger.error("Failed to expunge volume [{}] from primary data storage due to [{}].", volumeToString, e.getMessage());
                         logger.debug("Exception: ", e);
                     }
                 }
@@ -1222,9 +1210,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         HostVO host = _hostDao.findById(hostId);
 
-        if (logger.isDebugEnabled()) {
-            logger.debug(String.format("Releasing [%s] volumes for VM [%s] from host [%s].", volumesForVm.size(), _userVmDao.findById(vmId), host));
-        }
+        logger.debug("Releasing [{}] volumes for VM [{}] from host [{}].", volumesForVm.size(), _userVmDao.findById(vmId), host);
 
         for (VolumeVO volumeForVm : volumesForVm) {
             VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId());
@@ -1247,9 +1233,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     public void cleanupVolumes(long vmId) throws ConcurrentOperationException {
         VMInstanceVO vm = _userVmDao.findById(vmId);
 
-        if (logger.isDebugEnabled()) {
-            logger.debug(String.format("Cleaning storage for VM [%s].", vm));
-        }
+        logger.debug("Cleaning storage for VM [{}].", vm);
         final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
         final List<VolumeVO> toBeExpunged = new ArrayList<VolumeVO>();
 
@@ -1265,13 +1249,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                         if (!volumeAlreadyDestroyed) {
                             destroyVolumeInContext(vol);
                         } else {
-                            logger.debug(String.format("Skipping destroy for the volume [%s] as it is in [%s] state.", volumeToString, vol.getState().toString()));
+                            logger.debug("Skipping destroy for the volume [{}] as it is in [{}] state.", volumeToString, vol.getState().toString());
                         }
                         toBeExpunged.add(vol);
                     } else {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug(String.format("Detaching volume [%s].", volumeToString));
-                        }
+                        logger.debug("Detaching volume [{}].", volumeToString);
                         if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
                             _volumeApiService.detachVolumeViaDestroyVM(vmId, vol.getId());
                         }
@@ -1289,7 +1271,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             try {
                 future.get();
             } catch (InterruptedException | ExecutionException e) {
-                logger.error(String.format("Failed to expunge volume [%s] due to [%s].", expungeToString, e.getMessage()));
+                logger.error("Failed to expunge volume [{}] due to [{}].", expungeToString, e.getMessage());
                 logger.debug("Exception: ", e);
             }
         }
@@ -1407,12 +1389,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         try {
             VolumeApiResult result = future.get();
             if (result.isFailed()) {
-                logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, result.getResult()));
+                logger.error("Volume [{}] migration failed due to [{}].", volToString, result.getResult());
                 return null;
             }
             return result.getVolume();
         } catch (InterruptedException | ExecutionException e) {
-            logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, e.getMessage()));
+            logger.error("Volume [{}] migration failed due to [{}].", volToString, e.getMessage());
             logger.debug("Exception: ", e);
             return null;
         }
@@ -1453,7 +1435,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 throw new CloudRuntimeException(msg);
             }
         } catch (InterruptedException |  ExecutionException e) {
-            logger.error(String.format("Failed to migrate VM [%s] along with its volumes due to [%s].", vm, e.getMessage()));
+            logger.error("Failed to migrate VM [{}] along with its volumes due to [{}].", vm, e.getMessage());
             logger.debug("Exception: ", e);
         }
     }
@@ -1475,7 +1457,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             }
 
             if (volume.getPoolId() == pool.getId()) {
-                logger.debug(String.format("Volume [%s] already is on the elected storage pool [%s].", volumeToString, poolToString));
+                logger.debug("Volume [{}] already is on the elected storage pool [{}].", volumeToString, poolToString);
                 continue;
             }
             volumeStoragePoolMap.put(volume, volumeToPool.get(volume));
@@ -1485,9 +1467,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             logger.debug("No volume needs to be migrated.");
             return true;
         }
-        if (logger.isDebugEnabled()) {
-            logger.debug("Offline VM migration was not done up the stack in VirtualMachineManager. Trying to migrate the VM here.");
-        }
+        logger.debug("Offline VM migration was not done up the stack in VirtualMachineManager. Trying to migrate the VM here.");
         for (Map.Entry<Volume, StoragePool> entry : volumeStoragePoolMap.entrySet()) {
             Volume result = migrateVolume(entry.getKey(), entry.getValue());
             if (result == null) {
@@ -1500,9 +1480,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     @Override
     public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) {
         List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
-        if (logger.isDebugEnabled()) {
-            logger.debug(String.format("Preparing to migrate [%s] volumes for VM [%s].", vols.size(), vm.getVirtualMachine()));
-        }
+        logger.debug("Preparing to migrate [{}] volumes for VM [{}].", vols.size(), vm.getVirtualMachine());
 
         for (VolumeVO vol : vols) {
             VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
@@ -1624,17 +1602,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                     tasks.add(task);
                 } else {
                     if (vol.isRecreatable()) {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug(String.format("Volume [%s] will be recreated on storage pool [%s], assigned by deploymentPlanner.", volToString, assignedPoolToString));
-                        }
+                        logger.debug("Volume [{}] will be recreated on storage pool [{}], assigned by deploymentPlanner.", volToString, assignedPoolToString);
                         VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null);
                         tasks.add(task);
                     } else {
                         if (assignedPool.getId() != vol.getPoolId()) {
-                            if (logger.isDebugEnabled()) {
-                                logger.debug(String.format("Mismatch with the storage pool [%s] assigned by deploymentPlanner and the one associated with the volume [%s].",
-                                        assignedPoolToString, volToString));
-                            }
+                            logger.debug("Mismatch with the storage pool [{}] assigned by deploymentPlanner and the one associated with the volume [{}].",
+                                    assignedPoolToString, volToString);
                             DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId());
                             if (diskOffering.isUseLocalStorage()) {
                                 // Currently migration of local volume is not supported so bail out
@@ -1653,10 +1627,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                                     storageMigrationEnabled = StorageMigrationEnabled.value();
                                 }
                                 if (storageMigrationEnabled) {
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug(String.format("Shared volume [%s] will be migrated to the storage pool [%s], assigned by deploymentPlanner.",
-                                                volToString, assignedPoolToString));
-                                    }
+                                    logger.debug("Shared volume [{}] will be migrated to the storage pool [{}], assigned by deploymentPlanner.",
+                                            volToString, assignedPoolToString);
                                     VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool);
                                     tasks.add(task);
                                 } else {
@@ -1679,10 +1651,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
                 StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
 
-                if (logger.isDebugEnabled()) {
-                    logger.debug(String.format("No need to recreate the volume [%s] since it already has an assigned pool: [%s]. Adding disk to the VM.",
-                            volToString, pool.getUuid()));
-                }
+                logger.debug("No need to recreate the volume [{}] since it already has an assigned pool: [{}]. Adding disk to the VM.",
+                        volToString, pool.getUuid());
                 VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool);
                 tasks.add(task);
             }
@@ -1695,7 +1665,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         if (Objects.equals(originalEntry.getSize(), updateEntry.getSize())) {
             return;
         }
-        logger.debug(String.format("Size mismatch found for %s after creation, old size: %d, new size: %d. Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize()));
+        logger.debug("Size mismatch found for {} after creation, old size: {}, new size: {}. Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize());
         if (ObjectUtils.anyNull(originalEntry.getSize(), updateEntry.getSize())) {
             _resourceLimitMgr.recalculateResourceCount(updateEntry.getAccountId(), updateEntry.getDomainId(),
                     ResourceType.primary_storage.getOrdinal());
@@ -1717,7 +1687,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
             destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
             String destPoolToString = getReflectOnlySelectedFields(destPool);
-            logger.debug(String.format("Existing pool: [%s].", destPoolToString));
+            logger.debug("Existing pool: [{}].", destPoolToString);
         } else {
             StoragePool pool = dest.getStorageForDisks().get(vol);
             destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
@@ -1739,9 +1709,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 dest.getStorageForDisks().put(newVol, poolWithOldVol);
                 dest.getStorageForDisks().remove(vol);
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug(String.format("Created new volume [%s] from old volume [%s].", newVolToString, volToString));
-            }
+            logger.debug("Created new volume [{}] from old volume [{}].", newVolToString, volToString);
         }
         VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
         Long templateId = newVol.getTemplateId();
@@ -1762,7 +1730,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             } else {
                 final VirtualMachineTemplate template = _entityMgr.findById(VirtualMachineTemplate.class, templateId);
                 if (template == null) {
-                    logger.error(String.format("Failed to find template: %d for %s", templateId, volume));
+                    logger.error("Failed to find template: {} for {}", templateId, volume);
                     throw new CloudRuntimeException(String.format("Failed to find template for volume ID: %s", volume.getUuid()));
                 }
                 TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId());
@@ -1774,8 +1742,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                         if (!primaryDataStore.isManaged()) {
                             templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId());
                         } else {
-                            logger.debug(String.format("Directly downloading template [%s] on host [%s] and copying it to the managed storage pool [%s].",
-                                    templateId, dest.getHost().getUuid(), destPool.getUuid()));
+                            logger.debug("Directly downloading template [{}] on host [{}] and copying it to the managed storage pool [{}].",
+                                    templateId, dest.getHost().getUuid(), destPool.getUuid());
                             templ = volService.createManagedStorageTemplate(templateId, destPool.getId(), dest.getHost().getId());
                         }
 
@@ -2029,12 +1997,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         String volumeToString = getReflectOnlySelectedFields(volume);
 
         if (volume.getState().equals(Volume.State.Creating)) {
-            logger.debug(String.format("Removing volume [%s], as it was leftover from the last management server stop.", volumeToString));
+            logger.debug("Removing volume [{}], as it was leftover from the last management server stop.", volumeToString);
             _volsDao.remove(volume.getId());
         }
 
         if (volume.getState().equals(Volume.State.Attaching)) {
-            logger.warn(String.format("Volume [%s] failed to attach to the VM [%s] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId)));
+            logger.warn("Volume [{}] failed to attach to the VM [{}] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId));
             volume.setState(Volume.State.Ready);
             _volsDao.update(volumeId, volume);
         }
@@ -2057,11 +2025,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             if (duplicateVol != null) {
                 String duplicateVolToString = getReflectOnlySelectedFields(duplicateVol);
 
-                logger.debug(String.format("Removing volume [%s] from storage pool [%s] because it's duplicated.", duplicateVolToString, destPoolToString));
+                logger.debug("Removing volume [{}] from storage pool [{}] because it's duplicated.", duplicateVolToString, destPoolToString);
                 _volsDao.remove(duplicateVol.getId());
             }
 
-            logger.debug(String.format("Changing volume [%s] state from Migrating to Ready in case of migration failure.", volumeToString));
+            logger.debug("Changing volume [{}] state from Migrating to Ready in case of migration failure.", volumeToString);
             volume.setState(Volume.State.Ready);
             _volsDao.update(volumeId, volume);
         }
@@ -2075,7 +2043,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         String volumeToString = getReflectOnlySelectedFields(volume);
 
         if (volume.getState() == Volume.State.Snapshotting) {
-            logger.debug(String.format("Changing volume [%s] state back to Ready.", volumeToString));
+            logger.debug("Changing volume [{}] state back to Ready.", volumeToString);
             volume.setState(Volume.State.Ready);
             _volsDao.update(volume.getId(), volume);
         }
@@ -2099,7 +2067,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                     cleanupVolumeDuringSnapshotFailure(work.getVolumeId(), work.getSnapshotId());
                 }
             } catch (Exception e) {
-                logger.error(String.format("Clean up job failed due to [%s]. Will continue with other clean up jobs.", e.getMessage()));
+                logger.error("Clean up job failed due to [{}]. Will continue with other clean up jobs.", e.getMessage());
                 logger.debug("Exception: ", e);
             }
         }
@@ -2176,8 +2144,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         }
 
         if (needUpdate) {
-            logger.info(String.format("Updating volume's disk chain info. Volume: [%s]. Path: [%s] -> [%s], Disk Chain Info: [%s] -> [%s].",
-                    volToString, vol.getPath(), path, vol.getChainInfo(), chainInfo));
+            logger.info("Updating volume's disk chain info. Volume: [{}]. Path: [{}] -> [{}], Disk Chain Info: [{}] -> [{}].",
+                    volToString, vol.getPath(), path, vol.getChainInfo(), chainInfo);
             vol.setPath(path);
             vol.setChainInfo(chainInfo);
             if (updatedDataStoreUUID != null) {
@@ -2289,7 +2257,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     @Override
     public void unmanageVolumes(long vmId) {
         if (logger.isDebugEnabled()) {
-            logger.debug(String.format("Unmanaging storage for VM [%s].", _userVmDao.findById(vmId)));
+            logger.debug("Unmanaging storage for VM [{}].", _userVmDao.findById(vmId));
         }
         final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
 
@@ -2302,7 +2270,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                     boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged
                             || vol.getState() == Volume.State.Expunging);
                     if (volumeAlreadyDestroyed) {
-                        logger.debug(String.format("Skipping Destroy for the volume [%s] as it is in [%s] state.", volToString, vol.getState().toString()));
+                        logger.debug("Skipping Destroy for the volume [{}] as it is in [{}] state.", volToString, vol.getState().toString());
                     } else {
                         volService.unmanageVolume(vol.getId());
                     }
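
The isDebugEnabled() guard retained in unmanageVolumes() above is worth
noting: placeholders defer only the message formatting, while the
arguments themselves are still evaluated eagerly, so a guard (or Log4j2's
Supplier overload) remains useful when producing an argument is
expensive, such as a database lookup. A sketch of both options, with
findById() as a hypothetical stand-in for a costly DAO call:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class ExpensiveArgumentLogging {
        private static final Logger logger =
                LogManager.getLogger(ExpensiveArgumentLogging.class);

        // Hypothetical stand-in for an expensive lookup such as a DAO call.
        static String findById(long vmId) {
            return "vm-" + vmId;
        }

        public static void main(String[] args) {
            long vmId = 7L;
            // Option 1: guard so findById() only runs when DEBUG is enabled.
            if (logger.isDebugEnabled()) {
                logger.debug("Unmanaging storage for VM [{}].", findById(vmId));
            }
            // Option 2: pass a Supplier; Log4j2 invokes it only if enabled.
            logger.debug("Unmanaging storage for VM [{}].", () -> findById(vmId));
        }
    }
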