Posted to commits@hive.apache.org by pr...@apache.org on 2017/05/26 22:31:10 UTC
[2/2] hive git commit: HIVE-16285: Servlet for dynamically configuring log levels (Prasanth Jayachandran reviewed by Siddharth Seth, Gopal V)
HIVE-16285: Servlet for dynamically configuring log levels (Prasanth Jayachandran reviewed by Siddharth Seth, Gopal V)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ca80968e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ca80968e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ca80968e
Branch: refs/heads/master
Commit: ca80968e039382b8def51adb2a4520e76c89f7fb
Parents: c3c6175
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Fri May 26 15:30:53 2017 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Fri May 26 15:30:53 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hive/http/HttpServer.java | 1 +
.../hive/http/Log4j2ConfiguratorServlet.java | 275 +++++++++++++++++++
.../llap/daemon/impl/TaskExecutorService.java | 42 ++-
.../llap/tezplugins/LlapTaskCommunicator.java | 5 +-
.../hive/ql/exec/AppMasterEventOperator.java | 14 +-
.../hadoop/hive/ql/exec/CommonJoinOperator.java | 2 +-
.../hadoop/hive/ql/exec/DemuxOperator.java | 26 +-
.../hadoop/hive/ql/exec/FileSinkOperator.java | 20 +-
.../hadoop/hive/ql/exec/GroupByOperator.java | 17 +-
.../hive/ql/exec/HashTableSinkOperator.java | 4 +-
.../hadoop/hive/ql/exec/JoinOperator.java | 2 +-
.../hadoop/hive/ql/exec/MapJoinOperator.java | 8 +-
.../apache/hadoop/hive/ql/exec/MapOperator.java | 10 +-
.../apache/hadoop/hive/ql/exec/MuxOperator.java | 8 +-
.../apache/hadoop/hive/ql/exec/Operator.java | 49 ++--
.../hive/ql/exec/OrcFileMergeOperator.java | 6 +-
.../hadoop/hive/ql/exec/ReduceSinkOperator.java | 12 +-
.../hadoop/hive/ql/exec/SMBMapJoinOperator.java | 8 +-
.../hadoop/hive/ql/exec/ScriptOperator.java | 8 +-
.../hadoop/hive/ql/exec/SelectOperator.java | 2 +-
.../hadoop/hive/ql/exec/TableScanOperator.java | 8 +-
.../hadoop/hive/ql/exec/UnionOperator.java | 2 +-
.../hadoop/hive/ql/exec/mr/ExecReducer.java | 12 +-
.../hadoop/hive/ql/exec/mr/ObjectCache.java | 5 +-
.../ql/exec/spark/SparkMapRecordHandler.java | 7 +-
.../ql/exec/spark/SparkReduceRecordHandler.java | 9 +-
.../ql/exec/tez/ColumnarSplitSizeEstimator.java | 5 +-
.../tez/HostAffinitySplitLocationProvider.java | 2 +-
.../hive/ql/exec/tez/LlapObjectCache.java | 12 +-
.../hive/ql/exec/tez/RecordProcessor.java | 6 -
.../mapjoin/VectorMapJoinCommonOperator.java | 12 +-
.../VectorMapJoinGenerateResultOperator.java | 8 +-
.../VectorMapJoinInnerBigOnlyLongOperator.java | 8 +-
...ctorMapJoinInnerBigOnlyMultiKeyOperator.java | 8 +-
...VectorMapJoinInnerBigOnlyStringOperator.java | 8 +-
.../mapjoin/VectorMapJoinInnerLongOperator.java | 8 +-
.../VectorMapJoinInnerMultiKeyOperator.java | 8 +-
.../VectorMapJoinInnerStringOperator.java | 8 +-
.../VectorMapJoinLeftSemiLongOperator.java | 8 +-
.../VectorMapJoinLeftSemiMultiKeyOperator.java | 8 +-
.../VectorMapJoinLeftSemiStringOperator.java | 8 +-
...ectorMapJoinOuterGenerateResultOperator.java | 12 +-
.../mapjoin/VectorMapJoinOuterLongOperator.java | 10 +-
.../VectorMapJoinOuterMultiKeyOperator.java | 10 +-
.../VectorMapJoinOuterStringOperator.java | 10 +-
.../fast/VectorMapJoinFastBytesHashTable.java | 6 +-
.../fast/VectorMapJoinFastLongHashTable.java | 6 +-
.../VectorReduceSinkCommonOperator.java | 8 +-
.../hadoop/hive/ql/io/orc/ExternalCache.java | 3 +-
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 11 +-
.../stats/annotation/StatsRulesProcFactory.java | 56 ++--
.../hadoop/hive/serde2/lazy/LazyBinary.java | 5 +-
52 files changed, 528 insertions(+), 288 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/common/src/java/org/apache/hive/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/HttpServer.java b/common/src/java/org/apache/hive/http/HttpServer.java
index 0bc0032..7368a91 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -425,6 +425,7 @@ public class HttpServer {
addServlet("jmx", "/jmx", JMXJsonServlet.class);
addServlet("conf", "/conf", ConfServlet.class);
addServlet("stacks", "/stacks", StackServlet.class);
+ addServlet("conflog", "/conflog", Log4j2ConfiguratorServlet.class);
for (Pair<String, Class<? extends HttpServlet>> p : b.servlets) {
addServlet(p.getFirst(), "/" + p.getFirst(), p.getSecond());
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
new file mode 100644
index 0000000..8042f21
--- /dev/null
+++ b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.http;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A servlet to configure log4j2.
+ * <br/>
+ * HTTP GET returns all loggers and their log levels in a JSON formatted response.
+ * <br/>
+ * HTTP POST is used for configuring the loggers. POST data should be in the same format as GET's response.
+ * To configure loggers (add new ones or update existing ones), use HTTP POST with logger names and levels in the following JSON format.
+ *
+ * <br/>
+ * <pre>
+ * <code>{
+ * "loggers": [ {
+ * "logger" : "",
+ * "level" : "INFO"
+ * }, {
+ * "logger" : "LlapIoOrc",
+ * "level" : "WARN"
+ * }, {
+ * "logger" : "org.apache.zookeeper.server.NIOServerCnxn",
+ * "level" : "WARN"
+ * }]
+ * }</code>
+ * </pre>
+ *
+ * <br/>
+ * Example usage:
+ * <li>
+ * Returns all loggers with levels in JSON format:
+ * <pre>
+ * curl http://hostname:port/conflog
+ * </pre>
+ * </li>
+ * <li>
+ * Set root logger to INFO:
+ * <pre>
+ * curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" } ] }'
+ * http://hostname:port/conflog
+ * </pre>
+ * </li>
+ * <li>
+ * Set logger with level:
+ * <pre>
+ * curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
+ * { "logger" : "LlapIoOrc", "level" : "INFO" } ] }' http://hostame:port/conflog
+ * </pre>
+ * </li>
+ * <li>
+ * Set log level for all classes under a package:
+ * <pre>
+ * curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
+ * { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostame:port/conflog
+ * </pre>
+ * </li>
+ * <li>
+ * Set log levels for multiple loggers:
+ * <pre>
+ * curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" },
+ * { "logger" : "LlapIoOrc", "level" : "WARN" },
+ * { "logger" : "org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon", "level" : "INFO" },
+ * { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostame:port/conflog
+ * </pre>
+ * </li>
+ * <br/>
+ * Response Status Codes:
+ * <br/>
+ * <li>200 - OK : If the POST data is valid and the request succeeds, or if a GET request succeeds.</li>
+ * <li>401 - UNAUTHORIZED : If the user does not have privileges to access instrumentation servlets.
+ * Refer to the <code>hadoop.security.instrumentation.requires.admin</code> config for more info.</li>
+ * <li>400 - BAD_REQUEST : If the POST data is not a valid JSON.</li>
+ * <li>500 - INTERNAL_SERVER_ERROR : If a GET request throws an IOException during JSON output generation.</li>
+ */
+public class Log4j2ConfiguratorServlet extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+ private static final Logger LOG = LoggerFactory.getLogger(Log4j2ConfiguratorServlet.class);
+ private static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods";
+ private static final String ALLOWED_METHODS = "POST, GET";
+ private static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin";
+ private static final String CONTENT_TYPE_JSON_UTF8 = "application/json; charset=utf8";
+
+ private transient LoggerContext context;
+ private transient Configuration conf;
+
+ private static class ConfLoggers {
+ private List<ConfLogger> loggers;
+
+ public ConfLoggers() {
+ this.loggers = new ArrayList<>();
+ }
+
+ public List<ConfLogger> getLoggers() {
+ return loggers;
+ }
+
+ public void setLoggers(final List<ConfLogger> loggers) {
+ this.loggers = loggers;
+ }
+ }
+
+ private static class ConfLogger {
+ private String logger;
+ private String level;
+
+ // no-arg ctor required for JSON deserialization
+ public ConfLogger() {
+ this(null, null);
+ }
+
+ public ConfLogger(String logger, String level) {
+ this.logger = logger;
+ this.level = level;
+ }
+
+ public String getLogger() {
+ return logger == null ? logger : logger.trim();
+ }
+
+ public void setLogger(final String logger) {
+ this.logger = logger;
+ }
+
+ public String getLevel() {
+ return level == null ? level : level.trim().toUpperCase();
+ }
+
+ public void setLevel(final String level) {
+ this.level = level;
+ }
+ }
+
+ /**
+ * Initialize this servlet.
+ */
+ @Override
+ public void init() throws ServletException {
+ context = (LoggerContext) LogManager.getContext(false);
+ conf = context.getConfiguration();
+ }
+
+ @Override
+ public void doGet(HttpServletRequest request, HttpServletResponse response)
+ throws ServletException, IOException {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
+ response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+ return;
+ }
+
+ setResponseHeader(response);
+
+ // list the loggers and their levels
+ listLoggers(response);
+ }
+
+ private void setResponseHeader(final HttpServletResponse response) {
+ response.setContentType(CONTENT_TYPE_JSON_UTF8);
+ response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS);
+ response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
+ }
+
+ @Override
+ protected void doPost(final HttpServletRequest request, final HttpServletResponse response)
+ throws ServletException, IOException {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
+ response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+ return;
+ }
+ setResponseHeader(response);
+
+ String dataJson = request.getReader().lines().collect(Collectors.joining());
+ ObjectMapper objectMapper = new ObjectMapper();
+ try {
+ ConfLoggers confLoggers = objectMapper.readValue(dataJson, ConfLoggers.class);
+ configureLogger(confLoggers);
+ } catch (IOException e) {
+ response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+ LOG.error("Error configuring log4j2 via /conflog endpoint.", e);
+ return;
+ }
+
+ response.setStatus(HttpServletResponse.SC_OK);
+ }
+
+ private void configureLogger(final ConfLoggers confLoggers) {
+ if (confLoggers != null) {
+ for (ConfLogger logger : confLoggers.getLoggers()) {
+ String loggerName = logger.getLogger();
+ Level logLevel = Level.getLevel(logger.getLevel());
+ if (logLevel == null) {
+ LOG.warn("Invalid log level: {} for logger: {}. Ignoring reconfiguration.", loggerName, logger.getLevel());
+ continue;
+ }
+
+ LoggerConfig loggerConfig = conf.getLoggerConfig(loggerName);
+ // if the logger name is not found, the root logger config is returned. We don't want to change the root
+ // logger level in that case, since the user either requested a new logger or specified invalid input;
+ // instead, we add the logger that the user requested.
+ if (!loggerName.equals(LogManager.ROOT_LOGGER_NAME) &&
+ loggerConfig.getName().equals(LogManager.ROOT_LOGGER_NAME)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Requested logger ({}) not found. Adding as new logger with {} level", loggerName, logLevel);
+ }
+ // requested logger not found. Add the new logger with the requested level
+ conf.addLogger(loggerName, new LoggerConfig(loggerName, logLevel, true));
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Updating logger ({}) to {} level", loggerName, logLevel);
+ }
+ // update the log level for the specified logger
+ loggerConfig.setLevel(logLevel);
+ }
+ }
+ context.updateLoggers(conf);
+ }
+ }
+
+ private void listLoggers(final HttpServletResponse response) throws IOException {
+ PrintWriter writer = null;
+ try {
+ writer = response.getWriter();
+ ConfLoggers confLoggers = new ConfLoggers();
+ Collection<LoggerConfig> loggerConfigs = conf.getLoggers().values();
+ loggerConfigs.forEach(lc -> confLoggers.getLoggers().add(new ConfLogger(lc.getName(), lc.getLevel().toString())));
+ ObjectMapper objectMapper = new ObjectMapper();
+ objectMapper.writerWithDefaultPrettyPrinter().writeValue(writer, confLoggers);
+ } catch (IOException e) {
+ LOG.error("Caught an exception while processing Log4j2 configuration request", e);
+ response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ return;
+ } finally {
+ if (writer != null) {
+ writer.close();
+ }
+ }
+ response.setStatus(HttpServletResponse.SC_OK);
+ }
+}
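
For illustration only (this sketch is not part of the commit): a minimal standalone Java client exercising the servlet above. The host and port are placeholders for whichever Hive web UI registers /conflog, and the class name is hypothetical; it issues the same GET and POST requests shown in the javadoc's curl examples.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ConfLogClient {
  public static void main(String[] args) throws Exception {
    String base = "http://localhost:8080/conflog"; // placeholder host:port

    // GET: dump all loggers and their levels as pretty-printed JSON.
    HttpURLConnection get = (HttpURLConnection) new URL(base).openConnection();
    try (BufferedReader r = new BufferedReader(
        new InputStreamReader(get.getInputStream(), StandardCharsets.UTF_8))) {
      r.lines().forEach(System.out::println);
    }

    // POST: set the LlapIoOrc logger to WARN, using the same JSON shape GET returns.
    String body = "{ \"loggers\" : [ { \"logger\" : \"LlapIoOrc\", \"level\" : \"WARN\" } ] }";
    HttpURLConnection post = (HttpURLConnection) new URL(base).openConnection();
    post.setRequestMethod("POST");
    post.setRequestProperty("Content-Type", "application/json");
    post.setDoOutput(true);
    try (OutputStream os = post.getOutputStream()) {
      os.write(body.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("POST status: " + post.getResponseCode()); // 200 on success
  }
}
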
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 7f8c947..dd459b1 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -84,8 +84,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
public class TaskExecutorService extends AbstractService
implements Scheduler<TaskRunnerCallable>, SchedulerFragmentCompletingListener {
private static final Logger LOG = LoggerFactory.getLogger(TaskExecutorService.class);
- private static final boolean isInfoEnabled = LOG.isInfoEnabled();
- private static final boolean isDebugEnabled = LOG.isDebugEnabled();
private static final String TASK_EXECUTOR_THREAD_NAME_FORMAT = "Task-Executor-%d";
private static final String WAIT_QUEUE_SCHEDULER_THREAD_NAME_FORMAT = "Wait-Queue-Scheduler-%d";
private static final long PREEMPTION_KILL_GRACE_MS = 500; // 500ms
@@ -294,7 +292,7 @@ public class TaskExecutorService extends AbstractService
// (numSlotsAvailable can go negative, if the callback after the thread completes is delayed)
boolean shouldWait = numSlotsAvailable.get() <= 0 && lastKillTimeMs == null;
if (task.getTaskRunnerCallable().canFinish()) {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Attempting to schedule task {}, canFinish={}. Current state: "
+ "preemptionQueueSize={}, numSlotsAvailable={}, waitQueueSize={}",
task.getRequestId(), task.getTaskRunnerCallable().canFinish(),
@@ -335,7 +333,7 @@ public class TaskExecutorService extends AbstractService
lock.wait(PREEMPTION_KILL_GRACE_SLEEP_MS);
}
} else {
- if (isDebugEnabled && lastKillTimeMs != null) {
+ if (LOG.isDebugEnabled() && lastKillTimeMs != null) {
LOG.debug("Grace period ended for the previous kill; preemtping more tasks");
}
if (handleScheduleAttemptedRejection(task)) {
@@ -406,18 +404,18 @@ public class TaskExecutorService extends AbstractService
if (evictedTask == null || !evictedTask.equals(taskWrapper)) {
knownTasks.put(taskWrapper.getRequestId(), taskWrapper);
taskWrapper.setIsInWaitQueue(true);
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("{} added to wait queue. Current wait queue size={}", task.getRequestId(),
waitQueue.size());
}
result = evictedTask == null ? SubmissionState.ACCEPTED : SubmissionState.EVICTED_OTHER;
- if (isDebugEnabled && evictedTask != null) {
+ if (LOG.isDebugEnabled() && evictedTask != null) {
LOG.debug("Eviction: {} {} {}", taskWrapper, result, evictedTask);
}
} else {
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info(
"wait queue full, size={}. numSlotsAvailable={}, runningFragmentCount={}. {} not added",
waitQueue.size(), numSlotsAvailable.get(), runningFragmentCount.get(), task.getRequestId());
@@ -426,7 +424,7 @@ public class TaskExecutorService extends AbstractService
result = SubmissionState.REJECTED;
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("{} is {} as wait queue is full", taskWrapper.getRequestId(), result);
}
if (metrics != null) {
@@ -440,7 +438,7 @@ public class TaskExecutorService extends AbstractService
// after some other submission has evicted it.
boolean stateChanged = !taskWrapper.maybeRegisterForFinishedStateNotifications(canFinish);
if (stateChanged) {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Finishable state of {} updated to {} during registration for state updates",
taskWrapper.getRequestId(), !canFinish);
}
@@ -455,12 +453,12 @@ public class TaskExecutorService extends AbstractService
// Register for state change notifications so that the waitQueue can be re-ordered correctly
// if the fragment moves in or out of the finishable state.
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Wait Queue: {}", waitQueue);
}
if (evictedTask != null) {
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("{} evicted from wait queue in favor of {} because of lower priority",
evictedTask.getRequestId(), task.getRequestId());
}
@@ -503,7 +501,7 @@ public class TaskExecutorService extends AbstractService
// Can be null since the task may have completed meanwhile.
if (taskWrapper != null) {
if (taskWrapper.isInWaitQueue()) {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Removing {} from waitQueue", fragmentId);
}
taskWrapper.setIsInWaitQueue(false);
@@ -514,7 +512,7 @@ public class TaskExecutorService extends AbstractService
}
}
if (taskWrapper.isInPreemptionQueue()) {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Removing {} from preemptionQueue", fragmentId);
}
removeFromPreemptionQueue(taskWrapper);
@@ -558,7 +556,7 @@ public class TaskExecutorService extends AbstractService
@VisibleForTesting
/** Assumes the epic lock is already taken. */
void tryScheduleUnderLock(final TaskWrapper taskWrapper) throws RejectedExecutionException {
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Attempting to execute {}", taskWrapper);
}
ListenableFuture<TaskRunner2Result> future = executorService.submit(
@@ -572,7 +570,7 @@ public class TaskExecutorService extends AbstractService
Futures.addCallback(future, wrappedCallback, executionCompletionExecutorService);
boolean canFinish = taskWrapper.getTaskRunnerCallable().canFinish();
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("{} scheduled for execution. canFinish={}", taskWrapper.getRequestId(), canFinish);
}
@@ -580,7 +578,7 @@ public class TaskExecutorService extends AbstractService
// to the tasks are not ready yet, the task is eligible for pre-emption.
if (enablePreemption) {
if (!canFinish) {
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("{} is not finishable. Adding it to pre-emption queue",
taskWrapper.getRequestId());
}
@@ -596,7 +594,7 @@ public class TaskExecutorService extends AbstractService
private boolean handleScheduleAttemptedRejection(TaskWrapper taskWrapper) {
if (enablePreemption && taskWrapper.getTaskRunnerCallable().canFinish()
&& !preemptionQueue.isEmpty()) {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Preemption Queue: " + preemptionQueue);
}
@@ -610,7 +608,7 @@ public class TaskExecutorService extends AbstractService
pRequest.getRequestId());
continue; // Try something else.
}
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Invoking kill task for {} due to pre-emption to run {}",
pRequest.getRequestId(), taskWrapper.getRequestId());
}
@@ -767,7 +765,7 @@ public class TaskExecutorService extends AbstractService
if (enablePreemption) {
String state = reason == null ? "FAILED" : reason.name();
boolean removed = removeFromPreemptionQueueUnlocked(taskWrapper);
- if (removed && isInfoEnabled) {
+ if (removed && LOG.isInfoEnabled()) {
TaskRunnerCallable trc = taskWrapper.getTaskRunnerCallable();
LOG.info(TaskRunnerCallable.getTaskIdentifierString(trc.getRequest(),
trc.getVertexSpec(), trc.getQueryId()) + " request " + state + "! Removed from preemption list.");
@@ -778,7 +776,7 @@ public class TaskExecutorService extends AbstractService
if (metrics != null) {
metrics.setNumExecutorsAvailable(numSlotsAvailable.get());
}
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Task {} complete. WaitQueueSize={}, numSlotsAvailable={}, preemptionQueueSize={}",
taskWrapper.getRequestId(), waitQueue.size(), numSlotsAvailable.get(),
preemptionQueue.size());
@@ -831,7 +829,7 @@ public class TaskExecutorService extends AbstractService
public void shutDown(boolean awaitTermination) {
if (!isShutdown.getAndSet(true)) {
if (awaitTermination) {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("awaitTermination: " + awaitTermination + " shutting down task executor" +
" service gracefully");
}
@@ -839,7 +837,7 @@ public class TaskExecutorService extends AbstractService
shutdownExecutor(executorService);
shutdownExecutor(executionCompletionExecutorService);
} else {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("awaitTermination: " + awaitTermination + " shutting down task executor" +
" service immediately");
}
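
The edits above (and the analogous ones throughout the rest of this diff) remove guard flags such as isDebugEnabled because a static final flag captures the log level once, at class-initialization time, so a level change made later through the /conflog servlet would never take effect; calling LOG.isDebugEnabled() on every check does observe the change. A minimal sketch of the pitfall, using a hypothetical class and assuming the slf4j-over-log4j2 binding that Hive uses:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CachedGuardPitfall {
  private static final Logger LOG = LoggerFactory.getLogger(CachedGuardPitfall.class);
  // Evaluated exactly once, when this class is initialized; never re-checked.
  private static final boolean IS_DEBUG_ENABLED = LOG.isDebugEnabled();

  public static void main(String[] args) {
    // Simulate what the /conflog servlet does: raise this logger to DEBUG at runtime.
    Configurator.setLevel(CachedGuardPitfall.class.getName(), Level.DEBUG);

    if (IS_DEBUG_ENABLED) {
      // Never reached if the process started above DEBUG: the cached flag is stale.
      LOG.debug("cached guard: not logged");
    }
    if (LOG.isDebugEnabled()) {
      // Re-queries the logger on each call, so it sees the runtime reconfiguration.
      LOG.debug("dynamic guard: logged");
    }
  }
}
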
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
----------------------------------------------------------------------
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index 18ce03c..ff00aba 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -102,7 +102,6 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
private static final Logger LOG = LoggerFactory.getLogger(LlapTaskCommunicator.class);
- private static final boolean isInfoEnabled = LOG.isInfoEnabled();
private static final String RESOURCE_URI_STR = "/ws/v1/applicationhistory";
private static final Joiner JOINER = Joiner.on("");
private static final Joiner PATH_JOINER = Joiner.on("/");
@@ -598,7 +597,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
Long old = knownNodeMap.putIfAbsent(nodeId,
TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS));
if (old == null) {
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Added new known node: {}", nodeId);
}
}
@@ -609,7 +608,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
PingingNodeInfo ni = new PingingNodeInfo(currentTs);
PingingNodeInfo old = pingedNodeMap.put(nodeId, ni);
if (old == null) {
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Added new pinging node: [{}]", nodeId);
}
} else {
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
index bf30ef1..b151a1d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
@@ -93,9 +93,9 @@ public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
Writable writableRow = serializer.serialize(row, rowInspector);
writableRow.write(buffer);
if (buffer.getLength() > MAX_SIZE) {
- if (isLogInfoEnabled) {
- LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength());
- }
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength());
+ }
hasReachedMaxSize = true;
buffer = null;
}
@@ -103,7 +103,7 @@ public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
throw new HiveException(e);
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("AppMasterEvent: " + row);
}
forward(row, rowInspector);
@@ -130,9 +130,9 @@ public class AppMasterEventOperator extends Operator<AppMasterEventDesc> {
InputInitializerEvent.create(vertexName, inputName,
ByteBuffer.wrap(payload, 0, payload.length));
- if (isLogInfoEnabled) {
- LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
- + ". Payload size = " + payload.length);
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
+ + ". Payload size = " + payload.length);
}
context.getTezProcessorContext().sendEvents(Collections.singletonList(event));
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index df1898e..07fd653 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -383,7 +383,7 @@ public abstract class CommonJoinOperator<T extends JoinDesc> extends
joinEmitInterval = -1;
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " + totalSz);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
index c184742..a9f2218 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
@@ -188,7 +188,7 @@ public class DemuxOperator extends Operator<DemuxDesc>
}
newChildOperatorsTag[i] = toArray(childOperatorTags);
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("newChildOperatorsTag " + Arrays.toString(newChildOperatorsTag));
}
@@ -214,15 +214,14 @@ public class DemuxOperator extends Operator<DemuxDesc>
@Override
protected void initializeChildren(Configuration hconf) throws HiveException {
state = State.INIT;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Operator " + id + " " + getName() + " initialized");
LOG.info("Initializing children of " + id + " " + getName());
}
for (int i = 0; i < childOperatorsArray.length; i++) {
- if (isLogInfoEnabled) {
- LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
- childOperatorsArray[i].getName() +
- " " + childInputObjInspectors[i].length);
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
+ childOperatorsArray[i].getName() + " " + childInputObjInspectors[i].length);
}
// We need to initialize those MuxOperators first because if we first
// initialize other operators, the states of all parents of those MuxOperators
@@ -247,10 +246,9 @@ public class DemuxOperator extends Operator<DemuxDesc>
}
}
for (int i = 0; i < childOperatorsArray.length; i++) {
- if (isLogInfoEnabled) {
- LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
- childOperatorsArray[i].getName() +
- " " + childInputObjInspectors[i].length);
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
+ childOperatorsArray[i].getName() + " " + childInputObjInspectors[i].length);
}
if (!(childOperatorsArray[i] instanceof MuxOperator)) {
childOperatorsArray[i].initialize(hconf, childInputObjInspectors[i]);
@@ -275,7 +273,7 @@ public class DemuxOperator extends Operator<DemuxDesc>
endGroupIfNecessary(currentChildIndex);
int oldTag = newTagToOldTag[tag];
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
cntrs[tag]++;
if (cntrs[tag] == nextCntrs[tag]) {
LOG.debug(id + " (newTag, childIndex, oldTag)=(" + tag + ", " + currentChildIndex + ", "
@@ -311,9 +309,9 @@ public class DemuxOperator extends Operator<DemuxDesc>
int newTag = i;
int oldTag = newTagToOldTag[i];
int childIndex = newTagToChildIndex[newTag];
- if (isLogInfoEnabled) {
- LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", "
- + oldTag + "), forwarded " + cntrs[newTag] + " rows");
+ if (LOG.isInfoEnabled()) {
+ LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", "
+ + oldTag + "), forwarded " + cntrs[newTag] + " rows");
}
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index f3c571a..3e09432 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -93,8 +93,6 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
Serializable {
public static final Logger LOG = LoggerFactory.getLogger(FileSinkOperator.class);
- private static final boolean isInfoEnabled = LOG.isInfoEnabled();
- private static final boolean isDebugEnabled = LOG.isDebugEnabled();
protected transient HashMap<String, FSPaths> valToPaths;
protected transient int numDynParts;
@@ -160,7 +158,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
finalPaths = new Path[numFiles];
outWriters = new RecordWriter[numFiles];
updaters = new RecordUpdater[numFiles];
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Created slots for " + numFiles);
}
stat = new Stat();
@@ -378,7 +376,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
serializer.initialize(unsetNestedColumnPaths(hconf), conf.getTableInfo().getProperties());
outputClass = serializer.getSerializedClass();
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Using serializer : " + serializer + " and formatter : " + hiveOutputFormat +
(isCompressed ? " with compression" : ""));
}
@@ -520,13 +518,13 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
Set<Integer> seenBuckets = new HashSet<Integer>();
for (int idx = 0; idx < totalFiles; idx++) {
if (this.getExecContext() != null && this.getExecContext().getFileId() != null) {
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("replace taskId from execContext ");
}
taskId = Utilities.replaceTaskIdFromFilename(taskId, this.getExecContext().getFileId());
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("new taskId: FS " + taskId);
}
@@ -582,11 +580,11 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
try {
if (isNativeTable) {
fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, null);
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]);
}
fsp.outPaths[filesIdx] = fsp.getTaskOutPath(taskId);
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]);
}
} else {
@@ -603,7 +601,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, extension);
}
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]);
}
@@ -743,7 +741,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
fpaths.stat.addToStat(StatsSetupConst.ROW_COUNT, 1);
}
- if ((++numRows == cntr) && isLogInfoEnabled) {
+ if ((++numRows == cntr) && LOG.isInfoEnabled()) {
cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
if (cntr < 0 || numRows < 0) {
cntr = 0;
@@ -778,7 +776,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
fpaths.updaters[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 : ++fpaths.acidFileOffset] = HiveFileFormatUtils.getAcidRecordUpdater(
jc, conf.getTableInfo(), bucketNum, conf, fpaths.outPaths[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 :fpaths.acidFileOffset],
rowInspector, reporter, 0);
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Created updater for bucket number " + bucketNum + " using file " +
fpaths.outPaths[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 :fpaths.acidFileOffset]);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index af5e90f..9c5e7e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -750,7 +750,7 @@ public class GroupByOperator extends Operator<GroupByDesc> {
flushHashTable(true);
hashAggr = false;
} else {
- if (isLogTraceEnabled) {
+ if (LOG.isTraceEnabled()) {
LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl
+ " #total = " + numRowsInput + " reduction = " + 1.0
* (numRowsHashTbl / numRowsInput) + " minReduction = "
@@ -948,7 +948,7 @@ public class GroupByOperator extends Operator<GroupByDesc> {
// Update the number of entries that can fit in the hash table
numEntriesHashTable =
(int) (maxHashTblMemory / (fixedRowSize + (totalVariableSize / numEntriesVarSize)));
- if (isLogTraceEnabled) {
+ if (LOG.isTraceEnabled()) {
LOG.trace("Hash Aggr: #hash table = " + numEntries
+ " #max in hash table = " + numEntriesHashTable);
}
@@ -999,14 +999,14 @@ public class GroupByOperator extends Operator<GroupByDesc> {
}
hashAggregations.clear();
hashAggregations = null;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Hash Table completed flushed");
}
return;
}
int oldSize = hashAggregations.size();
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Hash Tbl flush: #hash table = " + oldSize);
}
Iterator<Map.Entry<KeyWrapper, AggregationBuffer[]>> iter = hashAggregations
@@ -1018,7 +1018,7 @@ public class GroupByOperator extends Operator<GroupByDesc> {
iter.remove();
numDel++;
if (numDel * 10 >= oldSize) {
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
}
return;
@@ -1058,10 +1058,9 @@ public class GroupByOperator extends Operator<GroupByDesc> {
public void flush() throws HiveException{
try {
if (hashAggregations != null) {
- if (isLogInfoEnabled) {
- LOG.info("Begin Hash Table flush: size = "
- + hashAggregations.size());
- }
+ if (LOG.isInfoEnabled()) {
+ LOG.info("Begin Hash Table flush: size = " + hashAggregations.size());
+ }
Iterator iter = hashAggregations.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<KeyWrapper, AggregationBuffer[]> m = (Map.Entry) iter
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
index 3a366f6..f8ea701 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
@@ -275,7 +275,7 @@ public class HashTableSinkOperator extends TerminalOperator<HashTableSinkDesc> i
public void closeOp(boolean abort) throws HiveException {
try {
if (mapJoinTables == null) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("mapJoinTables is null");
}
} else {
@@ -292,7 +292,7 @@ public class HashTableSinkOperator extends TerminalOperator<HashTableSinkDesc> i
protected void flushToFile() throws IOException, HiveException {
// get tmp file URI
Path tmpURI = getExecContext().getLocalWork().getTmpPath();
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Temp URI for side table: " + tmpURI);
}
for (byte tag = 0; tag < mapJoinTables.length; tag++) {
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
index 0282763..a4bca45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
@@ -113,7 +113,7 @@ public class JoinOperator extends CommonJoinOperator<JoinDesc> implements Serial
storage[alias].clearRows();
}
} else {
- if (isLogInfoEnabled && (sz == nextSz)) {
+ if (LOG.isInfoEnabled() && (sz == nextSz)) {
// Print a message if we reached at least 1000 rows for a join operand
// We won't print a message for the last join operand since the size
// will never go to joinEmitInterval.
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 4971707..384e664 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -177,7 +177,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
* requires changes in the Tez API with regard to finding bucket id and
* also ability to schedule tasks to re-use containers that have cached the specific bucket.
*/
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("This is not bucket map join, so cache");
}
@@ -318,7 +318,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
try {
loader.load(mapJoinTables, mapJoinTableSerdes);
} catch (HiveException e) {
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Exception loading hash tables. Clearing partially loaded hash table containers.");
}
@@ -558,7 +558,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
}
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("spilled: " + spilled + " abort: " + abort + ". Clearing spilled partitions.");
}
@@ -572,7 +572,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
&& (this.getExecContext().getLocalWork().getInputFileChangeSensitive())
&& !(HiveConf.getVar(hconf, ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")
&& SparkUtilities.isDedicatedCluster(hconf))) {
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("MR: Clearing all map join table containers.");
}
clearAllTableContainers();
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index 2a46b30..d801ae7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -428,7 +428,7 @@ public class MapOperator extends AbstractMapOperator {
for (String alias : aliases) {
Operator<? extends OperatorDesc> op = conf.getAliasToWork().get(alias);
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Adding alias " + alias + " to work list for file "
+ onefile);
}
@@ -469,7 +469,7 @@ public class MapOperator extends AbstractMapOperator {
if (prev != null && !prev.equals(context.rowObjectInspector)) {
throw new HiveException("Conflict on row inspector for " + context.alias);
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("dump " + context.op + " " + context.rowObjectInspector.getTypeName());
}
}
@@ -509,7 +509,7 @@ public class MapOperator extends AbstractMapOperator {
Path fpath = getExecContext().getCurrentInputPath();
String nominalPath = getNominalPath(fpath);
Map<Operator<?>, MapOpCtx> contexts = opCtxMap.get(nominalPath);
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
StringBuilder builder = new StringBuilder();
for (MapOpCtx context : contexts.values()) {
if (builder.length() > 0) {
@@ -517,7 +517,7 @@ public class MapOperator extends AbstractMapOperator {
}
builder.append(context.alias);
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Processing alias(es) " + builder.toString() + " for file " + fpath);
}
}
@@ -567,7 +567,7 @@ public class MapOperator extends AbstractMapOperator {
protected final void rowsForwarded(int childrenDone, int rows) {
numRows += rows;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
while (numRows >= cntr) {
cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
if (cntr < 0 || numRows < 0) {
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
index 9849243..82d0017 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
@@ -225,13 +225,13 @@ public class MuxOperator extends Operator<MuxDesc> implements Serializable{
@Override
protected void initializeChildren(Configuration hconf) throws HiveException {
state = State.INIT;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Operator " + id + " " + getName() + " initialized");
}
if (childOperators == null || childOperators.isEmpty()) {
return;
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Initializing children of " + id + " " + getName());
}
childOperatorsArray[0].initialize(hconf, outputObjectInspectors);
@@ -242,7 +242,7 @@ public class MuxOperator extends Operator<MuxDesc> implements Serializable{
@Override
public void process(Object row, int tag) throws HiveException {
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
cntrs[tag]++;
if (cntrs[tag] == nextCntrs[tag]) {
LOG.info(id + ", tag=" + tag + ", forwarding " + cntrs[tag] + " rows");
@@ -317,7 +317,7 @@ public class MuxOperator extends Operator<MuxDesc> implements Serializable{
@Override
protected void closeOp(boolean abort) throws HiveException {
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
for (int i = 0; i < numParents; i++) {
LOG.info(id + ", tag=" + i + ", forwarded " + cntrs[i] + " rows");
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 8b04cd4..ffa5f41 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -219,9 +219,6 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
protected transient OutputCollector out;
protected transient final Logger LOG = LoggerFactory.getLogger(getClass().getName());
protected transient final Logger PLOG = LoggerFactory.getLogger(Operator.class.getName()); // for simple disabling logs from all operators
- protected transient final boolean isLogInfoEnabled = LOG.isInfoEnabled() && PLOG.isInfoEnabled();
- protected transient final boolean isLogDebugEnabled = LOG.isDebugEnabled() && PLOG.isDebugEnabled();
- protected transient final boolean isLogTraceEnabled = LOG.isTraceEnabled() && PLOG.isTraceEnabled();
protected transient String alias;
protected transient Reporter reporter;
protected String id;
@@ -330,7 +327,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
return;
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Initializing operator " + this);
}
@@ -369,7 +366,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
|| childOperatorsArray.length != childOperators.size()) {
throw new AssertionError("Internal error during operator initialization");
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Initialization Done " + id + " " + getName());
}
@@ -382,7 +379,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
}
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Initialization Done " + id + " " + getName() + " done is reset.");
}
@@ -495,13 +492,13 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
*/
protected void initializeChildren(Configuration hconf) throws HiveException {
state = State.INIT;
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Operator " + id + " " + getName() + " initialized");
}
if (childOperators == null || childOperators.isEmpty()) {
return;
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Initializing children of " + id + " " + getName());
}
for (int i = 0; i < childOperatorsArray.length; i++) {
@@ -540,7 +537,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
*/
protected void initialize(Configuration hconf, ObjectInspector inputOI,
int parentId) throws HiveException {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Initializing child " + id + " " + getName());
}
// Double the size of the array if needed
@@ -581,7 +578,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
public abstract void process(Object row, int tag) throws HiveException;
protected final void defaultStartGroup() throws HiveException {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Starting group");
}
@@ -589,20 +586,20 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
return;
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Starting group for children:");
}
for (Operator<? extends OperatorDesc> op : childOperators) {
op.startGroup();
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Start group Done");
}
}
protected final void defaultEndGroup() throws HiveException {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Ending group");
}
@@ -610,14 +607,14 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
return;
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Ending group for children:");
}
for (Operator<? extends OperatorDesc> op : childOperators) {
op.endGroup();
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("End group Done");
}
}
@@ -652,9 +649,9 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
if(parent==null){
continue;
}
- if (isLogDebugEnabled) {
- LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state);
- }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state);
+ }
if (!(parent.state == State.CLOSE || parent.state == State.UNINIT)) {
return false;
}
@@ -667,7 +664,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
// since it is called by its parents' main thread, so no
// more than 1 thread should call this close() function.
public void close(boolean abort) throws HiveException {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("close called for operator " + this);
}
@@ -677,7 +674,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
// check if all parents are finished
if (!allInitializedParentsAreClosed()) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Not all parent operators are closed. Not closing.");
}
return;
@@ -686,7 +683,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
// set state as CLOSE as long as all parents are closed
// state == CLOSE doesn't mean all children are also in state CLOSE
state = State.CLOSE;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Closing operator " + this);
}
@@ -705,13 +702,13 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
}
for (Operator<? extends OperatorDesc> op : childOperators) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Closing child = " + op);
}
op.close(abort);
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(id + " Close done");
}
} catch (HiveException e) {
@@ -938,7 +935,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
}
public void logStats() {
- if (isLogInfoEnabled && !statsMap.isEmpty()) {
+ if (LOG.isInfoEnabled() && !statsMap.isEmpty()) {
StringBuilder sb = new StringBuilder();
for (Map.Entry<String, LongWritable> e : statsMap.entrySet()) {
sb.append(e.getKey()).append(":").append(e.getValue()).append(", ");
@@ -1364,7 +1361,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
}
public void setOpTraits(OpTraits metaInfo) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Setting traits (" + metaInfo + ") on " + this);
}
if (conf != null) {
@@ -1375,7 +1372,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
}
public void setStatistics(Statistics stats) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Setting stats (" + stats + ") on " + this);
}
if (conf != null) {
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index e3cb765..d9547b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@ -100,7 +100,7 @@ public class OrcFileMergeOperator extends
if (prevPath == null) {
prevPath = k.getInputPath();
reader = OrcFile.createReader(fs, k.getInputPath());
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("ORC merge file input path: " + k.getInputPath());
}
}
@@ -127,7 +127,7 @@ public class OrcFileMergeOperator extends
}
outWriter = OrcFile.createWriter(outPath, options);
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.info("ORC merge file output path: " + outPath);
}
}
@@ -152,7 +152,7 @@ public class OrcFileMergeOperator extends
outWriter.appendStripe(buffer, 0, buffer.length, v.getStripeInformation(),
v.getStripeStatistics());
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Merged stripe from file " + k.getInputPath() + " [ offset : "
+ v.getStripeInformation().getOffset() + " length: "
+ v.getStripeInformation().getLength() + " row: "
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index e03f4b7..92741ee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -167,7 +167,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
List<ExprNodeDesc> keys = conf.getKeyCols();
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("keys size is " + keys.size());
for (ExprNodeDesc k : keys) {
LOG.debug("Key exprNodeDesc " + k.getExprString());
@@ -215,7 +215,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
tag = conf.getTag();
tagByte[0] = (byte) tag;
skipTag = conf.getSkipTag();
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Using tag = " + tag);
}
@@ -310,7 +310,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
// TODO: this is fishy - we init object inspectors based on first tag. We
// should either init for each tag, or if rowInspector doesn't really
// matter, then we can create this in ctor and get rid of firstRow.
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("keys are " + conf.getOutputKeyColumnNames() + " num distributions: " +
conf.getNumDistributionKeys());
}
@@ -461,7 +461,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
keyHashCode = ObjectInspectorUtils.getBucketHashCode(bucketFieldValues, partitionObjectInspectors);
}
int hashCode = buckNum < 0 ? keyHashCode : keyHashCode * 31 + buckNum;
- if (isLogTraceEnabled) {
+ if (LOG.isTraceEnabled()) {
LOG.trace("Going to return hash code " + hashCode);
}
return hashCode;
@@ -508,7 +508,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
if (null != out) {
numRows++;
runTimeNumRows++;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
if (numRows == cntr) {
cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows;
if (cntr < 0 || numRows < 0) {
@@ -543,7 +543,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
out = null;
random = null;
reducerHash = null;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info(toString() + ": records written - " + numRows);
}
recordCounter.set(numRows);
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 7c1e344..64aa744 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -542,7 +542,7 @@ public class SMBMapJoinOperator extends AbstractMapJoinOperator<SMBJoinDesc> imp
BucketMatcher bucketMatcher = ReflectionUtil.newInstance(bucketMatcherCls, null);
getExecContext().setFileId(bucketMatcherCxt.createFileId(currentInputPath.toString()));
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("set task id: " + getExecContext().getFileId());
}
@@ -768,9 +768,9 @@ public class SMBMapJoinOperator extends AbstractMapJoinOperator<SMBJoinDesc> imp
}
Integer current = top();
if (current == null) {
- if (isLogInfoEnabled) {
- LOG.info("MergeQueue forwarded " + counter + " rows");
- }
+ if (LOG.isInfoEnabled()) {
+ LOG.info("MergeQueue forwarded " + counter + " rows");
+ }
return null;
}
counter++;
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
index 4767af1..e15bbba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
@@ -300,7 +300,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
}
void displayBrokenPipeInfo() {
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("The script did not consume all input data. This is considered as an error.");
LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString() + "=true; to ignore it.");
}
@@ -346,7 +346,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
}
String[] wrappedCmdArgs = addWrapper(cmdArgs);
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Executing " + Arrays.asList(wrappedCmdArgs));
LOG.info("tablename=" + tableName);
LOG.info("partname=" + partitionName);
@@ -680,7 +680,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
long now = System.currentTimeMillis();
// reporter is a member variable of the Operator class.
if (now - lastReportTime > 60 * 1000 && reporter != null) {
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("ErrorStreamProcessor calling reporter.progress()");
}
lastReportTime = now;
@@ -738,7 +738,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
}
proc.processLine(row);
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("StreamThread " + name + " done");
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
index 94af097..9e96126 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
@@ -69,7 +69,7 @@ public class SelectOperator extends Operator<SelectDesc> implements Serializable
eval = ExprNodeEvaluatorFactory.toCachedEvals(eval);
}
output = new Object[eval.length];
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("SELECT " + inputObjInspectors[0].getTypeName());
}
outputObjInspector = initEvaluatorsAndReturnStruct(eval, conf.getOutputColumnNames(),
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index 68477ca..17f2efb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -188,7 +188,7 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
values.add(o == null ? defaultPartitionName : o.toString());
}
partitionSpecs = FileUtils.makePartName(conf.getPartColumns(), values);
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("Stats Gathering found a new partition spec = " + partitionSpecs);
}
}
@@ -331,7 +331,7 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
sc.setStatsTmpDir(conf.getTmpStatsDir());
if (!statsPublisher.connect(sc)) {
// just return, stats gathering should not block the main query.
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
LOG.info("StatsPublishing error: cannot connect to database.");
}
if (isStatsReliable) {
@@ -355,8 +355,8 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
throw new HiveException(ErrorMsg.STATSPUBLISHER_PUBLISHING_ERROR.getErrorCodedMsg());
}
}
- if (isLogInfoEnabled) {
- LOG.info("publishing : " + key + " : " + statsToPublish.toString());
+ if (LOG.isInfoEnabled()) {
+ LOG.info("publishing : " + key + " : " + statsToPublish.toString());
}
}
if (!statsPublisher.closeConnection(sc)) {
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
index 3df5533..99822a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
@@ -126,7 +126,7 @@ public class UnionOperator extends Operator<UnionDesc> implements Serializable {
// to
// create ObjectInspectors.
needsTransform[p] = (inputObjInspectors[p] != outputObjInspector);
- if (isLogInfoEnabled && needsTransform[p]) {
+ if (LOG.isInfoEnabled() && needsTransform[p]) {
LOG.info("Union Operator needs to transform row from parent[" + p
+ "] from " + inputObjInspectors[p] + " to " + outputObjInspector);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
index 1dffff2..3af75d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
@@ -64,8 +64,6 @@ import org.apache.hadoop.util.StringUtils;
public class ExecReducer extends MapReduceBase implements Reducer {
private static final Logger LOG = LoggerFactory.getLogger("ExecReducer");
- private static final boolean isInfoEnabled = LOG.isInfoEnabled();
- private static final boolean isTraceEnabled = LOG.isTraceEnabled();
private static final String PLAN_KEY = "__REDUCE_PLAN__";
// Input value serde needs to be an array to support different SerDe
@@ -96,7 +94,7 @@ public class ExecReducer extends MapReduceBase implements Reducer {
ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
ObjectInspector keyObjectInspector;
- if (isInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
try {
LOG.info("conf classpath = "
+ Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs()));
@@ -190,7 +188,7 @@ public class ExecReducer extends MapReduceBase implements Reducer {
groupKey = new BytesWritable();
} else {
// If a operator wants to do some work at the end of a group
- if (isTraceEnabled) {
+ if (LOG.isTraceEnabled()) {
LOG.trace("End Group");
}
reducer.endGroup();
@@ -207,7 +205,7 @@ public class ExecReducer extends MapReduceBase implements Reducer {
}
groupKey.set(keyWritable.get(), 0, keyWritable.getSize());
- if (isTraceEnabled) {
+ if (LOG.isTraceEnabled()) {
LOG.trace("Start Group");
}
reducer.startGroup();
@@ -263,14 +261,14 @@ public class ExecReducer extends MapReduceBase implements Reducer {
public void close() {
// No row was processed
- if (oc == null && isTraceEnabled) {
+ if (oc == null && LOG.isTraceEnabled()) {
LOG.trace("Close called without any rows processed");
}
try {
if (groupKey != null) {
// If a operator wants to do some work at the end of a group
- if (isTraceEnabled) {
+ if (LOG.isTraceEnabled()) {
LOG.trace("End Group");
}
reducer.endGroup();
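
ExecReducer shows the severest variant being removed: static final booleans
fixed at class-initialization time. Even a successful runtime
reconfiguration leaves such a flag permanently stale for the life of the
JVM. A self-contained sketch of the failure mode, assuming Log4j2 is the
backing implementation and its default configuration; Configurator.setLevel
stands in here for whatever path the new servlet takes internally, and the
logger name is illustrative:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class StaleFlagSketch {
      private static final Logger LOG = LoggerFactory.getLogger("ExecReducer");
      // Captured exactly once, when this class is initialized.
      private static final boolean isTraceEnabled = LOG.isTraceEnabled();

      public static void main(String[] args) {
        Configurator.setLevel("ExecReducer", Level.TRACE);
        System.out.println(isTraceEnabled);         // still false: captured before the change
        System.out.println(LOG.isTraceEnabled());   // true after the change
      }
    }
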
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
index cfe1750..5589a07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
@@ -36,12 +36,11 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache {
private static final Logger LOG = LoggerFactory.getLogger(ObjectCache.class.getName());
- private static final boolean isDebugEnabled = LOG.isDebugEnabled();
@Override
public void release(String key) {
// nothing to do
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(key + " no longer needed");
}
}
@@ -54,7 +53,7 @@ public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache {
@Override
public <T> T retrieve(String key, Callable<T> fn) throws HiveException {
try {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Creating " + key);
}
return fn.call();
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
index 48dfedc..8333cf5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
@@ -58,7 +58,6 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
private static final Logger LOG = LoggerFactory.getLogger(SparkMapRecordHandler.class);
private AbstractMapOperator mo;
private MapredLocalWork localWork = null;
- private boolean isLogInfoEnabled = false;
private ExecMapperContext execContext;
@Override
@@ -66,8 +65,6 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
super.init(job, output, reporter);
- isLogInfoEnabled = LOG.isInfoEnabled();
-
try {
jc = job;
execContext = new ExecMapperContext(jc);
@@ -134,7 +131,7 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
// Since there is no concept of a group, we don't invoke
// startGroup/endGroup for a mapper
mo.process((Writable) value);
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
logMemoryInfo();
}
} catch (Throwable e) {
@@ -182,7 +179,7 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
}
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
logCloseInfo();
}
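
SparkMapRecordHandler (and RecordProcessor further down) held the
per-instance variant: a flag refreshed once in init(), so a level change
made while a long task is running would not be picked up until the next
task initialized. A condensed sketch of the removed shape next to its
replacement; the method names and signatures are simplified stand-ins, not
the real handler API:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PerInitFlagSketch {
      private static final Logger LOG = LoggerFactory.getLogger(PerInitFlagSketch.class);
      private boolean isLogInfoEnabled = false;

      void init() {
        // Before: refreshed only here, so a level change mid-task is
        // invisible to every processRow() call that follows.
        isLogInfoEnabled = LOG.isInfoEnabled();
      }

      void processRow(Object row) {
        if (LOG.isInfoEnabled()) {  // after: always current
          LOG.info("processed " + row);
        }
      }
    }
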
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
index 7eaad18..e473580 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
@@ -75,7 +75,6 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
private final Deserializer[] inputValueDeserializer = new Deserializer[Byte.MAX_VALUE];
private final Object[] valueObject = new Object[Byte.MAX_VALUE];
private final List<Object> row = new ArrayList<Object>(Utilities.reduceFieldNameList.size());
- private final boolean isLogInfoEnabled = LOG.isInfoEnabled();
// TODO: move to DynamicSerDe when it's ready
private Deserializer inputKeyDeserializer;
@@ -338,7 +337,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
row.clear();
row.add(keyObject);
row.add(valueObject[tag]);
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
logMemoryInfo();
}
try {
@@ -390,7 +389,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
reducer.process(batch, tag);
rowIdx = 0;
batchBytes = 0;
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
logMemoryInfo();
}
}
@@ -399,7 +398,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
VectorizedBatchUtil.setBatchSize(batch, rowIdx);
reducer.process(batch, tag);
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
logMemoryInfo();
}
} catch (Exception e) {
@@ -441,7 +440,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
LOG.trace("End Group");
reducer.endGroup();
}
- if (isLogInfoEnabled) {
+ if (LOG.isInfoEnabled()) {
logCloseInfo();
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
index ecd4ddc..b63b673 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.mapred.split.SplitSizeEstimator;
*/
public class ColumnarSplitSizeEstimator implements SplitSizeEstimator {
private static final Logger LOG = LoggerFactory.getLogger(ColumnarSplitSizeEstimator.class);
- private static final boolean isDebugEnabled = LOG.isDebugEnabled();
@Override
public long getEstimatedSize(InputSplit inputSplit) throws IOException {
@@ -39,7 +38,7 @@ public class ColumnarSplitSizeEstimator implements SplitSizeEstimator {
if (inputSplit instanceof ColumnarSplit) {
colProjSize = ((ColumnarSplit) inputSplit).getColumnarProjectionSize();
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Estimated column projection size: " + colProjSize);
}
} else if (inputSplit instanceof HiveInputFormat.HiveInputSplit) {
@@ -47,7 +46,7 @@ public class ColumnarSplitSizeEstimator implements SplitSizeEstimator {
if (innerSplit instanceof ColumnarSplit) {
colProjSize = ((ColumnarSplit) innerSplit).getColumnarProjectionSize();
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Estimated column projection size: " + colProjSize);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
index dcb985f..c5d96e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java
@@ -58,7 +58,7 @@ public class HostAffinitySplitLocationProvider implements SplitLocationProvider
@Override
public String[] getLocations(InputSplit split) throws IOException {
if (!(split instanceof FileSplit)) {
- if (isDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Split: " + split + " is not a FileSplit. Using default locations");
}
return split.getLocations();
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
index 1ce8ee9..b26e0eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java
@@ -44,8 +44,6 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
private static ExecutorService staticPool = Executors.newCachedThreadPool();
- private static final boolean isLogDebugEnabled = LOG.isDebugEnabled();
-
private final Cache<String, Object> registry = CacheBuilder.newBuilder().softValues().build();
private final Map<String, ReentrantLock> locks = new HashMap<String, ReentrantLock>();
@@ -67,7 +65,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
lock.lock();
try {
value = (T) registry.getIfPresent(key);
- if (value != null && isLogDebugEnabled) {
+ if (value != null && LOG.isDebugEnabled()) {
LOG.debug("Found " + key + " in cache");
}
return value;
@@ -87,7 +85,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
try {
value = (T) registry.getIfPresent(key);
if (value != null) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Found " + key + " in cache");
}
return value;
@@ -109,7 +107,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
try {
value = (T) registry.getIfPresent(key);
if (value != null) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Found " + key + " in cache");
}
return value;
@@ -126,7 +124,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
lock.lock();
try {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Caching new object for key: " + key);
}
@@ -153,7 +151,7 @@ public class LlapObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCac
@Override
public void remove(String key) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Removing key: " + key);
}
registry.invalidate(key);
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
index 106a534..d16a97a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
@@ -56,9 +56,6 @@ public abstract class RecordProcessor extends InterruptibleProcessing {
public static final Logger l4j = LoggerFactory.getLogger(RecordProcessor.class);
- // used to log memory usage periodically
- protected boolean isLogInfoEnabled = false;
- protected boolean isLogTraceEnabled = false;
protected MRTaskReporter reporter;
protected PerfLogger perfLogger = SessionState.getPerfLogger();
@@ -82,9 +79,6 @@ public abstract class RecordProcessor extends InterruptibleProcessing {
this.inputs = inputs;
this.outputs = outputs;
- isLogInfoEnabled = l4j.isInfoEnabled();
- isLogTraceEnabled = l4j.isTraceEnabled();
-
checkAbortCondition();
//log classpaths
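
With the cached flags gone from RecordProcessor and its subclasses, a level
change pushed to a running daemon takes effect on the next record rather
than the next process. As a hedged end-to-end illustration only (the host,
port, /conflog mount point, and JSON payload shape below are assumptions to
be checked against Log4j2ConfiguratorServlet's javadoc and the one-line
HttpServer registration in this commit), a client could flip a logger to
DEBUG roughly like this:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class ConfLogClientSketch {
      public static void main(String[] args) throws IOException {
        // Host, port, path, and body are all illustrative assumptions.
        URL url = new URL("http://localhost:15002/conflog");
        String body =
            "{ \"loggers\": [ { \"logger\": \"org.apache.hadoop.hive.ql.exec\","
            + " \"level\": \"DEBUG\" } ] }";
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
          os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }
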
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index f854132..7b8e7ea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -280,7 +280,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
outputProjection = projectionMapping.getOutputColumns();
outputTypeInfos = projectionMapping.getTypeInfos();
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
int[] orderDisplayable = new int[order.length];
for (int i = 0; i < order.length; i++) {
orderDisplayable[i] = (int) order[i];
@@ -338,7 +338,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
* columns and new scratch columns.
*/
protected void setupVOutContext(List<String> outputColumnNames) {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor outputColumnNames " + outputColumnNames);
}
if (outputColumnNames.size() != outputProjection.length) {
@@ -350,7 +350,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
int outputColumn = outputProjection[i];
vOutContext.addProjectionColumn(columnName, outputColumn);
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor addProjectionColumn " + i + " columnName " + columnName + " outputColumn " + outputColumn);
}
}
@@ -423,7 +423,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
needCommonSetup = true;
needHashTableSetup = true;
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
int[] currentScratchColumns = vOutContext.currentScratchColumns();
LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp currentScratchColumns " + Arrays.toString(currentScratchColumns));
@@ -515,7 +515,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
overflowBatch.cols[outputColumn] = VectorizedBatchUtil.createColumnVector(typeInfo);
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp overflowBatch outputColumn " + outputColumn + " class " + overflowBatch.cols[outputColumn].getClass().getSimpleName());
}
}
@@ -526,7 +526,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
*/
protected void commonSetup(VectorizedRowBatch batch) throws HiveException {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("VectorMapJoinInnerCommonOperator commonSetup begin...");
displayBatchColumns(batch, "batch");
displayBatchColumns(overflowBatch, "overflowBatch");
http://git-wip-us.apache.org/repos/asf/hive/blob/ca80968e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
index c4d5113..1c20d93 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
@@ -544,7 +544,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
needHashTableSetup = true;
LOG.info("Created " + vectorMapJoinHashTable.getClass().getSimpleName() + " from " + this.getClass().getSimpleName());
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(CLASS_NAME + " reloadHashTable!");
}
}
@@ -553,7 +553,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
protected void reProcessBigTable(int partitionId)
throws HiveException {
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(CLASS_NAME + " reProcessBigTable enter...");
}
@@ -607,7 +607,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
throw new HiveException(e);
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(CLASS_NAME + " reProcessBigTable exit! " + rowCount + " row processed and " + batchCount + " batches processed");
}
}
@@ -680,7 +680,7 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
if (!aborted && overflowBatch.size > 0) {
forwardOverflow();
}
- if (isLogDebugEnabled) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("VectorMapJoinInnerLongOperator closeOp " + batchCounter + " batches processed");
}
}