You are viewing a plain text version of this content. The canonical link for it (stripped in this plain-text rendering) is available in the Apache mailing list archives.
Posted to commits@hive.apache.org by go...@apache.org on 2019/08/13 21:22:46 UTC
[hive] branch master updated: HIVE-22075: Fix the max-reducers=1
regression from HIVE-14200 (Gopal V, reviewed by Jesus Camacho Rodriguez)
This is an automated email from the ASF dual-hosted git repository.
gopalv pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new dc24ce6 HIVE-22075: Fix the max-reducers=1 regression from HIVE-14200 (Gopal V, reviewed by Jesus Camacho Rodriguez)
dc24ce6 is described below
commit dc24ce69d06d3371cbec9125e43e122a6dbf47ca
Author: Gopal V <go...@apache.org>
AuthorDate: Tue Aug 13 14:22:29 2019 -0700
HIVE-22075: Fix the max-reducers=1 regression from HIVE-14200 (Gopal V, reviewed by Jesus Camacho Rodriguez)
Signed-off-by: Gopal V <go...@apache.org>
---
.../hadoop/hive/ql/optimizer/physical/LlapDecider.java | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
index 0053997..2f2f04f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
@@ -173,19 +173,22 @@ public class LlapDecider implements PhysicalPlanResolver {
return; // Not based on ARP and cannot assume uniform distribution, bail.
}
clusterState.initClusterInfo();
- int targetCount = 0;
+ final int targetCount;
+ final int executorCount;
+ final int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
if (!clusterState.hasClusterInfo()) {
LOG.warn("Cannot determine LLAP cluster information");
- targetCount = (int)Math.ceil(minReducersPerExec * 1 * executorsPerNode);
+ executorCount = executorsPerNode; // assume 1 node
} else {
- targetCount = (int)Math.ceil(minReducersPerExec * (clusterState.getKnownExecutorCount()
- + clusterState.getNodeCountWithUnknownExecutors() * executorsPerNode));
+ executorCount =
+ clusterState.getKnownExecutorCount() + executorsPerNode
+ * clusterState.getNodeCountWithUnknownExecutors();
}
- // We only increase the targets here.
+ targetCount = Math.min(maxReducers, (int) Math.ceil(minReducersPerExec * executorCount));
+ // We only increase the targets here, but we stay below maxReducers
if (reduceWork.isAutoReduceParallelism()) {
// Do not exceed the configured max reducers.
- int newMin = Math.min(conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS),
- Math.max(reduceWork.getMinReduceTasks(), targetCount));
+ int newMin = Math.min(maxReducers, Math.max(reduceWork.getMinReduceTasks(), targetCount));
if (newMin < reduceWork.getMaxReduceTasks()) {
reduceWork.setMinReduceTasks(newMin);
reduceWork.getEdgePropRef().setAutoReduce(conf, true, newMin,