You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by pr...@apache.org on 2016/01/19 21:18:47 UTC
hive git commit: HIVE-12682: Reducers in dynamic partitioning job
spend a lot of time running hadoop.conf.Configuration.getOverlay (Prasanth
Jayachandran reviewed by Ashutosh Chauhan)
Repository: hive
Updated Branches:
refs/heads/master 0d379021e -> fe81847bb
HIVE-12682: Reducers in dynamic partitioning job spend a lot of time running hadoop.conf.Configuration.getOverlay (Prasanth Jayachandran reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fe81847b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fe81847b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fe81847b
Branch: refs/heads/master
Commit: fe81847bb50a83ea2d630706c298335497311f43
Parents: 0d37902
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue Jan 19 14:18:30 2016 -0600
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Tue Jan 19 14:18:30 2016 -0600
----------------------------------------------------------------------
.../java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/fe81847b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 2fa3d96..3289cfc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -910,7 +910,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
pathKey = dpDir;
if(conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
String buckNum = row.get(row.size() - 1);
- taskId = Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), buckNum);
+ taskId = Utilities.replaceTaskIdFromFilename(taskId, buckNum);
pathKey = appendToSource(taskId, dpDir);
}
FSPaths fsp2 = valToPaths.get(pathKey);
@@ -1155,7 +1155,6 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
return;
}
- String taskID = Utilities.getTaskIdFromFilename(Utilities.getTaskId(hconf));
String spSpec = conf.getStaticSpec();
for (Map.Entry<String, FSPaths> entry : valToPaths.entrySet()) {
@@ -1165,7 +1164,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
// for bucketed tables, hive.optimize.sort.dynamic.partition optimization
// adds the taskId to the fspKey.
if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
- taskID = Utilities.getTaskIdFromFilename(fspKey);
+ String taskID = Utilities.getTaskIdFromFilename(fspKey);
// if length of (prefix/ds=__HIVE_DEFAULT_PARTITION__/000000_0) is greater than max key prefix
// and if (prefix/ds=10/000000_0) is less than max key prefix, then former will get hashed
to a smaller prefix (MD5hash/000000_0) and the latter will be stored as such in the staging stats table.