Posted to commits@hive.apache.org by ga...@apache.org on 2017/12/18 23:25:48 UTC

[05/50] [abbrv] hive git commit: HIVE-18271: Druid Insert into fails with exception when committing files (Jason Dere, reviewed by Ashutosh Chauhan)

HIVE-18271: Druid Insert into fails with exception when committing files (Jason Dere, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a96564cb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a96564cb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a96564cb

Branch: refs/heads/standalone-metastore
Commit: a96564cbc787ed8665c1bb6b3c3a0e9d8440b926
Parents: 8ab523b
Author: Jason Dere <jd...@hortonworks.com>
Authored: Thu Dec 14 10:59:41 2017 -0800
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Thu Dec 14 10:59:41 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/FileSinkOperator.java     | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a96564cb/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 219d1ad..1ec6ac8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -403,7 +403,6 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   protected transient FileSystem fs;
   protected transient Serializer serializer;
   protected final transient LongWritable row_count = new LongWritable();
-  private transient boolean isNativeTable = true;
 
   /**
    * The evaluators for the multiFile sprayer. If the table under consideration has 1000 buckets,
@@ -486,7 +485,6 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     try {
       this.hconf = hconf;
       filesCreated = false;
-      isNativeTable = !conf.getTableInfo().isNonNative();
       isTemporary = conf.isTemporary();
       multiFileSpray = conf.isMultiFileSpray();
       this.isBucketed = hconf.getInt(hive_metastoreConstants.BUCKET_COUNT, 0) > 0;
@@ -716,7 +714,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       assert filesIdx == numFiles;
 
       // in recent hadoop versions, use deleteOnExit to clean tmp files.
-      if (isNativeTable && fs != null && fsp != null && !conf.isMmTable()) {
+      if (isNativeTable() && fs != null && fsp != null && !conf.isMmTable()) {
         autoDelete = fs.deleteOnExit(fsp.outPaths[0]);
       }
     } catch (Exception e) {
@@ -730,7 +728,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   protected void createBucketForFileIdx(FSPaths fsp, int filesIdx)
       throws HiveException {
     try {
-      fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable, isSkewedStoredAsSubDirectories);
+      fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable(), isSkewedStoredAsSubDirectories);
       if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
         Utilities.FILE_OP_LOGGER.trace("createBucketForFileIdx " + filesIdx + ": final path " + fsp.finalPaths[filesIdx]
           + "; out path " + fsp.outPaths[filesIdx] +" (spec path " + specPath + ", tmp path "
@@ -740,7 +738,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]);
       }
 
-      if (isNativeTable && !conf.isMmTable()) {
+      if (isNativeTable() && !conf.isMmTable()) {
         // in recent hadoop versions, use deleteOnExit to clean tmp files.
         autoDelete = fs.deleteOnExit(fsp.outPaths[filesIdx]);
       }
@@ -1318,7 +1316,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           }
         }
 
-        if (isNativeTable) {
+        if (isNativeTable()) {
           fsp.commit(fs, commitPaths);
         }
       }
@@ -1335,7 +1333,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       // Hadoop always call close() even if an Exception was thrown in map() or
       // reduce().
       for (FSPaths fsp : valToPaths.values()) {
-        fsp.abortWriters(fs, abort, !autoDelete && isNativeTable && !conf.isMmTable());
+        fsp.abortWriters(fs, abort, !autoDelete && isNativeTable() && !conf.isMmTable());
       }
     }
     fsp = prevFsp = null;
@@ -1359,7 +1357,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   public void jobCloseOp(Configuration hconf, boolean success)
       throws HiveException {
     try {
-      if ((conf != null) && isNativeTable) {
+      if ((conf != null) && isNativeTable()) {
         Path specPath = conf.getDirName();
         String unionSuffix = null;
         DynamicPartitionCtx dpCtx = conf.getDynPartCtx();
@@ -1585,4 +1583,8 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     }
     return conf;
   }
+
+  private boolean isNativeTable() {
+    return !conf.getTableInfo().isNonNative();
+  }
 }
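
----------------------------------------------------------------------

For context on the change above: the diff removes the transient field
isNativeTable, which defaulted to true and was only corrected inside
initializeOp(), and replaces every read with an isNativeTable() accessor
that derives the answer from conf on each call. A plausible reading of
the HIVE-18271 failure is that a lifecycle phase such as jobCloseOp()
consulted the cached field without initializeOp() having run, so a
non-native (Druid) table was treated as native when committing files.
The sketch below is a minimal, self-contained model of that
before/after pattern; every class name in it is a hypothetical
stand-in, not the real Hive code.

/** Minimal, self-contained model of the fix; all class names here are
 *  hypothetical stand-ins, not the real Hive types. */
public class NativeTableFlagSketch {

  /** Stand-in for Hive's TableDesc. */
  static final class TableDescStub {
    private final boolean nonNative;
    TableDescStub(boolean nonNative) { this.nonNative = nonNative; }
    boolean isNonNative() { return nonNative; }
  }

  /** Stand-in for FileSinkDesc; available in every lifecycle phase. */
  static final class FileSinkDescStub {
    private final TableDescStub tableInfo;
    FileSinkDescStub(TableDescStub tableInfo) { this.tableInfo = tableInfo; }
    TableDescStub getTableInfo() { return tableInfo; }
  }

  private final FileSinkDescStub conf;

  // Before the fix: a cached field defaulting to true, corrected only in
  // initializeOp(). Any phase reached without that call saw a stale 'true'.
  //   private transient boolean isNativeTable = true;

  NativeTableFlagSketch(FileSinkDescStub conf) {
    this.conf = conf;
  }

  // After the fix: derive the flag from conf on demand, so there is no
  // window in which a wrong default can be observed.
  private boolean isNativeTable() {
    return !conf.getTableInfo().isNonNative();
  }

  void jobCloseOp() {
    if (isNativeTable()) {
      System.out.println("native table: move/commit output files");
    } else {
      System.out.println("non-native table (e.g. Druid): skip file commit");
    }
  }

  public static void main(String[] args) {
    // Non-native table (isNonNative() == true): with the accessor, the
    // flag is correct even though initializeOp() never ran.
    new NativeTableFlagSketch(new FileSinkDescStub(new TableDescStub(true)))
        .jobCloseOp();
  }
}

Deriving the flag on demand trades a negligible per-call cost for the
guarantee that every phase of the operator's lifecycle observes a
consistent value.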