Posted to commits@hive.apache.org by st...@apache.org on 2017/07/19 16:27:48 UTC

hive git commit: HIVE-16960: Hive throws an ugly error exception when HDFS sticky bit is set (Janaki Lahorani, reviewed by Sahil Takiar)

Repository: hive
Updated Branches:
  refs/heads/master d4c496bf5 -> fb6b023dc


HIVE-16960: Hive throws an ugly error exception when HDFS sticky bit is set (Janaki Lahorani, reviewed by Sahil Takiar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fb6b023d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fb6b023d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fb6b023d

Branch: refs/heads/master
Commit: fb6b023dc9d6440bbcc72ab849c0187d0188be36
Parents: d4c496b
Author: Janaki Lahorani <ja...@cloudera.com>
Authored: Wed Jul 19 08:59:40 2017 -0700
Committer: Sahil Takiar <st...@cloudera.com>
Committed: Wed Jul 19 08:59:40 2017 -0700

----------------------------------------------------------------------
 .../hive/service/TestDFSErrorHandling.java      | 152 +++++++++++++++++++
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  11 ++
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |  28 ++++
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |  21 ++-
 .../apache/hadoop/hive/ql/metadata/Hive.java    | 115 +++++++++++---
 .../hadoop/hive/ql/metadata/HiveException.java  |  33 +++-
 .../results/clientnegative/dyn_part_max.q.out   |   1 -
 .../service/cli/thrift/ThriftCLIService.java    |   6 +-
 8 files changed, 333 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java b/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java
new file mode 100644
index 0000000..c58767f
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+
+/**
+ * If an operation fails because of a DFS error, it used to surface as an ugly stack trace
+ * at the client.  HIVE-16960 fixes that issue.  This test checks one DFS error related to
+ * the sticky bit: when the sticky bit is set, a user error indicating that access was
+ * denied will be thrown.
+ *
+ * Setup: HIVE_SERVER2_ENABLE_DOAS is set to true, so HS2 performs the operation as the
+ * connected user.
+ * Connect to HS2 as "hive".
+ * Create a file and set the sticky bit on its directory.  The sticky bit prevents an
+ * unprivileged user from moving the file out of the directory.
+ * Perform a "LOAD" operation.  The operation attempts to move the file, resulting in an
+ * error from DFS.  The DFS error is translated to Hive error 20009, which corresponds to
+ * "ACCESS DENIED".  The test checks that 20009 is thrown.
+ *
+ * Additional tests can be added to cover quota-related exceptions.
+ */
+public class TestDFSErrorHandling {
+
+  private static MiniHS2 miniHS2 = null;
+  private static HiveConf hiveConf = null;
+
+  @BeforeClass
+  public static void startServices() throws Exception {
+    hiveConf = new HiveConf();
+    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS, 1);
+    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS, 1);
+    hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, true);
+    hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+
+    // Setting hive.server2.enable.doAs to true ensures that HS2 performs the query operation as
+    // the connected user instead of the user running HS2.
+    hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, true);
+
+    miniHS2 = new MiniHS2.Builder()
+        .withMiniMR()
+        .withRemoteMetastore()
+        .withConf(hiveConf).build();
+
+    miniHS2.start(new HashMap<String, String>());
+  }
+
+  @AfterClass
+  public static void stopServices() throws Exception {
+    if (miniHS2 != null && miniHS2.isStarted()) {
+      miniHS2.stop();
+    }
+  }
+
+  @Test
+  public void testAccessDenied() throws Exception {
+    assertTrue("Test setup failed. MiniHS2 is not initialized",
+        miniHS2 != null && miniHS2.isStarted());
+
+    Class.forName(MiniHS2.getJdbcDriverName());
+    Path scratchDir = new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR));
+
+    MiniDFSShim dfs = miniHS2.getDfs();
+    FileSystem fs = dfs.getFileSystem();
+
+    Path stickyBitDir = new Path(scratchDir, "stickyBitDir");
+
+    fs.mkdirs(stickyBitDir);
+
+    String dataFileDir = hiveConf.get("test.data.files").replace('\\', '/')
+        .replace("c:", "").replace("C:", "").replace("D:", "").replace("d:", "");
+    Path dataFilePath = new Path(dataFileDir, "kv1.txt");
+
+    fs.copyFromLocalFile(dataFilePath, stickyBitDir);
+
+    FsPermission fsPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
+
+    // Sets the sticky bit on stickyBitDir - removing or moving kv1.txt out of stickyBitDir
+    // as an unprivileged user will now result in a DFS error.
+    fs.setPermission(stickyBitDir, fsPermission);
+
+    FileStatus[] files = fs.listStatus(stickyBitDir);
+
+    // Connecting to HS2 as foo.
+    Connection hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "foo", "bar");
+    Statement stmt = hs2Conn.createStatement();
+
+    String tableName = "stickyBitTable";
+
+    stmt.execute("drop table if exists " + tableName);
+    stmt.execute("create table " + tableName + " (foo int, bar string)");
+
+    try {
+      // This statement will attempt to move kv1.txt out of stickyBitDir as user foo.  HS2 is
+      // expected to return error code 20009.
+      stmt.execute("LOAD DATA INPATH '" + stickyBitDir.toUri().getPath() + "/kv1.txt' "
+          + "OVERWRITE INTO TABLE " + tableName);
+      fail("The LOAD above was expected to fail with access denied (error code 20009)");
+    } catch (SQLException se) {
+      // Any other exception type is a genuine test failure and propagates as-is.
+      Assert.assertEquals("Unexpected error code", 20009, se.getErrorCode());
+      System.out.println(String.format("Error Message: %s", se.getMessage()));
+    }
+
+    stmt.execute("drop table if exists " + tableName);
+
+    stmt.close();
+    hs2Conn.close();
+  }
+}
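
With this change, a JDBC client can branch on the vendor error code instead of parsing a
stack trace.  Below is a minimal client-side sketch, not part of this commit: the JDBC URL,
credentials, table, and path are illustrative assumptions, while the codes 20009
(ACCESS_DENIED) and 29999 (UNRESOLVED_RT_EXCEPTION) come from ErrorMsg in this patch.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class StickyBitClientExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical endpoint, credentials, and table; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:hive2://localhost:10000/default", "foo", "bar");
             Statement stmt = conn.createStatement()) {
          stmt.execute("LOAD DATA INPATH '/tmp/stickyBitDir/kv1.txt' OVERWRITE INTO TABLE t");
        } catch (SQLException se) {
          if (se.getErrorCode() == 20009) {
            // ACCESS_DENIED: a concise, single-line message (no stack trace).
            System.err.println("Access denied: " + se.getMessage());
          } else if (se.getErrorCode() == 29999) {
            // UNRESOLVED_RT_EXCEPTION: the full stack is still returned.
            System.err.println(se.getMessage());
          } else {
            throw se;
          }
        }
      }
    }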

http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 5c3e676..4e7c80f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1878,6 +1878,17 @@ public class Driver implements CommandProcessor {
             invokeFailureHooks(perfLogger, hookContext,
               errorMessage + Strings.nullToEmpty(tsk.getDiagnosticsMessage()), result.getTaskError());
             SQLState = "08S01";
+
+            // 08S01 (communication error) is the default SQLState.  Override it with the
+            // SQLState of the ErrorMsg carried by the HiveException, if any.
+            if (result.getTaskError() instanceof HiveException) {
+              ErrorMsg errorMsg = ((HiveException) result.getTaskError()).
+                  getCanonicalErrorMsg();
+              if (errorMsg != ErrorMsg.GENERIC_ERROR) {
+                SQLState = errorMsg.getSQLState();
+              }
+            }
+
             console.printError(errorMessage);
             driverCxt.shutdown();
             // in case we decided to run everything in local mode, restore the
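
The override above means that when a task fails with a canonicalized HiveException, the
client sees that error's SQLState (e.g. "42000" for ACCESS_DENIED) instead of the generic
08S01.  A condensed sketch of the selection, with names mirroring the diff (the sample
exception and messages are fabricated):

    // Condensed sketch of the SQLState override added to Driver above.
    String sqlState = "08S01";  // default: communication error
    Throwable taskError = new HiveException(
        new org.apache.hadoop.security.AccessControlException("Permission denied"),
        "Permission denied", ErrorMsg.ACCESS_DENIED, "Unable to move source to destination");
    if (taskError instanceof HiveException) {
      ErrorMsg errorMsg = ((HiveException) taskError).getCanonicalErrorMsg();
      if (errorMsg != ErrorMsg.GENERIC_ERROR) {
        sqlState = errorMsg.getSQLState();  // "42000" for ACCESS_DENIED
      }
    }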

http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 8642049..9c9d4e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql;
 
+import java.io.FileNotFoundException;
 import java.text.MessageFormat;
 import java.util.HashMap;
 import java.util.Map;
@@ -25,9 +26,13 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.antlr.runtime.tree.Tree;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * List of all error messages.
@@ -495,6 +500,15 @@ public enum ErrorMsg {
    */
   OP_NOT_ALLOWED_IN_TXN(20007, "Operation {0} is not allowed in a transaction ({1},queryId={2}).", true),
   OP_NOT_ALLOWED_WITHOUT_TXN(20008, "Operation {0} is not allowed without an active transaction", true),
+  ACCESS_DENIED(20009, "Access denied: {0}", "42000", true),
+  QUOTA_EXCEEDED(20010, "Quota exceeded: {0}", "64000", true),
+  UNRESOLVED_PATH(20011, "Unresolved path: {0}", "64000", true),
+  FILE_NOT_FOUND(20012, "File not found: {0}", "64000", true),
+  WRONG_FILE_FORMAT(20013, "Wrong file format. Please check the file's format.", "64000", true),
+
+  // A runtime exception whose full stack will be shown to the client
+  UNRESOLVED_RT_EXCEPTION(29999, "Runtime Error: {0}", "58004", true),
+
   //========================== 30000 range starts here ========================//
   STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " +
     "There was a error to retrieve the StatsPublisher, and retrying " +
@@ -580,6 +594,20 @@ public enum ErrorMsg {
   }
 
   /**
+   * Given a remote runtime exception, returns the ErrorMsg object associated with it.
+   * @param e An exception
+   * @return ErrorMsg
+   */
+  public static ErrorMsg getErrorMsg(Exception e) {
+    if (e instanceof AccessControlException) return ACCESS_DENIED;
+    if (e instanceof NSQuotaExceededException) return QUOTA_EXCEEDED;
+    if (e instanceof DSQuotaExceededException) return QUOTA_EXCEEDED;
+    if (e instanceof UnresolvedPathException) return UNRESOLVED_PATH;
+    if (e instanceof FileNotFoundException) return FILE_NOT_FOUND;
+    return UNRESOLVED_RT_EXCEPTION;
+  }
+
+  /**
    * Given an error message string, returns the ErrorMsg object associated with it.
    * @param mesg An error message string
    * @return ErrorMsg
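
The new getErrorMsg(Exception) gives a deterministic mapping from common HDFS exceptions to
canonical errors, with UNRESOLVED_RT_EXCEPTION as the catch-all.  A quick illustration (the
exception messages are fabricated):

    import java.io.FileNotFoundException;
    import org.apache.hadoop.hive.ql.ErrorMsg;
    import org.apache.hadoop.security.AccessControlException;

    public class ErrorMsgMappingExample {
      public static void main(String[] args) {
        // Illustration of the exception-to-ErrorMsg mapping added above.
        System.out.println(ErrorMsg.getErrorMsg(new AccessControlException("sticky bit")));
        // -> ACCESS_DENIED            (code 20009, SQLState "42000")
        System.out.println(ErrorMsg.getErrorMsg(new FileNotFoundException("/tmp/kv1.txt")));
        // -> FILE_NOT_FOUND           (code 20012, SQLState "64000")
        System.out.println(ErrorMsg.getErrorMsg(new IllegalStateException("anything else")));
        // -> UNRESOLVED_RT_EXCEPTION  (code 29999, full stack shown to client)
      }
    }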

http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 53bc9fe..cde2805 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
@@ -356,8 +357,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
                 }
               }
               if (!flag) {
-                throw new HiveException(
-                    "Wrong file format. Please check the file's format.");
+                throw new HiveException(ErrorMsg.WRONG_FILE_FORMAT);
               }
             } else {
               LOG.warn("Skipping file format check as dpCtx is not null");
@@ -555,6 +555,23 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
       }
 
       return 0;
+    } catch (HiveException he) {
+      int errorCode = 1;
+
+      if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
+        errorCode = he.getCanonicalErrorMsg().getErrorCode();
+        if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
+          console.printError("Failed with exception " + he.getMessage(), "\n"
+              + StringUtils.stringifyException(he));
+        } else {
+          console.printError("Failed with exception " + he.getMessage()
+              + "\nRemote Exception: " + he.getRemoteErrorMsg());
+          console.printInfo("\n", StringUtils.stringifyException(he), false);
+        }
+      }
+
+      setException(he);
+      return errorCode;
     } catch (Exception e) {
       console.printError("Failed with exception " + e.getMessage(), "\n"
           + StringUtils.stringifyException(e));
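
With this catch block, a canonicalized failure changes the task's return code from the
generic 1 to the canonical error code, which the driver then reports in its "Execution
Error, return code ..." line.  A condensed sketch:

    // Condensed sketch of the return-code selection in the new catch block above.
    HiveException he = new HiveException(ErrorMsg.ACCESS_DENIED);
    int errorCode = 1;  // generic failure
    if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
      errorCode = he.getCanonicalErrorMsg().getErrorCode();  // 20009 here
    }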

http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 0cfc8d2..02cea7c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.common.BlobStorageUtils;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
@@ -150,6 +151,7 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
@@ -161,6 +163,7 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.common.base.Splitter;
 
 /**
  * This class has functions that implement meta data/DDL operations using calls
@@ -2995,6 +2998,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
         final boolean needToCopy = needToCopy(srcP, destf, srcFs, destFs);
 
         final boolean isRenameAllowed = !needToCopy && !isSrcLocal;
+
+        final String msg = "Unable to move source " + srcP + " to destination " + destf;
+
        // If we do a rename for a non-local file, we will be transferring the original
         // file permissions from source to the destination. Else, in case of mvFile() where we
         // copy from source to destination, we will inherit the destination's parent group ownership.
@@ -3005,22 +3011,26 @@ private void constructOneLBLocationMap(FileStatus fSta,
             if (null != newFiles) {
               newFiles.add(destPath);
             }
-          } catch (IOException ioe) {
-            LOG.error("Failed to move: {}", ioe.getMessage());
-            throw new HiveException(ioe.getCause());
+          } catch (Exception e) {
+            throw getHiveException(e, msg, "Failed to move: {}");
           }
         } else {
           futures.add(pool.submit(new Callable<ObjectPair<Path, Path>>() {
             @Override
-            public ObjectPair<Path, Path> call() throws Exception {
+            public ObjectPair<Path, Path> call() throws HiveException {
               SessionState.setCurrentSessionState(parentSession);
 
-              Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);
+              try {
+                Path destPath =
+                    mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);
 
-              if (null != newFiles) {
-                newFiles.add(destPath);
+                if (null != newFiles) {
+                  newFiles.add(destPath);
+                }
+                return ObjectPair.create(srcP, destPath);
+              } catch (Exception e) {
+                throw getHiveException(e, msg);
               }
-              return ObjectPair.create(srcP, destPath);
             }
           }));
         }
@@ -3033,9 +3043,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
           ObjectPair<Path, Path> pair = future.get();
           LOG.debug("Moved src: {}", pair.getFirst().toString(), ", to dest: {}", pair.getSecond().toString());
         } catch (Exception e) {
-          LOG.error("Failed to move: {}", e.getMessage());
-          pool.shutdownNow();
-          throw new HiveException(e.getCause());
+          throw handlePoolException(pool, e);
         }
       }
     }
@@ -3233,6 +3241,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
     // (2) It is assumed that subdir and dir are in same encryption zone.
     // (3) Move individual files from scr dir to dest dir.
     boolean destIsSubDir = isSubDir(srcf, destf, srcFs, destFs, isSrcLocal);
+    final String msg = "Unable to move source " + srcf + " to destination " + destf;
+
     try {
       if (replace) {
         try{
@@ -3275,6 +3285,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
             for (final FileStatus srcStatus : srcs) {
 
               final Path destFile = new Path(destf, srcStatus.getPath().getName());
+
+              final String poolMsg =
+                  "Unable to move source " + srcStatus.getPath() + " to destination " + destFile;
+
               if (null == pool) {
                 if(!destFs.rename(srcStatus.getPath(), destFile)) {
                   throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest:"
@@ -3283,12 +3297,17 @@ private void constructOneLBLocationMap(FileStatus fSta,
               } else {
                 futures.add(pool.submit(new Callable<Void>() {
                   @Override
-                  public Void call() throws Exception {
+                  public Void call() throws HiveException {
                     SessionState.setCurrentSessionState(parentSession);
                     final String group = srcStatus.getGroup();
-                    if(!destFs.rename(srcStatus.getPath(), destFile)) {
-                      throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest path:"
-                          + destFile + " returned false");
+                    try {
+                      if (!destFs.rename(srcStatus.getPath(), destFile)) {
+                        throw new IOException(
+                            "rename for src path: " + srcStatus.getPath() + " to dest path:"
+                                + destFile + " returned false");
+                      }
+                    } catch (Exception e) {
+                      throw getHiveException(e, poolMsg);
                     }
                     return null;
                   }
@@ -3301,9 +3320,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
                 try {
                   future.get();
                 } catch (Exception e) {
-                  LOG.debug(e.getMessage());
-                  pool.shutdownNow();
-                  throw new HiveException(e.getCause());
+                  throw handlePoolException(pool, e);
                 }
               }
             }
@@ -3316,9 +3333,57 @@ private void constructOneLBLocationMap(FileStatus fSta,
           }
         }
       }
-    } catch (IOException ioe) {
-      throw new HiveException("Unable to move source " + srcf + " to destination " + destf, ioe);
+    } catch (Exception e) {
+      throw getHiveException(e, msg);
+    }
+  }
+
+  static private HiveException getHiveException(Exception e, String msg) {
+    return getHiveException(e, msg, null);
+  }
+
+  static private HiveException handlePoolException(ExecutorService pool, Exception e) {
+    HiveException he = null;
+
+    if (e instanceof HiveException) {
+      he = (HiveException) e;
+      if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
+        if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
+          LOG.error("Failed to move: {}", he.getMessage());
+        } else {
+          LOG.info("Failed to move: {}", he.getRemoteErrorMsg());
+        }
+      }
+    } else {
+      LOG.error("Failed to move: {}", e.getMessage());
+      he = new HiveException(e.getCause());
     }
+    pool.shutdownNow();
+    return he;
+  }
+
+  static private HiveException getHiveException(Exception e, String msg, String logMsg) {
+    // The message from a remote exception includes the entire stack.  The error Hive throws
+    // based on the remote exception needs only the first line.
+    String hiveErrMsg = null;
+
+    if (e.getMessage() != null) {
+      hiveErrMsg = String.format("%s%s%s", msg, ": ",
+          Splitter.on(System.getProperty("line.separator")).split(e.getMessage()).iterator()
+              .next());
+    } else {
+      hiveErrMsg = msg;
+    }
+
+    ErrorMsg errorMsg = ErrorMsg.getErrorMsg(e);
+
+    if (logMsg != null) {
+      LOG.info(logMsg, e.getMessage());
+    }
+
+    if (errorMsg != ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
+      return new HiveException(e, e.getMessage(), errorMsg, hiveErrMsg);
+    } else {
+      return new HiveException(msg, e);
+    }
   }
 
   /**
@@ -3471,10 +3536,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
         for (FileStatus bucketStat : bucketStats) {
           Path bucketSrc = bucketStat.getPath();
           Path bucketDest = new Path(deltaDest, bucketSrc.getName());
+          final String msg = "Unable to move source " + bucketSrc + " to destination " +
+              bucketDest;
           LOG.info("Moving bucket " + bucketSrc.toUri().toString() + " to " +
               bucketDest.toUri().toString());
-          fs.rename(bucketSrc, bucketDest);
-          if (newFiles != null) newFiles.add(bucketDest);
+          try {
+            fs.rename(bucketSrc, bucketDest);
+            if (newFiles != null) newFiles.add(bucketDest);
+          } catch (Exception e) {
+            throw getHiveException(e, msg);
+          }
         }
       } catch (IOException e) {
         throw new HiveException("Error moving acid files " + e.getMessage(), e);

http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
index d017705..a23d8c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
@@ -29,6 +29,12 @@ public class HiveException extends Exception {
    * Standard predefined message with error code and possibly SQL State, etc.
    */
   private ErrorMsg canonicalErrorMsg = ErrorMsg.GENERIC_ERROR;
+
+  /**
+   * Error message returned from a remote exception (e.g. a Hadoop error)
+   */
+  private String remoteErrorMsg;
+
   public HiveException() {
     super();
   }
@@ -46,27 +52,40 @@ public class HiveException extends Exception {
   }
 
   public HiveException(ErrorMsg message, String... msgArgs) {
-    this(null, message, msgArgs);
+    this(null, null, message, msgArgs);
+  }
+
+  public HiveException(Throwable cause, ErrorMsg errorMsg, String... msgArgs) {
+    this(cause, null, errorMsg, msgArgs);
+  }
+
+  public HiveException(Throwable cause, ErrorMsg errorMsg) {
+    this(cause, null, errorMsg, new String[0]);
+  }
+
+  public HiveException(ErrorMsg errorMsg) {
+    this(null, null, errorMsg, new String[0]);
   }
 
   /**
    * This is the recommended constructor to use since it helps use
-   * canonical messages throughout.  
+   * canonical messages throughout and propagate remote errors.
+   *
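+   * @param cause underlying cause, possibly a remote (e.g. Hadoop) exception
+   * @param remErrMsg full error message from the remote exception; may be {@code null}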
    * @param errorMsg Canonical error message
   * @param msgArgs message arguments if the message is parameterized; must be {@code null} if the message takes no arguments
    */
-  public HiveException(Throwable cause, ErrorMsg errorMsg, String... msgArgs) {
+  public HiveException(Throwable cause, String remErrMsg, ErrorMsg errorMsg, String... msgArgs) {
     super(errorMsg.format(msgArgs), cause);
     canonicalErrorMsg = errorMsg;
-
-  }
-  public HiveException(Throwable cause, ErrorMsg errorMsg) {
-    this(cause, errorMsg, new String[0]);
+    remoteErrorMsg = remErrMsg;
   }
+
   /**
    * @return {@link ErrorMsg#GENERIC_ERROR} by default
    */
   public ErrorMsg getCanonicalErrorMsg() {
     return canonicalErrorMsg;
   }
+
+  public String getRemoteErrorMsg() { return remoteErrorMsg; }
 }
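
A hedged usage sketch of the new four-argument constructor, mirroring how
Hive.getHiveException() builds the exception (all strings below are placeholders):

    // Hypothetical call site; strings are placeholders.
    Exception remote = new org.apache.hadoop.security.AccessControlException(
        "Permission denied by sticky bit: user=foo");
    String firstLine = "Unable to move source /src to destination /dest: "
        + "Permission denied by sticky bit: user=foo";
    HiveException he =
        new HiveException(remote, remote.getMessage(), ErrorMsg.ACCESS_DENIED, firstLine);
    // he.getMessage()           -> "Access denied: Unable to move source ..."
    // he.getCanonicalErrorMsg() -> ErrorMsg.ACCESS_DENIED (code 20009, SQLState "42000")
    // he.getRemoteErrorMsg()    -> the raw remote message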

http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/ql/src/test/results/clientnegative/dyn_part_max.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/dyn_part_max.q.out b/ql/src/test/results/clientnegative/dyn_part_max.q.out
index 736bfac..7de4998 100644
--- a/ql/src/test/results/clientnegative/dyn_part_max.q.out
+++ b/ql/src/test/results/clientnegative/dyn_part_max.q.out
@@ -19,5 +19,4 @@ LIMIT 50
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@max_parts
-Failed with exception Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.MoveTask. Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.

http://git-wip-us.apache.org/repos/asf/hive/blob/fb6b023d/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index 2b252d2..9880fc6 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -683,8 +683,10 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
       if (opException != null) {
         resp.setSqlState(opException.getSQLState());
         resp.setErrorCode(opException.getErrorCode());
-        resp.setErrorMessage(org.apache.hadoop.util.StringUtils.
-            stringifyException(opException));
+        // Only UNRESOLVED_RT_EXCEPTION (29999) still returns the full stack; all other
+        // canonical errors return just the message.
+        if (opException.getErrorCode() == 29999) {
+          resp.setErrorMessage(org.apache.hadoop.util.StringUtils.stringifyException(opException));
+        } else {
+          resp.setErrorMessage(opException.getMessage());
+        }
       } else if (executionStatus == TJobExecutionStatus.NOT_AVAILABLE
           && OperationType.EXECUTE_STATEMENT.equals(operationHandle.getOperationType())) {
         resp.getProgressUpdateResponse().setProgressedPercentage(