You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@impala.apache.org by mi...@apache.org on 2023/02/08 22:54:29 UTC
[impala] 01/02: IMPALA-11834: Error reporting addendum
This is an automated email from the ASF dual-hosted git repository.
michaelsmith pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git
commit bbdc24d6fc40c0f57f75ced543d8331fb0f33fd9
Author: Tamas Mate <tm...@apache.org>
AuthorDate: Fri Feb 3 11:56:24 2023 +0100
IMPALA-11834: Error reporting addendum
This commit is an addendum that addresses some review comments that
arrived while the base change was being merged. Code cleanup and
improved error reporting.
Testing:
- Tested the code paths manually
Change-Id: Iece31fa19b7448095462671acb2f02e8c5a405ae
Reviewed-on: http://gerrit.cloudera.org:8080/19481
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
be/src/service/client-request-state.cc | 34 +++++++++++++++++++++++-----------
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/be/src/service/client-request-state.cc b/be/src/service/client-request-state.cc
index 13c9bef63..e0c066de6 100644
--- a/be/src/service/client-request-state.cc
+++ b/be/src/service/client-request-state.cc
@@ -872,28 +872,40 @@ void ClientRequestState::ExecLoadIcebergDataRequestImpl(TLoadDataResp response)
Status query_status = child_query_executor_->WaitForAll(completed_queries);
if (query_status.ok()) {
const char* path = response.create_location.c_str();
+ string delete_err = "Load was successful, but failed to remove staging data under '"
+ + response.create_location + "', HDFS error: ";
hdfsFS hdfs_conn;
Status hdfs_ret = HdfsFsCache::instance()->GetConnection(path, &hdfs_conn);
- if (!hdfs_ret.ok() || hdfsDelete(hdfs_conn, path, 1)) {
- hdfs_ret = Status("Failed to remove staging data under '" + response.create_location
- + "' after query failure: " + query_status.msg().msg());
+ if (!hdfs_ret.ok()) {
lock_guard<mutex> l(lock_);
- RETURN_VOID_IF_ERROR(UpdateQueryStatus(hdfs_ret));
+ RETURN_VOID_IF_ERROR(UpdateQueryStatus(Status(delete_err + hdfs_ret.GetDetail())));
+ }
+ if (hdfsDelete(hdfs_conn, path, 1)) {
+ lock_guard<mutex> l(lock_);
+ RETURN_VOID_IF_ERROR(UpdateQueryStatus(Status(delete_err + strerror(errno))));
}
} else {
const char* dst_path = load_data_req.source_path.c_str();
hdfsFS hdfs_dst_conn;
+ string revert_err = "Failed to load data and failed to revert data movement, "
+ "please check source and staging directory under '" + response.create_location
+ + "', Query error: " + query_status.GetDetail() + " HDFS error: ";
Status hdfs_ret = HdfsFsCache::instance()->GetConnection(dst_path, &hdfs_dst_conn);
+ if (!hdfs_ret.ok()) {
+ lock_guard<mutex> l(lock_);
+ RETURN_VOID_IF_ERROR(UpdateQueryStatus(Status(revert_err + hdfs_ret.GetDetail())));
+ }
for (string src_path : response.loaded_files) {
hdfsFS hdfs_src_conn;
- hdfs_ret = Status(HdfsFsCache::instance()->GetConnection(dst_path, &hdfs_src_conn));
- if (!hdfs_ret.ok() || hdfsMove(hdfs_src_conn, src_path.c_str(), hdfs_dst_conn,
- dst_path)) {
- hdfs_ret = Status("Failed to revert data movement, some files might still be in '"
- + response.create_location + "' after query failure: "
- + query_status.msg().msg());
+ hdfs_ret = HdfsFsCache::instance()->GetConnection(dst_path, &hdfs_src_conn);
+ if (!hdfs_ret.ok()) {
+ lock_guard<mutex> l(lock_);
+ RETURN_VOID_IF_ERROR(UpdateQueryStatus(Status(revert_err
+ + hdfs_ret.GetDetail())));
+ }
+ if (hdfsMove(hdfs_src_conn, src_path.c_str(), hdfs_dst_conn, dst_path)) {
lock_guard<mutex> l(lock_);
- RETURN_VOID_IF_ERROR(UpdateQueryStatus(hdfs_ret));
+ RETURN_VOID_IF_ERROR(UpdateQueryStatus(Status(revert_err + strerror(errno))));
}
}
}