Posted to commits@doris.apache.org by yi...@apache.org on 2022/06/29 04:02:33 UTC

[doris] branch master updated: [log] add more error info for hdfs reader writer (#10475)

This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 5f73668626 [log] add more error info for hdfs reader writer (#10475)
5f73668626 is described below

commit 5f7366862636463fd80fbcd7f905a39a862b60dd
Author: Mingyu Chen <mo...@gmail.com>
AuthorDate: Wed Jun 29 12:02:27 2022 +0800

    [log] add more error info for hdfs reader writer (#10475)
---
 be/src/io/hdfs_file_reader.cpp | 16 +++++++---------
 be/src/io/hdfs_writer.cpp      | 12 ++++++------
 2 files changed, 13 insertions(+), 15 deletions(-)
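
In short, this change touches every HDFS error path in the BE reader and writer: strerror(errno) is replaced with hdfsGetLastError(), which returns the detailed error text recorded by the HDFS client library and is usually far more informative than errno for these calls, and routine per-file open/close messages are demoted from LOG(INFO) to VLOG_NOTICE. A minimal standalone sketch of the resulting error-message pattern follows; the helper name and plain std::string return are illustrative, and only the hdfsGetLastError() call is taken from the HDFS client API assumed by the diff (hdfs/hdfs.h).

    #include <hdfs/hdfs.h>  // HDFS C client API assumed by the BE; declares hdfsGetLastError()
    #include <sstream>
    #include <string>

    // Illustrative helper (not part of the commit): build the kind of message the
    // patch now produces, using the client's own error text instead of strerror(errno).
    std::string hdfs_open_error(const std::string& namenode, const std::string& path) {
        std::stringstream ss;
        ss << "open file failed."
           << " namenode:" << namenode << ", path:" << path
           << ", err: " << hdfsGetLastError();
        return ss.str();
    }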

diff --git a/be/src/io/hdfs_file_reader.cpp b/be/src/io/hdfs_file_reader.cpp
index 765e8cc32f..e993ec5b17 100644
--- a/be/src/io/hdfs_file_reader.cpp
+++ b/be/src/io/hdfs_file_reader.cpp
@@ -91,19 +91,17 @@ Status HdfsFileReader::open() {
         std::stringstream ss;
         ss << "open file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " namenode:" << _namenode << ", path:" << _path << ", err: " << strerror(errno);
+           << " namenode:" << _namenode << ", path:" << _path << ", err: " << hdfsGetLastError();
         return Status::InternalError(ss.str());
     }
-    LOG(INFO) << "open file, namenode:" << _namenode << ", path:" << _path;
+    VLOG_NOTICE << "open file, namenode:" << _namenode << ", path:" << _path;
     return seek(_current_offset);
 }
 
 void HdfsFileReader::close() {
     if (!closed()) {
         if (_hdfs_file != nullptr && _hdfs_fs != nullptr) {
-            std::stringstream ss;
-            ss << "close hdfs file: " << _namenode << _path;
-            LOG(INFO) << ss.str();
+            VLOG_NOTICE << "close hdfs file: " << _namenode << _path;
             //If the hdfs file was valid, the memory associated with it will
             // be freed at the end of this call, even if there was an I/O error
             hdfsCloseFile(_hdfs_fs, _hdfs_file);
@@ -152,7 +150,7 @@ Status HdfsFileReader::readat(int64_t position, int64_t nbytes, int64_t* bytes_r
             std::stringstream ss;
             ss << "hdfsSeek failed. "
                << "(BE: " << BackendOptions::get_localhost() << ")" << _namenode << _path
-               << ", err: " << strerror(errno);
+               << ", err: " << hdfsGetLastError();
             ;
             return Status::InternalError(ss.str());
         }
@@ -163,7 +161,7 @@ Status HdfsFileReader::readat(int64_t position, int64_t nbytes, int64_t* bytes_r
         std::stringstream ss;
         ss << "Read hdfs file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")" << _namenode << _path
-           << ", err: " << strerror(errno);
+           << ", err: " << hdfsGetLastError();
         ;
         return Status::InternalError(ss.str());
     }
@@ -183,7 +181,7 @@ int64_t HdfsFileReader::size() {
         hdfsFileInfo* file_info = hdfsGetPathInfo(_hdfs_fs, _path.c_str());
         if (file_info == nullptr) {
             LOG(WARNING) << "get path info failed: " << _namenode << _path
-                         << ", err: " << strerror(errno);
+                         << ", err: " << hdfsGetLastError();
             ;
             close();
             return -1;
@@ -203,7 +201,7 @@ Status HdfsFileReader::seek(int64_t position) {
         std::stringstream ss;
         ss << "Seek to offset failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " offset=" << position << ", err: " << strerror(errno);
+           << " offset=" << position << ", err: " << hdfsGetLastError();
         return Status::InternalError(ss.str());
     }
     return Status::OK();
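
Besides the error text, the reader diff above demotes the per-file "open file" and "close hdfs file" messages from LOG(INFO) to VLOG_NOTICE, a verbose-logging macro in the Doris BE built on glog's VLOG(level), so these lines only appear when verbose logging is enabled rather than on every open and close. A rough standalone illustration using plain glog; the concrete verbosity level behind VLOG_NOTICE is defined in the BE logging header and is not part of this diff, so level 3 below is only a placeholder.

    #include <glog/logging.h>
    #include <string>

    void log_open(const std::string& namenode, const std::string& path) {
        // Pre-patch behavior: always emitted to the INFO log.
        LOG(INFO) << "open file, namenode:" << namenode << ", path:" << path;

        // Post-patch behavior (level is a placeholder): only emitted when the
        // process runs with a high enough --v / --vmodule verbosity setting.
        VLOG(3) << "open file, namenode:" << namenode << ", path:" << path;
    }
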
diff --git a/be/src/io/hdfs_writer.cpp b/be/src/io/hdfs_writer.cpp
index 1a59bb7a1b..10b7955cbe 100644
--- a/be/src/io/hdfs_writer.cpp
+++ b/be/src/io/hdfs_writer.cpp
@@ -60,14 +60,14 @@ Status HDFSWriter::open() {
     std::string hdfs_dir = hdfs_path.parent_path().string();
     exists = hdfsExists(_hdfs_fs, hdfs_dir.c_str());
     if (exists != 0) {
-        LOG(INFO) << "hdfs dir doesn't exist, create it: " << hdfs_dir;
+        VLOG_NOTICE << "hdfs dir doesn't exist, create it: " << hdfs_dir;
         int ret = hdfsCreateDirectory(_hdfs_fs, hdfs_dir.c_str());
         if (ret != 0) {
             std::stringstream ss;
             ss << "create dir failed. "
                << "(BE: " << BackendOptions::get_localhost() << ")"
                << " namenode: " << _namenode << " path: " << hdfs_dir
-               << ", err: " << strerror(errno);
+               << ", err: " << hdfsGetLastError();
             LOG(WARNING) << ss.str();
             return Status::InternalError(ss.str());
         }
@@ -78,11 +78,11 @@ Status HDFSWriter::open() {
         std::stringstream ss;
         ss << "open file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " namenode:" << _namenode << " path:" << _path << ", err: " << strerror(errno);
+           << " namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
-    LOG(INFO) << "open file. namenode:" << _namenode << ", path:" << _path;
+    VLOG_NOTICE << "open file. namenode:" << _namenode << ", path:" << _path;
     return Status::OK();
 }
 
@@ -96,7 +96,7 @@ Status HDFSWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_len
         std::stringstream ss;
         ss << "write file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << "namenode:" << _namenode << " path:" << _path << ", err: " << strerror(errno);
+           << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
@@ -123,7 +123,7 @@ Status HDFSWriter::close() {
         std::stringstream ss;
         ss << "failed to flush hdfs file. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << "namenode:" << _namenode << " path:" << _path << ", err: " << strerror(errno);
+           << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
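
The writer side applies the same substitution to the write and flush paths above: when hdfsWrite() or hdfsFlush() fails, the Status message now carries the client's error text. A compact sketch of that pattern outside the HDFSWriter class; write_and_flush() is a hypothetical wrapper, and only the hdfs* calls and hdfsGetLastError() come from the HDFS client API.

    #include <hdfs/hdfs.h>
    #include <cstdio>

    // Hypothetical wrapper: returns true on success; on failure prints the kind of
    // message the patch now puts into Status::InternalError, via hdfsGetLastError().
    bool write_and_flush(hdfsFS fs, hdfsFile file, const void* buf, tSize len) {
        if (hdfsWrite(fs, file, buf, len) == -1) {
            std::fprintf(stderr, "write file failed, err: %s\n", hdfsGetLastError());
            return false;
        }
        if (hdfsFlush(fs, file) != 0) {
            std::fprintf(stderr, "failed to flush hdfs file, err: %s\n", hdfsGetLastError());
            return false;
        }
        return true;
    }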

