Posted to common-commits@hadoop.apache.org by jh...@apache.org on 2017/05/19 19:56:14 UTC

hadoop git commit: HDFS-11544: libhdfs++: Revert HDFS-11544.HDFS-8707.001.patch due to invalid memory access regression. Done by James Clampffer

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 d35d9ab2c -> 5be241554


HDFS-11544: libhdfs++: Revert HDFS-11544.HDFS-8707.001.patch due to invalid memory access regression.  Done by James Clampffer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5be24155
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5be24155
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5be24155

Branch: refs/heads/HDFS-8707
Commit: 5be241554d611a3ca3cfb2722e7c73237ed455b0
Parents: d35d9ab
Author: James Clampffer <ja...@hp.com>
Authored: Fri May 19 15:38:17 2017 -0400
Committer: James Clampffer <ja...@hp.com>
Committed: Fri May 19 15:38:17 2017 -0400

----------------------------------------------------------------------
 .../native/libhdfspp/lib/bindings/c/hdfs.cc     | 800 +++++++++----------
 1 file changed, 398 insertions(+), 402 deletions(-)
----------------------------------------------------------------------
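
For context, the net effect of this revert is that hdfs.cc goes back to
clearing errno at the top of each API function's own try block and spelling
out both catch clauses, instead of the reverted patch's resetErrorForApiCall()
helper and CATCH_AND_REPORT macro; the no-op HDFS_API/HDFS_EXT_API tags are
dropped again as well. A minimal, self-contained sketch of the restored shape
is below. ReportException, ReportCaughtNonException and the errstr plumbing
are simplified stand-ins for the real helpers in hdfs.cc, and hdfsToyApiCall
is a hypothetical function used only for illustration:

    #include <cerrno>
    #include <exception>
    #include <string>

    // Simplified stand-ins for the thread-local error plumbing in hdfs.cc.
    thread_local std::string errstr;

    static int ReportException(const std::exception &e) {
      // The real helper maps specific exception/Status types to errno values.
      errstr = e.what();
      errno = EINVAL;
      return -1;
    }

    static int ReportCaughtNonException() {
      errstr = "Caught a non-exception type";
      errno = EINVAL;
      return -1;
    }

    // The shape this revert restores: errno is cleared inside every try block
    // and both catch clauses are written out at each call site.
    int hdfsToyApiCall(void *fs /* hdfsFS in the real code */) {
      try {
        errno = 0;
        if (!fs) {
          errno = ENODEV;
          errstr = "Cannot perform FS operations with null FS handle.";
          return -1;
        }
        // ... forward to the C++ FileSystem implementation here ...
        return 0;
      } catch (const std::exception &e) {
        return ReportException(e);
      } catch (...) {
        return ReportCaughtNonException();
      }
    }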


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5be24155/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
index 4e6c0f3..8c7e34c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
@@ -41,23 +41,6 @@ using namespace std::placeholders;
 
 static constexpr tPort kDefaultPort = 8020;
 
-// Functions exposed through the C API and extended C API get tagged with no-op
-// defines.  This is to make it really clear what's being exposed in a way that
-// won't blend into the comments.  Can also be used to add attributes or disable
-// the extensions e.g. "#define HDFS_EXT_API static"
-// API declared in hdfs.h shared with libhdfs
-#define HDFS_API
-// API extension functions specific to libhdfs++ declared in hdfs_ext.h
-#define HDFS_EXT_API
-
-// Common catch-all case to get exception and return the appropriate error code
-#define CATCH_AND_REPORT  catch (const std::exception & e) { return ReportException(e); } \
-                          catch (...) { return ReportCaughtNonException(); }
-
-
-
-
-
 /* Separate the handles used by the C api from the C++ API*/
 struct hdfs_internal {
   hdfs_internal(FileSystem *p) : filesystem_(p), working_directory_("/") {}
@@ -96,14 +79,9 @@ struct hdfsFile_internal {
 thread_local std::string errstr;
 
 /* Fetch last error that happened in this thread */
-HDFS_EXT_API
 int hdfsGetLastError(char *buf, int len) {
   //No error message
   if(errstr.empty()){
-    if(len > 0) {
-      // Null terminator if there is space
-      *buf = 0;
-    }
     return -1;
   }
 
@@ -126,17 +104,6 @@ int hdfsGetLastError(char *buf, int len) {
   return 0;
 }
 
-/* Clear out last error.  This shouldn't need to be public if functions do it themselves */
-static void resetErrorForApiCall(const char *location = nullptr) {
-  // POSIX spec says caller should be responsible for resetting errno.
-  // Clearing it here just in case.
-  errno = 0;
-  if(location)
-    errstr = std::string(location) + " has not set an error message";
-  else
-    errstr.clear();
-}
-
 /* Event callbacks for next open calls */
 thread_local std::experimental::optional<fs_event_callback> fsEventCallback;
 thread_local std::experimental::optional<file_event_callback> fileEventCallback;
@@ -288,9 +255,7 @@ optional<std::string> getAbsolutePath(hdfsFS fs, const char* path) {
  * C API implementations
  **/
 
-HDFS_API
 int hdfsFileIsOpenForRead(hdfsFile file) {
-  resetErrorForApiCall("hdfsFileIsOpenForRead");
   /* files can only be open for reads at the moment, do a quick check */
   if (!CheckHandle(file)){
     return 0;
@@ -298,9 +263,7 @@ int hdfsFileIsOpenForRead(hdfsFile file) {
   return 1; // Update implementation when we get file writing
 }
 
-HDFS_API
 int hdfsFileIsOpenForWrite(hdfsFile file) {
-  resetErrorForApiCall("hdfsFileIsOpenForWrite");
   /* files can only be open for reads at the moment, so return false */
   CheckHandle(file);
   return -1; // Update implementation when we get file writing
@@ -313,7 +276,11 @@ int hdfsConfGetLong(const char *key, int64_t *val)
     errno = 0;
     hdfsBuilder builder;
     return hdfsBuilderConfGetLong(&builder, key, val);
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
 hdfsFS doHdfsConnect(optional<std::string> nn, optional<tPort> port, optional<std::string> user, const Options & options) {
@@ -365,12 +332,11 @@ hdfsFS doHdfsConnect(optional<std::string> nn, optional<tPort> port, optional<st
   }
 }
 
-HDFS_EXT_API
 hdfsFS hdfsAllocateFileSystem(struct hdfsBuilder *bld) {
-  resetErrorForApiCall("hdfsAllocateFileSystem");
   // Same idea as the first half of doHdfsConnect, but return the wrapped FS before
   // connecting.
   try {
+    errno = 0;
     std::shared_ptr<IoService> io_service = IoService::MakeShared();
 
     int io_thread_count = bld->config.GetOptions().io_threads_;
@@ -401,9 +367,7 @@ hdfsFS hdfsAllocateFileSystem(struct hdfsBuilder *bld) {
   return nullptr;
 }
 
-HDFS_EXT_API
 int hdfsConnectAllocated(hdfsFS fs, struct hdfsBuilder *bld) {
-  resetErrorForApiCall("hdfsConnectAllocated");
   if(!CheckSystem(fs)) {
     return ENODEV;
   }
@@ -456,35 +420,25 @@ int hdfsConnectAllocated(hdfsFS fs, struct hdfsBuilder *bld) {
   return 0;
 }
 
-HDFS_API
 hdfsFS hdfsConnect(const char *nn, tPort port) {
-  resetErrorForApiCall("hdfsConnect");
-  return doHdfsConnect(std::string(nn), port, std::string(""), Options());
+  return hdfsConnectAsUser(nn, port, "");
 }
 
-HDFS_API
 hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user) {
-  resetErrorForApiCall("hdfsConnectAsUser");
   return doHdfsConnect(std::string(nn), port, std::string(user), Options());
 }
 
-HDFS_API
 hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user ) {
-  resetErrorForApiCall("hdfsConnectAsUserNewInstance");
   //libhdfspp always returns a new instance
   return doHdfsConnect(std::string(nn), port, std::string(user), Options());
 }
 
-HDFS_API
 hdfsFS hdfsConnectNewInstance(const char* nn, tPort port) {
-  resetErrorForApiCall("hdfsConnectNewInstance");
   //libhdfspp always returns a new instance
-  return doHdfsConnect(std::string(nn), port, std::string(""), Options());
+  return hdfsConnectAsUser(nn, port, "");
 }
 
-HDFS_EXT_API
 int hdfsCancelPendingConnection(hdfsFS fs) {
-  resetErrorForApiCall("hdfsCancelPendingConnection");
   // todo: stick an enum in hdfs_internal to check the connect state
   if(!CheckSystem(fs)) {
     return ENODEV;
@@ -504,11 +458,10 @@ int hdfsCancelPendingConnection(hdfsFS fs) {
   }
 }
 
-HDFS_API
 int hdfsDisconnect(hdfsFS fs) {
-  resetErrorForApiCall("hdfsDisconnect");
   try
   {
+    errno = 0;
     if (!fs) {
       ReportError(ENODEV, "Cannot disconnect null FS handle.");
       return -1;
@@ -516,15 +469,18 @@ int hdfsDisconnect(hdfsFS fs) {
 
     delete fs;
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags, int bufferSize,
                       short replication, tSize blocksize) {
-  resetErrorForApiCall("hdfsOpenFile");
   try
   {
+    errno = 0;
     (void)flags;
     (void)bufferSize;
     (void)replication;
@@ -556,24 +512,26 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags, int bufferSize,
   }
 }
 
-HDFS_API
 int hdfsCloseFile(hdfsFS fs, hdfsFile file) {
-  resetErrorForApiCall("hdfsCloseFile");
   try
   {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
     delete file;
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize) {
-  resetErrorForApiCall("hdfsGetWorkingDirectory");
   try
   {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return nullptr;
     }
@@ -598,11 +556,10 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize) {
   }
 }
 
-HDFS_API
 int hdfsSetWorkingDirectory(hdfsFS fs, const char* path) {
-  resetErrorForApiCall("hdfsSetWorkingDirectory");
   try
   {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -618,23 +575,25 @@ int hdfsSetWorkingDirectory(hdfsFS fs, const char* path) {
     }
     fs->set_working_directory(withSlash);
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 int hdfsAvailable(hdfsFS fs, hdfsFile file) {
-  resetErrorForApiCall("hdfsAvailable");
   //Since we do not have read ahead implemented, return 0 if fs and file are good;
+  errno = 0;
   if (!CheckSystemAndHandle(fs, file)) {
     return -1;
   }
   return 0;
 }
 
-HDFS_API
 tOffset hdfsGetDefaultBlockSize(hdfsFS fs) {
-  resetErrorForApiCall("hdfsGetDefaultBlockSize");
   try {
+    errno = 0;
     return fs->get_impl()->get_options().block_size;
   } catch (const std::exception & e) {
     ReportException(e);
@@ -645,10 +604,9 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS fs) {
   }
 }
 
-HDFS_API
 tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path) {
-  resetErrorForApiCall("hdfsGetDefaultBlockSizeAtPath");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -675,53 +633,58 @@ tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path) {
   }
 }
 
-HDFS_API
 int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication) {
-  resetErrorForApiCall("hdfsSetReplication");
-  try {
-    if (!CheckSystem(fs)) {
-      return -1;
-    }
-    const optional<std::string> abs_path = getAbsolutePath(fs, path);
-    if(!abs_path) {
-      return -1;
-    }
-    if(replication < 1){
-      return Error(Status::InvalidArgument("SetReplication: argument 'replication' cannot be less than 1"));
-    }
-    Status stat;
-    stat = fs->get_impl()->SetReplication(*abs_path, replication);
-    if (!stat.ok()) {
-      return Error(stat);
+    try {
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        return -1;
+      }
+      const optional<std::string> abs_path = getAbsolutePath(fs, path);
+      if(!abs_path) {
+        return -1;
+      }
+      if(replication < 1){
+        return Error(Status::InvalidArgument("SetReplication: argument 'replication' cannot be less than 1"));
+      }
+      Status stat;
+      stat = fs->get_impl()->SetReplication(*abs_path, replication);
+      if (!stat.ok()) {
+        return Error(stat);
+      }
+      return 0;
+    } catch (const std::exception & e) {
+      return ReportException(e);
+    } catch (...) {
+      return ReportCaughtNonException();
     }
-    return 0;
-  } CATCH_AND_REPORT
 }
 
-HDFS_API
 int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime) {
-  resetErrorForApiCall("hdfsUtime");
-  try {
-    if (!CheckSystem(fs)) {
-      return -1;
-    }
-    const optional<std::string> abs_path = getAbsolutePath(fs, path);
-    if(!abs_path) {
-      return -1;
-    }
-    Status stat;
-    stat = fs->get_impl()->SetTimes(*abs_path, mtime, atime);
-    if (!stat.ok()) {
-      return Error(stat);
+    try {
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        return -1;
+      }
+      const optional<std::string> abs_path = getAbsolutePath(fs, path);
+      if(!abs_path) {
+        return -1;
+      }
+      Status stat;
+      stat = fs->get_impl()->SetTimes(*abs_path, mtime, atime);
+      if (!stat.ok()) {
+        return Error(stat);
+      }
+      return 0;
+    } catch (const std::exception & e) {
+      return ReportException(e);
+    } catch (...) {
+      return ReportCaughtNonException();
     }
-    return 0;
-  } CATCH_AND_REPORT
 }
 
-HDFS_API
 tOffset hdfsGetCapacity(hdfsFS fs) {
-  resetErrorForApiCall("hdfsGetCapacity");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -742,10 +705,9 @@ tOffset hdfsGetCapacity(hdfsFS fs) {
   }
 }
 
-HDFS_API
 tOffset hdfsGetUsed(hdfsFS fs) {
-  resetErrorForApiCall("hdfsGetUsed");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -815,10 +777,9 @@ void StatInfoToHdfsFileInfo(hdfsFileInfo * file_info,
   file_info->mLastAccess = stat_info.access_time;
 }
 
-HDFS_API
 int hdfsExists(hdfsFS fs, const char *path) {
-  resetErrorForApiCall("hdfsExists");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -832,13 +793,16 @@ int hdfsExists(hdfsFS fs, const char *path) {
       return Error(stat);
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path) {
-  resetErrorForApiCall("hdfsGetPathInfo");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
        return nullptr;
     }
@@ -864,64 +828,61 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path) {
   }
 }
 
-HDFS_API
 hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries) {
-  resetErrorForApiCall("hdfsListDirectory");
   try {
-    if (!CheckSystem(fs)) {
-      *numEntries = 0;
-      return nullptr;
-    }
-    const optional<std::string> abs_path = getAbsolutePath(fs, path);
-    if(!abs_path) {
-      return nullptr;
-    }
-    std::vector<StatInfo> stat_infos;
-    Status stat = fs->get_impl()->GetListing(*abs_path, &stat_infos);
-    if (!stat.ok()) {
-      Error(stat);
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        *numEntries = 0;
+        return nullptr;
+      }
+      const optional<std::string> abs_path = getAbsolutePath(fs, path);
+      if(!abs_path) {
+        return nullptr;
+      }
+      std::vector<StatInfo> stat_infos;
+      Status stat = fs->get_impl()->GetListing(*abs_path, &stat_infos);
+      if (!stat.ok()) {
+        Error(stat);
+        *numEntries = 0;
+        return nullptr;
+      }
+      if(stat_infos.empty()){
+        *numEntries = 0;
+        return nullptr;
+      }
+      *numEntries = stat_infos.size();
+      hdfsFileInfo *file_infos = new hdfsFileInfo[stat_infos.size()];
+      for(std::vector<StatInfo>::size_type i = 0; i < stat_infos.size(); i++) {
+        StatInfoToHdfsFileInfo(&file_infos[i], stat_infos.at(i));
+      }
+
+      return file_infos;
+    } catch (const std::exception & e) {
+      ReportException(e);
       *numEntries = 0;
       return nullptr;
-    }
-    if(stat_infos.empty()){
+    } catch (...) {
+      ReportCaughtNonException();
       *numEntries = 0;
       return nullptr;
     }
-    *numEntries = stat_infos.size();
-    hdfsFileInfo *file_infos = new hdfsFileInfo[stat_infos.size()];
-    for(std::vector<StatInfo>::size_type i = 0; i < stat_infos.size(); i++) {
-      StatInfoToHdfsFileInfo(&file_infos[i], stat_infos.at(i));
-    }
-
-    return file_infos;
-  } catch (const std::exception & e) {
-    ReportException(e);
-    *numEntries = 0;
-    return nullptr;
-  } catch (...) {
-    ReportCaughtNonException();
-    *numEntries = 0;
-    return nullptr;
-  }
 }
 
-HDFS_API
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
 {
-  resetErrorForApiCall("hdfsFreeFileInfo");
-  int i;
-  for (i = 0; i < numEntries; ++i) {
-    delete[] hdfsFileInfo[i].mName;
-    delete[] hdfsFileInfo[i].mOwner;
-    delete[] hdfsFileInfo[i].mGroup;
-  }
-  delete[] hdfsFileInfo;
+    errno = 0;
+    int i;
+    for (i = 0; i < numEntries; ++i) {
+        delete[] hdfsFileInfo[i].mName;
+        delete[] hdfsFileInfo[i].mOwner;
+        delete[] hdfsFileInfo[i].mGroup;
+    }
+    delete[] hdfsFileInfo;
 }
 
-HDFS_API
 int hdfsCreateDirectory(hdfsFS fs, const char* path) {
-  resetErrorForApiCall("hdfsCreateDirectory");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -930,39 +891,45 @@ int hdfsCreateDirectory(hdfsFS fs, const char* path) {
       return -1;
     }
     Status stat;
-    //Use default permissions and set true for creating all non-existent parent directories
+    //Use default permissions and set true for creating all non-existant parent directories
     stat = fs->get_impl()->Mkdirs(*abs_path, FileSystem::GetDefaultPermissionMask(), true);
     if (!stat.ok()) {
       return Error(stat);
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 int hdfsDelete(hdfsFS fs, const char* path, int recursive) {
-  resetErrorForApiCall("hdfsDelete");
   try {
-    if (!CheckSystem(fs)) {
-      return -1;
-    }
-    const optional<std::string> abs_path = getAbsolutePath(fs, path);
-    if(!abs_path) {
-      return -1;
-    }
-    Status stat;
-    stat = fs->get_impl()->Delete(*abs_path, recursive);
-    if (!stat.ok()) {
-      return Error(stat);
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        return -1;
+      }
+      const optional<std::string> abs_path = getAbsolutePath(fs, path);
+      if(!abs_path) {
+        return -1;
+      }
+      Status stat;
+      stat = fs->get_impl()->Delete(*abs_path, recursive);
+      if (!stat.ok()) {
+        return Error(stat);
+      }
+      return 0;
+    } catch (const std::exception & e) {
+      return ReportException(e);
+    } catch (...) {
+      return ReportCaughtNonException();
     }
-    return 0;
-  } CATCH_AND_REPORT
 }
 
-HDFS_API
 int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath) {
-  resetErrorForApiCall("hdfsRename");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -977,99 +944,106 @@ int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath) {
       return Error(stat);
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 int hdfsChmod(hdfsFS fs, const char* path, short mode){
-  resetErrorForApiCall("hdfsChmod");
   try {
-    if (!CheckSystem(fs)) {
-      return -1;
-    }
-    const optional<std::string> abs_path = getAbsolutePath(fs, path);
-    if(!abs_path) {
-      return -1;
-    }
-    Status stat = FileSystem::CheckValidPermissionMask(mode);
-    if (!stat.ok()) {
-      return Error(stat);
-    }
-    stat = fs->get_impl()->SetPermission(*abs_path, mode);
-    if (!stat.ok()) {
-      return Error(stat);
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        return -1;
+      }
+      const optional<std::string> abs_path = getAbsolutePath(fs, path);
+      if(!abs_path) {
+        return -1;
+      }
+      Status stat = FileSystem::CheckValidPermissionMask(mode);
+      if (!stat.ok()) {
+        return Error(stat);
+      }
+      stat = fs->get_impl()->SetPermission(*abs_path, mode);
+      if (!stat.ok()) {
+        return Error(stat);
+      }
+      return 0;
+    } catch (const std::exception & e) {
+      return ReportException(e);
+    } catch (...) {
+      return ReportCaughtNonException();
     }
-    return 0;
-  } CATCH_AND_REPORT
 }
 
-HDFS_API
 int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group){
-  resetErrorForApiCall("hdfsChown");
   try {
-    if (!CheckSystem(fs)) {
-      return -1;
-    }
-    const optional<std::string> abs_path = getAbsolutePath(fs, path);
-    if(!abs_path) {
-      return -1;
-    }
-    std::string own = (owner) ? owner : "";
-    std::string grp = (group) ? group : "";
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        return -1;
+      }
+      const optional<std::string> abs_path = getAbsolutePath(fs, path);
+      if(!abs_path) {
+        return -1;
+      }
+      std::string own = (owner) ? owner : "";
+      std::string grp = (group) ? group : "";
 
-    Status stat;
-    stat = fs->get_impl()->SetOwner(*abs_path, own, grp);
-    if (!stat.ok()) {
-      return Error(stat);
+      Status stat;
+      stat = fs->get_impl()->SetOwner(*abs_path, own, grp);
+      if (!stat.ok()) {
+        return Error(stat);
+      }
+      return 0;
+    } catch (const std::exception & e) {
+      return ReportException(e);
+    } catch (...) {
+      return ReportCaughtNonException();
     }
-    return 0;
-  } CATCH_AND_REPORT
 }
 
-HDFS_EXT_API
-hdfsFileInfo * hdfsFind(hdfsFS fs, const char* path, const char* name, uint32_t * numEntries)
-{
-  resetErrorForApiCall("hdfsFind");
+hdfsFileInfo * hdfsFind(hdfsFS fs, const char* path, const char* name, uint32_t * numEntries){
   try {
-    if (!CheckSystem(fs)) {
-      *numEntries = 0;
-      return nullptr;
-    }
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        *numEntries = 0;
+        return nullptr;
+      }
 
-    std::vector<StatInfo>  stat_infos;
-    Status stat = fs->get_impl()->Find(path, name, hdfs::FileSystem::GetDefaultFindMaxDepth(), &stat_infos);
-    if (!stat.ok()) {
-      Error(stat);
+      std::vector<StatInfo>  stat_infos;
+      Status stat = fs->get_impl()->Find(path, name, hdfs::FileSystem::GetDefaultFindMaxDepth(), &stat_infos);
+      if (!stat.ok()) {
+        Error(stat);
+        *numEntries = 0;
+        return nullptr;
+      }
+      //Existing API expects nullptr if size is 0
+      if(stat_infos.empty()){
+        *numEntries = 0;
+        return nullptr;
+      }
+      *numEntries = stat_infos.size();
+      hdfsFileInfo *file_infos = new hdfsFileInfo[stat_infos.size()];
+      for(std::vector<StatInfo>::size_type i = 0; i < stat_infos.size(); i++) {
+        StatInfoToHdfsFileInfo(&file_infos[i], stat_infos.at(i));
+      }
+
+      return file_infos;
+    } catch (const std::exception & e) {
+      ReportException(e);
       *numEntries = 0;
       return nullptr;
-    }
-    //Existing API expects nullptr if size is 0
-    if(stat_infos.empty()){
+    } catch (...) {
+      ReportCaughtNonException();
       *numEntries = 0;
       return nullptr;
     }
-    *numEntries = stat_infos.size();
-    hdfsFileInfo *file_infos = new hdfsFileInfo[stat_infos.size()];
-    for(std::vector<StatInfo>::size_type i = 0; i < stat_infos.size(); i++) {
-      StatInfoToHdfsFileInfo(&file_infos[i], stat_infos.at(i));
-    }
-
-    return file_infos;
-  } catch (const std::exception & e) {
-    ReportException(e);
-    *numEntries = 0;
-    return nullptr;
-  } catch (...) {
-    ReportCaughtNonException();
-    *numEntries = 0;
-    return nullptr;
-  }
 }
 
-HDFS_EXT_API
 int hdfsCreateSnapshot(hdfsFS fs, const char* path, const char* name) {
-  resetErrorForApiCall("hdfsCreateSnapshot");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -1087,13 +1061,16 @@ int hdfsCreateSnapshot(hdfsFS fs, const char* path, const char* name) {
       return Error(stat);
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_EXT_API
 int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name) {
-  resetErrorForApiCall("hdfsDeleteSnapshot");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -1110,13 +1087,16 @@ int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name) {
       return Error(stat);
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_EXT_API
 int hdfsAllowSnapshot(hdfsFS fs, const char* path) {
-  resetErrorForApiCall("hdfsAllowSnapshot");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -1130,13 +1110,16 @@ int hdfsAllowSnapshot(hdfsFS fs, const char* path) {
       return Error(stat);
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_EXT_API
 int hdfsDisallowSnapshot(hdfsFS fs, const char* path) {
-  resetErrorForApiCall("hdfsDisallowSnapshot");
   try {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -1150,15 +1133,18 @@ int hdfsDisallowSnapshot(hdfsFS fs, const char* path) {
       return Error(stat);
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, void *buffer,
                 tSize length) {
-  resetErrorForApiCall("hdfsPread");
   try
   {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -1169,14 +1155,17 @@ tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, void *buffer,
       return Error(stat);
     }
     return (tSize)len;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 tSize hdfsRead(hdfsFS fs, hdfsFile file, void *buffer, tSize length) {
-  resetErrorForApiCall("hdfsRead");
   try
   {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -1188,63 +1177,67 @@ tSize hdfsRead(hdfsFS fs, hdfsFile file, void *buffer, tSize length) {
     }
 
     return (tSize)len;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 int hdfsUnbufferFile(hdfsFile file) {
-  resetErrorForApiCall("hdfsUnbufferFile");
   //Currently we are not doing any buffering
   CheckHandle(file);
   return -1;
 }
 
-HDFS_API
 int hdfsFileGetReadStatistics(hdfsFile file, struct hdfsReadStatistics **stats) {
-  resetErrorForApiCall("hdfsFileGetReadStatistics");
   try
-  {
-    if (!CheckHandle(file)) {
-      return -1;
+    {
+      errno = 0;
+      if (!CheckHandle(file)) {
+        return -1;
+      }
+      *stats = new hdfsReadStatistics;
+      memset(*stats, 0, sizeof(hdfsReadStatistics));
+      (*stats)->totalBytesRead = file->get_impl()->get_bytes_read();
+      return 0;
+    } catch (const std::exception & e) {
+      return ReportException(e);
+    } catch (...) {
+      return ReportCaughtNonException();
     }
-    *stats = new hdfsReadStatistics;
-    memset(*stats, 0, sizeof(hdfsReadStatistics));
-    (*stats)->totalBytesRead = file->get_impl()->get_bytes_read();
-    return 0;
-  } CATCH_AND_REPORT
 }
 
-HDFS_API
 int hdfsFileClearReadStatistics(hdfsFile file) {
-  resetErrorForApiCall("hdfsFileClearReadStatistics");
   try
-  {
-    if (!CheckHandle(file)) {
-      return -1;
+    {
+      errno = 0;
+      if (!CheckHandle(file)) {
+        return -1;
+      }
+      file->get_impl()->clear_bytes_read();
+      return 0;
+    } catch (const std::exception & e) {
+      return ReportException(e);
+    } catch (...) {
+      return ReportCaughtNonException();
     }
-    file->get_impl()->clear_bytes_read();
-    return 0;
-  } CATCH_AND_REPORT
 }
 
-HDFS_API
 int64_t hdfsReadStatisticsGetRemoteBytesRead(const struct hdfsReadStatistics *stats) {
-  resetErrorForApiCall("hdfsReadStatisticsGetRemoteBytesRead");
-  return stats->totalBytesRead - stats->totalLocalBytesRead;
+    return stats->totalBytesRead - stats->totalLocalBytesRead;
 }
 
-HDFS_API
 void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats) {
-  resetErrorForApiCall("hdfsFileFreeReadStatistics");
-  delete stats;
+    errno = 0;
+    delete stats;
 }
 
 /* 0 on success, -1 on error*/
-HDFS_API
 int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
-  resetErrorForApiCall("hdfsSeek");
   try
   {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -1256,14 +1249,17 @@ int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
     }
 
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_API
 tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
-  resetErrorForApiCall("hdfsTell");
   try
   {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
@@ -1275,30 +1271,36 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
     }
 
     return offset;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
 /* extended API */
-HDFS_EXT_API
 int hdfsCancel(hdfsFS fs, hdfsFile file) {
-  resetErrorForApiCall("hdfsCancel");
   try
   {
+    errno = 0;
     if (!CheckSystemAndHandle(fs, file)) {
       return -1;
     }
     static_cast<FileHandleImpl*>(file->get_impl())->CancelOperations();
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
 
-HDFS_EXT_API
 int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations ** locations_out)
 {
-  resetErrorForApiCall("hdfsGetBlockLocations");
   try
   {
+    errno = 0;
     if (!CheckSystem(fs)) {
       return -1;
     }
@@ -1362,12 +1364,15 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations
     }
 
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_EXT_API
 int hdfsFreeBlockLocations(struct hdfsBlockLocations * blockLocations) {
-  resetErrorForApiCall("hdfsFreeBlockLocations");
+  errno = 0;
   if (blockLocations == nullptr)
     return 0;
 
@@ -1386,50 +1391,48 @@ int hdfsFreeBlockLocations(struct hdfsBlockLocations * blockLocations) {
   return 0;
 }
 
-HDFS_API
 char*** hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length) {
-  resetErrorForApiCall("hdfsGetHosts");
   try
-  {
-    if (!CheckSystem(fs)) {
-      return nullptr;
-    }
-    const optional<std::string> abs_path = getAbsolutePath(fs, path);
-    if(!abs_path) {
+    {
+      errno = 0;
+      if (!CheckSystem(fs)) {
+        return nullptr;
+      }
+      const optional<std::string> abs_path = getAbsolutePath(fs, path);
+      if(!abs_path) {
+        return nullptr;
+      }
+      std::shared_ptr<FileBlockLocation> ppLocations;
+      Status stat = fs->get_impl()->GetBlockLocations(*abs_path, start, length, &ppLocations);
+      if (!stat.ok()) {
+        Error(stat);
+        return nullptr;
+      }
+      const std::vector<BlockLocation> & ppBlockLocations = ppLocations->getBlockLocations();
+      char ***hosts = new char**[ppBlockLocations.size() + 1];
+      for (size_t i=0; i < ppBlockLocations.size(); i++) {
+        const std::vector<DNInfo> & ppDNInfos = ppBlockLocations[i].getDataNodes();
+        hosts[i] = new char*[ppDNInfos.size() + 1];
+        for (size_t j=0; j < ppDNInfos.size(); j++) {
+          auto ppDNInfo = ppDNInfos[j];
+          hosts[i][j] = new char[ppDNInfo.getHostname().size() + 1];
+          strncpy(hosts[i][j], ppDNInfo.getHostname().c_str(), ppDNInfo.getHostname().size() + 1);
+        }
+        hosts[i][ppDNInfos.size()] = nullptr;
+      }
+      hosts[ppBlockLocations.size()] = nullptr;
+      return hosts;
+    } catch (const std::exception & e) {
+      ReportException(e);
       return nullptr;
-    }
-    std::shared_ptr<FileBlockLocation> ppLocations;
-    Status stat = fs->get_impl()->GetBlockLocations(*abs_path, start, length, &ppLocations);
-    if (!stat.ok()) {
-      Error(stat);
+    } catch (...) {
+      ReportCaughtNonException();
       return nullptr;
     }
-    const std::vector<BlockLocation> & ppBlockLocations = ppLocations->getBlockLocations();
-    char ***hosts = new char**[ppBlockLocations.size() + 1];
-    for (size_t i=0; i < ppBlockLocations.size(); i++) {
-      const std::vector<DNInfo> & ppDNInfos = ppBlockLocations[i].getDataNodes();
-      hosts[i] = new char*[ppDNInfos.size() + 1];
-      for (size_t j=0; j < ppDNInfos.size(); j++) {
-        auto ppDNInfo = ppDNInfos[j];
-        hosts[i][j] = new char[ppDNInfo.getHostname().size() + 1];
-        strncpy(hosts[i][j], ppDNInfo.getHostname().c_str(), ppDNInfo.getHostname().size() + 1);
-      }
-      hosts[i][ppDNInfos.size()] = nullptr;
-    }
-    hosts[ppBlockLocations.size()] = nullptr;
-    return hosts;
-  } catch (const std::exception & e) {
-    ReportException(e);
-    return nullptr;
-  } catch (...) {
-    ReportCaughtNonException();
-    return nullptr;
-  }
 }
 
-HDFS_API
 void hdfsFreeHosts(char ***blockHosts) {
-  resetErrorForApiCall("hdfsFreeHosts");
+  errno = 0;
   if (blockHosts == nullptr)
     return;
 
@@ -1492,20 +1495,16 @@ event_response file_callback_glue(libhdfspp_file_event_callback handler,
   return event_response::make_ok();
 }
 
-HDFS_EXT_API
 int hdfsPreAttachFSMonitor(libhdfspp_fs_event_callback handler, int64_t cookie)
 {
-  resetErrorForApiCall("hdfsPreAttachFSMonitor");
   fs_event_callback callback = std::bind(fs_callback_glue, handler, cookie, _1, _2, _3);
   fsEventCallback = callback;
   return 0;
 }
 
 
-HDFS_EXT_API
 int hdfsPreAttachFileMonitor(libhdfspp_file_event_callback handler, int64_t cookie)
 {
-  resetErrorForApiCall("hdfsPreAttachFileMonitor");
   file_event_callback callback = std::bind(file_callback_glue, handler, cookie, _1, _2, _3, _4);
   fileEventCallback = callback;
   return 0;
@@ -1542,12 +1541,11 @@ hdfsBuilder::hdfsBuilder(const char * directory) :
   config = LoadDefault(loader);
 }
 
-HDFS_API
 struct hdfsBuilder *hdfsNewBuilder(void)
 {
-  resetErrorForApiCall("hdfsNewBuilder");
   try
   {
+    errno = 0;
     return new struct hdfsBuilder();
   } catch (const std::exception & e) {
     ReportException(e);
@@ -1558,42 +1556,37 @@ struct hdfsBuilder *hdfsNewBuilder(void)
   }
 }
 
-HDFS_API
 void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn)
 {
-  resetErrorForApiCall("hdfsBuilderSetNameNode");
+  errno = 0;
   bld->overrideHost = std::string(nn);
 }
 
-HDFS_API
 void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port)
 {
-  resetErrorForApiCall("hdfsBuilderSetNameNodePort");
+  errno = 0;
   bld->overridePort = port;
 }
 
-HDFS_API
 void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName)
 {
-  resetErrorForApiCall("hdfsBuilderSetUserName");
+  errno = 0;
   if (userName && *userName) {
     bld->user = std::string(userName);
   }
 }
 
-HDFS_API
 void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld) {
-  resetErrorForApiCall("hdfsBuilderSetForceNewInstance");
   //libhdfspp always returns a new instance, so nothing to do
   (void)bld;
+  errno = 0;
 }
 
-HDFS_API
 void hdfsFreeBuilder(struct hdfsBuilder *bld)
 {
-  resetErrorForApiCall("hdfsFreeBuilder");
   try
   {
+    errno = 0;
     delete bld;
   } catch (const std::exception & e) {
     ReportException(e);
@@ -1602,12 +1595,12 @@ void hdfsFreeBuilder(struct hdfsBuilder *bld)
   }
 }
 
-HDFS_API
-int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key, const char *val)
+int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
+                          const char *val)
 {
-  resetErrorForApiCall("hdfsBuilderConfSetStr");
   try
   {
+    errno = 0;
     optional<HdfsConfiguration> newConfig = bld->loader.OverlayValue(bld->config, key, val);
     if (newConfig)
     {
@@ -1619,17 +1612,6 @@ int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key, const char *
       ReportError(EINVAL, "Could not change Builder value");
       return -1;
     }
-  } CATCH_AND_REPORT
-}
-
-HDFS_API
-int hdfsConfGetStr(const char *key, char **val)
-{
-  resetErrorForApiCall("hdfsConfGetStr");
-  try
-  {
-    hdfsBuilder builder;
-    return hdfsBuilderConfGetStr(&builder, key, val);
   } catch (const std::exception & e) {
     return ReportException(e);
   } catch (...) {
@@ -1637,39 +1619,52 @@ int hdfsConfGetStr(const char *key, char **val)
   }
 }
 
-HDFS_API
 void hdfsConfStrFree(char *val)
 {
-  resetErrorForApiCall("hdfsConfStrFree");
+  errno = 0;
   free(val);
 }
 
-HDFS_API
 hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld) {
-  resetErrorForApiCall("hdfsBuilderConnect");
   return doHdfsConnect(bld->overrideHost, bld->overridePort, bld->user, bld->config.GetOptions());
 }
 
-HDFS_API
+int hdfsConfGetStr(const char *key, char **val)
+{
+  try
+  {
+    errno = 0;
+    hdfsBuilder builder;
+    return hdfsBuilderConfGetStr(&builder, key, val);
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
+}
+
 int hdfsConfGetInt(const char *key, int32_t *val)
 {
-  resetErrorForApiCall("hdfsConfGetInt");
   try
   {
+    errno = 0;
     hdfsBuilder builder;
     return hdfsBuilderConfGetInt(&builder, key, val);
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
 //
 //  Extended builder interface
 //
-HDFS_EXT_API
 struct hdfsBuilder *hdfsNewBuilderFromDirectory(const char * configDirectory)
 {
-  resetErrorForApiCall("hdfsNewBuilderFromDirectory");
   try
   {
+    errno = 0;
     return new struct hdfsBuilder(configDirectory);
   } catch (const std::exception & e) {
     ReportException(e);
@@ -1680,12 +1675,12 @@ struct hdfsBuilder *hdfsNewBuilderFromDirectory(const char * configDirectory)
   }
 }
 
-HDFS_EXT_API
-int hdfsBuilderConfGetStr(struct hdfsBuilder *bld, const char *key, char **val)
+int hdfsBuilderConfGetStr(struct hdfsBuilder *bld, const char *key,
+                          char **val)
 {
-  resetErrorForApiCall("hdfsBuilderConfGetStr");
   try
   {
+    errno = 0;
     optional<std::string> value = bld->config.Get(key);
     if (value)
     {
@@ -1698,7 +1693,11 @@ int hdfsBuilderConfGetStr(struct hdfsBuilder *bld, const char *key, char **val)
       *val = nullptr;
     }
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
 // If we're running on a 32-bit platform, we might get 64-bit values that
@@ -1709,12 +1708,11 @@ bool isValidInt(int64_t value)
           value <= std::numeric_limits<int>::max());
 }
 
-HDFS_EXT_API
 int hdfsBuilderConfGetInt(struct hdfsBuilder *bld, const char *key, int32_t *val)
 {
-  resetErrorForApiCall("hdfsBuilderConfGetInt");
   try
   {
+    errno = 0;
     // Pull from default configuration
     optional<int64_t> value = bld->config.GetInt(key);
     if (value)
@@ -1729,15 +1727,18 @@ int hdfsBuilderConfGetInt(struct hdfsBuilder *bld, const char *key, int32_t *val
     // If not found, don't change val
     ReportError(EINVAL, "Could not get Builder value");
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
-HDFS_EXT_API
 int hdfsBuilderConfGetLong(struct hdfsBuilder *bld, const char *key, int64_t *val)
 {
-  resetErrorForApiCall("hdfsBuilderConfGetLong");
   try
   {
+    errno = 0;
     // Pull from default configuration
     optional<int64_t> value = bld->config.GetInt(key);
     if (value)
@@ -1748,7 +1749,11 @@ int hdfsBuilderConfGetLong(struct hdfsBuilder *bld, const char *key, int64_t *va
     // If not found, don't change val
     ReportError(EINVAL, "Could not get Builder value");
     return 0;
-  } CATCH_AND_REPORT
+  } catch (const std::exception & e) {
+    return ReportException(e);
+  } catch (...) {
+    return ReportCaughtNonException();
+  }
 }
 
 /**
@@ -1824,21 +1829,15 @@ void CForwardingLogger::FreeLogData(LogData *data) {
 }
 
 
-HDFS_EXT_API
 LogData *hdfsCopyLogData(LogData *data) {
-  resetErrorForApiCall("hdfsCopyLogData");
   return CForwardingLogger::CopyLogData(data);
 }
 
-HDFS_EXT_API
 void hdfsFreeLogData(LogData *data) {
-  resetErrorForApiCall("hdfsFreeLogData");
   CForwardingLogger::FreeLogData(data);
 }
 
-HDFS_EXT_API
 void hdfsSetLogFunction(void (*callback)(LogData*)) {
-  resetErrorForApiCall("hdfsSetLogFunction");
   CForwardingLogger *logger = new CForwardingLogger();
   logger->SetCallback(callback);
   LogManager::SetLoggerImplementation(std::unique_ptr<LoggerInterface>(logger));
@@ -1850,8 +1849,8 @@ static bool IsLevelValid(int component) {
   return true;
 }
 
+
 //  should use  __builtin_popcnt as optimization on some platforms
-//    *but maybe not Intel: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=62011
 static int popcnt(int val) {
   int bits = sizeof(val) * 8;
   int count = 0;
@@ -1870,27 +1869,24 @@ static bool IsComponentValid(int component) {
   return true;
 }
 
-HDFS_EXT_API
 int hdfsEnableLoggingForComponent(int component) {
-  resetErrorForApiCall("hdfsEnableLoggingForComponent");
+  errno = 0;
   if(!IsComponentValid(component))
     return -1;
   LogManager::EnableLogForComponent(static_cast<LogSourceComponent>(component));
   return 0;
 }
 
-HDFS_EXT_API
 int hdfsDisableLoggingForComponent(int component) {
-  resetErrorForApiCall("hdfsDisableLoggingForComponent");
+  errno = 0;
   if(!IsComponentValid(component))
     return -1;
   LogManager::DisableLogForComponent(static_cast<LogSourceComponent>(component));
   return 0;
 }
 
-HDFS_EXT_API
 int hdfsSetLoggingLevel(int level) {
-  resetErrorForApiCall("hdfsSetLoggingLevel");
+  errno = 0;
   if(!IsLevelValid(level))
     return -1;
   LogManager::SetLogLevel(static_cast<LogLevel>(level));
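
One caller-visible consequence of this revert (see the hdfsGetLastError hunk
near the top of the diff): when no error message has been recorded for the
current thread, hdfsGetLastError() once again returns -1 without writing a
null terminator into the caller's buffer, so callers that print the buffer
unconditionally should initialize it themselves. A small usage sketch against
the public C API follows; the namenode address and path are assumptions for
illustration, and the include paths are as laid out in the libhdfs++ source
tree and may differ in an installed layout:

    #include <cstdio>

    #include "hdfs/hdfs.h"        // libhdfs-compatible C API
    #include "hdfspp/hdfs_ext.h"  // libhdfs++ extension API (hdfsGetLastError)

    int main() {
      hdfsFS fs = hdfsConnect("localhost", 8020);  // assumed namenode
      if (!fs) {
        return 1;
      }

      char errbuf[4096];
      errbuf[0] = '\0';  // after this revert the buffer is left untouched when
                         // no message is set, so pre-initialize it before use

      if (hdfsExists(fs, "/tmp/some-path") != 0) {  // assumed path
        if (hdfsGetLastError(errbuf, (int)sizeof(errbuf)) == 0) {
          fprintf(stderr, "hdfsExists failed: %s\n", errbuf);
        } else {
          fprintf(stderr, "hdfsExists failed (no error message recorded)\n");
        }
      }

      hdfsDisconnect(fs);
      return 0;
    }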

