Posted to common-commits@hadoop.apache.org by bo...@apache.org on 2016/06/06 14:45:15 UTC

hadoop git commit: HDFS-10454: libhdfspp: Move NameNodeOp to a separate file. Contributed by Anatoli Shein.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 f206a36c9 -> ed43f07ad


HDFS-10454: libhdfspp: Move NameNodeOp to a separate file.  Contributed by Anatoli Shein.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed43f07a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed43f07a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed43f07a

Branch: refs/heads/HDFS-8707
Commit: ed43f07ad921efcec743c9276ab8da11c4bddf77
Parents: f206a36
Author: Bob Hansen <bo...@hpe.com>
Authored: Mon Jun 6 10:45:05 2016 -0400
Committer: Bob Hansen <bo...@hpe.com>
Committed: Mon Jun 6 10:45:05 2016 -0400

----------------------------------------------------------------------
 .../main/native/libhdfspp/lib/fs/CMakeLists.txt |   2 +-
 .../main/native/libhdfspp/lib/fs/filesystem.cc  | 170 ----------------
 .../main/native/libhdfspp/lib/fs/filesystem.h   |  55 +----
 .../libhdfspp/lib/fs/namenode_operations.cc     | 200 +++++++++++++++++++
 .../libhdfspp/lib/fs/namenode_operations.h      |  76 +++++++
 5 files changed, 278 insertions(+), 225 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed43f07a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt
index 7d750b2..0bce70d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt
@@ -16,6 +16,6 @@
 # limitations under the License.
 #
 
-add_library(fs_obj OBJECT filesystem.cc filehandle.cc bad_datanode_tracker.cc)
+add_library(fs_obj OBJECT filesystem.cc filehandle.cc bad_datanode_tracker.cc namenode_operations.cc)
 add_dependencies(fs_obj proto)
 add_library(fs $<TARGET_OBJECTS:fs_obj>)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed43f07a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
index eace6fa..b05bc3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
@@ -17,11 +17,6 @@
  */
 
 #include "filesystem.h"
-#include "common/continuation/asio.h"
-#include "common/util.h"
-#include "common/logging.h"
-
-#include <asio/ip/tcp.hpp>
 
 #include <functional>
 #include <limits>
@@ -42,171 +37,6 @@ using ::asio::ip::tcp;
 
 static constexpr uint16_t kDefaultPort = 8020;
 
-
-/*****************************************************************************
- *                    NAMENODE OPERATIONS
- ****************************************************************************/
-
-void NameNodeOperations::Connect(const std::string &cluster_name,
-                                 const std::string &server,
-                             const std::string &service,
-                             std::function<void(const Status &)> &&handler) {
-  using namespace asio_continuation;
-  typedef std::vector<tcp::endpoint> State;
-  auto m = Pipeline<State>::Create();
-  m->Push(Resolve(io_service_, server, service,
-                  std::back_inserter(m->state())))
-      .Push(Bind([this, m, cluster_name](const Continuation::Next &next) {
-        engine_.Connect(cluster_name, m->state(), next);
-      }));
-  m->Run([this, handler](const Status &status, const State &) {
-    handler(status);
-  });
-}
-
-void NameNodeOperations::GetBlockLocations(const std::string & path,
-  std::function<void(const Status &, std::shared_ptr<const struct FileInfo>)> handler)
-{
-  using ::hadoop::hdfs::GetBlockLocationsRequestProto;
-  using ::hadoop::hdfs::GetBlockLocationsResponseProto;
-
-  LOG_TRACE(kFileSystem, << "NameNodeOperations::GetBlockLocations("
-                           << FMT_THIS_ADDR << ", path=" << path << ", ...) called");
-
-  GetBlockLocationsRequestProto req;
-  req.set_src(path);
-  req.set_offset(0);
-  req.set_length(std::numeric_limits<long long>::max());
-
-  auto resp = std::make_shared<GetBlockLocationsResponseProto>();
-
-  namenode_.GetBlockLocations(&req, resp, [resp, handler](const Status &stat) {
-    if (stat.ok()) {
-      auto file_info = std::make_shared<struct FileInfo>();
-      auto locations = resp->locations();
-
-      file_info->file_length_ = locations.filelength();
-      file_info->last_block_complete_ = locations.islastblockcomplete();
-      file_info->under_construction_ = locations.underconstruction();
-
-      for (const auto &block : locations.blocks()) {
-        file_info->blocks_.push_back(block);
-      }
-
-      if (!locations.islastblockcomplete() &&
-          locations.has_lastblock() && locations.lastblock().b().numbytes()) {
-        file_info->blocks_.push_back(locations.lastblock());
-        file_info->file_length_ += locations.lastblock().b().numbytes();
-      }
-
-      handler(stat, file_info);
-    } else {
-      handler(stat, nullptr);
-    }
-  });
-}
-
-void NameNodeOperations::GetFileInfo(const std::string & path,
-  std::function<void(const Status &, const StatInfo &)> handler)
-{
-  using ::hadoop::hdfs::GetFileInfoRequestProto;
-  using ::hadoop::hdfs::GetFileInfoResponseProto;
-
-  LOG_TRACE(kFileSystem, << "NameNodeOperations::GetFileInfo("
-                           << FMT_THIS_ADDR << ", path=" << path << ") called");
-
-  GetFileInfoRequestProto req;
-  req.set_src(path);
-
-  auto resp = std::make_shared<GetFileInfoResponseProto>();
-
-  namenode_.GetFileInfo(&req, resp, [resp, handler, path](const Status &stat) {
-    if (stat.ok()) {
-      // For non-existent files, the server will respond with an OK message but
-      //   no fs in the protobuf.
-      if(resp -> has_fs()){
-          struct StatInfo stat_info;
-          stat_info.path=path;
-          HdfsFileStatusProtoToStatInfo(stat_info, resp->fs());
-          handler(stat, stat_info);
-        } else {
-          std::string errormsg = "No such file or directory: " + path;
-          Status statNew = Status::PathNotFound(errormsg.c_str());
-          handler(statNew, StatInfo());
-        }
-    } else {
-      handler(stat, StatInfo());
-    }
-  });
-}
-
-void NameNodeOperations::GetListing(
-    const std::string & path,
-    std::function<void(const Status &, std::shared_ptr<std::vector<StatInfo>> &, bool)> handler,
-    const std::string & start_after) {
-  using ::hadoop::hdfs::GetListingRequestProto;
-  using ::hadoop::hdfs::GetListingResponseProto;
-
-  LOG_TRACE(
-      kFileSystem,
-      << "NameNodeOperations::GetListing(" << FMT_THIS_ADDR << ", path=" << path << ") called");
-
-  GetListingRequestProto req;
-  req.set_src(path);
-  req.set_startafter(start_after.c_str());
-  req.set_needlocation(false);
-
-  auto resp = std::make_shared<GetListingResponseProto>();
-
-  namenode_.GetListing(
-      &req,
-      resp,
-      [resp, handler, path](const Status &stat) {
-        if (stat.ok()) {
-          if(resp -> has_dirlist()){
-            std::shared_ptr<std::vector<StatInfo>> stat_infos(new std::vector<StatInfo>);
-            for (::hadoop::hdfs::HdfsFileStatusProto const& fs : resp->dirlist().partiallisting()) {
-              StatInfo si;
-              si.path=fs.path();
-              HdfsFileStatusProtoToStatInfo(si, fs);
-              stat_infos->push_back(si);
-            }
-            handler(stat, stat_infos, resp->dirlist().remainingentries() > 0);
-          } else {
-            std::string errormsg = "No such file or directory: " + path;
-            Status statNew = Status::PathNotFound(errormsg.c_str());
-            std::shared_ptr<std::vector<StatInfo>> stat_infos;
-            handler(statNew, stat_infos, false);
-          }
-        } else {
-          std::shared_ptr<std::vector<StatInfo>> stat_infos;
-          handler(stat, stat_infos, false);
-        }
-      });
-}
-
-
-void NameNodeOperations::SetFsEventCallback(fs_event_callback callback) {
-  engine_.SetFsEventCallback(callback);
-}
-
-void NameNodeOperations::HdfsFileStatusProtoToStatInfo(
-    hdfs::StatInfo & stat_info,
-    const ::hadoop::hdfs::HdfsFileStatusProto & fs) {
-  stat_info.file_type = fs.filetype();
-  stat_info.length = fs.length();
-  stat_info.permissions = fs.permission().perm();
-  stat_info.owner = fs.owner();
-  stat_info.group = fs.group();
-  stat_info.modification_time = fs.modification_time();
-  stat_info.access_time = fs.access_time();
-  stat_info.symlink = fs.symlink();
-  stat_info.block_replication = fs.block_replication();
-  stat_info.blocksize = fs.blocksize();
-  stat_info.fileid = fs.fileid();
-  stat_info.children_num = fs.childrennum();
-}
-
 /*****************************************************************************
  *                    FILESYSTEM BASE CLASS
  ****************************************************************************/
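
Every operation removed above follows the same asynchronous shape: build a
request proto, allocate the response on the heap, and hand the RPC layer a
completion callback that captures the response via shared_ptr so the buffer
survives until the reply arrives. Below is a minimal compilable sketch of
that shape; the types and the AsyncGetFileInfo function are hypothetical
stand-ins, not the real generated protobuf or RpcEngine API.

#include <functional>
#include <iostream>
#include <memory>
#include <string>

// Hypothetical stand-ins for the generated protobuf and RPC machinery.
struct Status {
  bool ok_ = true;
  bool ok() const { return ok_; }
};
struct GetFileInfoRequestProto  { std::string src; };
struct GetFileInfoResponseProto { bool has_fs = false; };

// The "engine" fills the response and invokes the callback when the RPC
// completes; the response must outlive the call, hence the shared_ptr.
void AsyncGetFileInfo(const GetFileInfoRequestProto &req,
                      std::shared_ptr<GetFileInfoResponseProto> resp,
                      std::function<void(const Status &)> cb) {
  resp->has_fs = !req.src.empty();  // pretend the server answered
  cb(Status{});
}

int main() {
  GetFileInfoRequestProto req{"/tmp/x"};
  auto resp = std::make_shared<GetFileInfoResponseProto>();
  // Capture resp by value, exactly as the lambdas above do: the by-value
  // capture is what keeps the response alive across the asynchronous gap.
  AsyncGetFileInfo(req, resp, [resp](const Status &stat) {
    if (stat.ok() && resp->has_fs)
      std::cout << "got file status\n";
  });
  return 0;
}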

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed43f07a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
index 869fd2d..8f795fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
@@ -19,71 +19,18 @@
 #define LIBHDFSPP_LIB_FS_FILESYSTEM_H_
 
 #include "filehandle.h"
-#include "common/libhdfs_events_impl.h"
-#include "common/hdfs_public_api.h"
-#include "common/async_stream.h"
-#include "common/new_delete.h"
 #include "hdfspp/hdfspp.h"
 #include "fs/bad_datanode_tracker.h"
-#include "rpc/rpc_engine.h"
 #include "reader/block_reader.h"
 #include "reader/fileinfo.h"
-#include "hdfspp/statinfo.h"
-#include "ClientNamenodeProtocol.pb.h"
-#include "ClientNamenodeProtocol.hrpc.inl"
 
 #include "asio.hpp"
 
 #include <thread>
+#include "namenode_operations.h"
 
 namespace hdfs {
 
-/**
- * NameNodeConnection: abstracts the details of communicating with a NameNode
- * and the implementation of the communications protocol.
- *
- * Will eventually handle retry and failover.
- *
- * Threading model: thread-safe; all operations can be called concurrently
- * Lifetime: owned by a FileSystemImpl
- */
-class NameNodeOperations {
-public:
-  MEMCHECKED_CLASS(NameNodeOperations);
-  NameNodeOperations(::asio::io_service *io_service, const Options &options,
-            const std::string &client_name, const std::string &user_name,
-            const char *protocol_name, int protocol_version) :
-  io_service_(io_service),
-  engine_(io_service, options, client_name, user_name, protocol_name, protocol_version),
-  namenode_(& engine_) {}
-
-  void Connect(const std::string &cluster_name,
-               const std::string &server,
-               const std::string &service,
-               std::function<void(const Status &)> &&handler);
-
-  void GetBlockLocations(const std::string & path,
-    std::function<void(const Status &, std::shared_ptr<const struct FileInfo>)> handler);
-
-  void GetFileInfo(const std::string & path,
-      std::function<void(const Status &, const StatInfo &)> handler);
-
-  // start_after="" for initial call
-  void GetListing(const std::string & path,
-        std::function<void(const Status &, std::shared_ptr<std::vector<StatInfo>>&, bool)> handler,
-        const std::string & start_after = "");
-
-  void SetFsEventCallback(fs_event_callback callback);
-
-private:
-  static void HdfsFileStatusProtoToStatInfo(hdfs::StatInfo & si, const ::hadoop::hdfs::HdfsFileStatusProto & fs);
-  static void DirectoryListingProtoToStatInfo(std::shared_ptr<std::vector<StatInfo>> stat_infos, const ::hadoop::hdfs::DirectoryListingProto & dl);
-
-  ::asio::io_service * io_service_;
-  RpcEngine engine_;
-  ClientNamenodeProtocol namenode_;
-};
-
 /*
  * FileSystem: The consumer's main point of interaction with the cluster as
  * a whole.
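
With the class gone, filesystem.h drops rpc_engine.h and the generated
ClientNamenodeProtocol headers in favor of a single namenode_operations.h
include. Note the coupling is moved rather than eliminated:
NameNodeOperations holds RpcEngine by value, so its own header still
includes rpc_engine.h. A stricter decoupling would forward-declare the
engine and hold it through a pointer; here is a hypothetical sketch of that
variant (not what this commit does), written as one translation unit with
comments marking the would-be file boundaries.

// widget.h -- the public header keeps its include list minimal; the heavy
// dependency stays behind a forward declaration.
#include <memory>
#include <string>

class HeavyEngine;  // clients never see the engine's headers

class Widget {
public:
  explicit Widget(std::string name);
  ~Widget();  // defined out of line, where HeavyEngine is complete
  void Run();
private:
  std::string name_;
  std::unique_ptr<HeavyEngine> engine_;
};

// widget.cc -- only this translation unit pays for the heavy include.
#include <iostream>

class HeavyEngine {  // stands in for something like RpcEngine
public:
  void Go(const std::string &name) { std::cout << name << " running\n"; }
};

Widget::Widget(std::string name)
    : name_(std::move(name)), engine_(new HeavyEngine()) {}
Widget::~Widget() = default;
void Widget::Run() { engine_->Go(name_); }

int main() {
  Widget w("fs");
  w.Run();
  return 0;
}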

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed43f07a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
new file mode 100644
index 0000000..250e719
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "filesystem.h"
+#include "common/continuation/asio.h"
+
+#include <asio/ip/tcp.hpp>
+
+#include <functional>
+#include <limits>
+#include <future>
+#include <tuple>
+#include <iostream>
+#include <pwd.h>
+
+#define FMT_THIS_ADDR "this=" << (void*)this
+
+using ::asio::ip::tcp;
+
+namespace hdfs {
+
+/*****************************************************************************
+ *                    NAMENODE OPERATIONS
+ ****************************************************************************/
+
+void NameNodeOperations::Connect(const std::string &cluster_name,
+                                 const std::string &server,
+                             const std::string &service,
+                             std::function<void(const Status &)> &&handler) {
+  using namespace asio_continuation;
+  typedef std::vector<tcp::endpoint> State;
+  auto m = Pipeline<State>::Create();
+  m->Push(Resolve(io_service_, server, service,
+                  std::back_inserter(m->state())))
+      .Push(Bind([this, m, cluster_name](const Continuation::Next &next) {
+        engine_.Connect(cluster_name, m->state(), next);
+      }));
+  m->Run([this, handler](const Status &status, const State &) {
+    handler(status);
+  });
+}
+
+void NameNodeOperations::GetBlockLocations(const std::string & path,
+  std::function<void(const Status &, std::shared_ptr<const struct FileInfo>)> handler)
+{
+  using ::hadoop::hdfs::GetBlockLocationsRequestProto;
+  using ::hadoop::hdfs::GetBlockLocationsResponseProto;
+
+  LOG_TRACE(kFileSystem, << "NameNodeOperations::GetBlockLocations("
+                           << FMT_THIS_ADDR << ", path=" << path << ", ...) called");
+
+  GetBlockLocationsRequestProto req;
+  req.set_src(path);
+  req.set_offset(0);
+  req.set_length(std::numeric_limits<long long>::max());
+
+  auto resp = std::make_shared<GetBlockLocationsResponseProto>();
+
+  namenode_.GetBlockLocations(&req, resp, [resp, handler](const Status &stat) {
+    if (stat.ok()) {
+      auto file_info = std::make_shared<struct FileInfo>();
+      auto locations = resp->locations();
+
+      file_info->file_length_ = locations.filelength();
+      file_info->last_block_complete_ = locations.islastblockcomplete();
+      file_info->under_construction_ = locations.underconstruction();
+
+      for (const auto &block : locations.blocks()) {
+        file_info->blocks_.push_back(block);
+      }
+
+      if (!locations.islastblockcomplete() &&
+          locations.has_lastblock() && locations.lastblock().b().numbytes()) {
+        file_info->blocks_.push_back(locations.lastblock());
+        file_info->file_length_ += locations.lastblock().b().numbytes();
+      }
+
+      handler(stat, file_info);
+    } else {
+      handler(stat, nullptr);
+    }
+  });
+}
+
+void NameNodeOperations::GetFileInfo(const std::string & path,
+  std::function<void(const Status &, const StatInfo &)> handler)
+{
+  using ::hadoop::hdfs::GetFileInfoRequestProto;
+  using ::hadoop::hdfs::GetFileInfoResponseProto;
+
+  LOG_TRACE(kFileSystem, << "NameNodeOperations::GetFileInfo("
+                           << FMT_THIS_ADDR << ", path=" << path << ") called");
+
+  GetFileInfoRequestProto req;
+  req.set_src(path);
+
+  auto resp = std::make_shared<GetFileInfoResponseProto>();
+
+  namenode_.GetFileInfo(&req, resp, [resp, handler, path](const Status &stat) {
+    if (stat.ok()) {
+      // For non-existent files, the server will respond with an OK message but
+      //   no fs in the protobuf.
+      if(resp -> has_fs()){
+          struct StatInfo stat_info;
+          stat_info.path=path;
+          HdfsFileStatusProtoToStatInfo(stat_info, resp->fs());
+          handler(stat, stat_info);
+        } else {
+          std::string errormsg = "No such file or directory: " + path;
+          Status statNew = Status::PathNotFound(errormsg.c_str());
+          handler(statNew, StatInfo());
+        }
+    } else {
+      handler(stat, StatInfo());
+    }
+  });
+}
+
+void NameNodeOperations::GetListing(
+    const std::string & path,
+    std::function<void(const Status &, std::shared_ptr<std::vector<StatInfo>> &, bool)> handler,
+    const std::string & start_after) {
+  using ::hadoop::hdfs::GetListingRequestProto;
+  using ::hadoop::hdfs::GetListingResponseProto;
+
+  LOG_TRACE(
+      kFileSystem,
+      << "NameNodeOperations::GetListing(" << FMT_THIS_ADDR << ", path=" << path << ") called");
+
+  GetListingRequestProto req;
+  req.set_src(path);
+  req.set_startafter(start_after.c_str());
+  req.set_needlocation(false);
+
+  auto resp = std::make_shared<GetListingResponseProto>();
+
+  namenode_.GetListing(
+      &req,
+      resp,
+      [resp, handler, path](const Status &stat) {
+        if (stat.ok()) {
+          if(resp -> has_dirlist()){
+            std::shared_ptr<std::vector<StatInfo>> stat_infos(new std::vector<StatInfo>);
+            for (::hadoop::hdfs::HdfsFileStatusProto const& fs : resp->dirlist().partiallisting()) {
+              StatInfo si;
+              si.path=fs.path();
+              HdfsFileStatusProtoToStatInfo(si, fs);
+              stat_infos->push_back(si);
+            }
+            handler(stat, stat_infos, resp->dirlist().remainingentries() > 0);
+          } else {
+            std::string errormsg = "No such file or directory: " + path;
+            Status statNew = Status::PathNotFound(errormsg.c_str());
+            std::shared_ptr<std::vector<StatInfo>> stat_infos;
+            handler(statNew, stat_infos, false);
+          }
+        } else {
+          std::shared_ptr<std::vector<StatInfo>> stat_infos;
+          handler(stat, stat_infos, false);
+        }
+      });
+}
+
+void NameNodeOperations::SetFsEventCallback(fs_event_callback callback) {
+  engine_.SetFsEventCallback(callback);
+}
+
+void NameNodeOperations::HdfsFileStatusProtoToStatInfo(
+    hdfs::StatInfo & stat_info,
+    const ::hadoop::hdfs::HdfsFileStatusProto & fs) {
+  stat_info.file_type = fs.filetype();
+  stat_info.length = fs.length();
+  stat_info.permissions = fs.permission().perm();
+  stat_info.owner = fs.owner();
+  stat_info.group = fs.group();
+  stat_info.modification_time = fs.modification_time();
+  stat_info.access_time = fs.access_time();
+  stat_info.symlink = fs.symlink();
+  stat_info.block_replication = fs.block_replication();
+  stat_info.blocksize = fs.blocksize();
+  stat_info.fileid = fs.fileid();
+  stat_info.children_num = fs.childrennum();
+}
+
+}
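
GetListing above is a paged API: the server returns a partial listing plus
a remainingentries count, and the caller resumes by passing the last path
it saw as start_after (the empty string on the first call, per the comment
in the header). The sketch below drives such an API to completion;
FakeGetListing and every name in it are hypothetical stand-ins for the real
NameNode call, and it invokes its handler inline, whereas the real handler
runs later on an RPC thread.

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Status { bool ok() const { return true; } };  // hypothetical stand-in
struct StatInfo { std::string path; };

// Fake paged directory source: serves two entries per "RPC".
void FakeGetListing(const std::string &start_after,
                    std::function<void(const Status &,
                                       std::shared_ptr<std::vector<StatInfo>>,
                                       bool)> handler) {
  static const std::vector<std::string> all = {"a", "b", "c", "d", "e"};
  auto page = std::make_shared<std::vector<StatInfo>>();
  size_t i = 0;
  while (i < all.size() && all[i] <= start_after) ++i;  // find resume point
  for (size_t n = 0; i < all.size() && n < 2; ++i, ++n)
    page->push_back(StatInfo{all[i]});
  handler(Status{}, page, /*has_more=*/i < all.size());
}

int main() {
  std::string start_after;  // "" for the initial call
  bool has_more = true;
  while (has_more) {
    FakeGetListing(start_after,
        [&](const Status &s, std::shared_ptr<std::vector<StatInfo>> page,
            bool more) {
          if (!s.ok() || page->empty()) { has_more = false; return; }
          for (const auto &si : *page) {
            std::cout << si.path << "\n";
            start_after = si.path;  // resume after the last entry seen
          }
          has_more = more;
        });
  }
  return 0;
}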

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed43f07a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
new file mode 100644
index 0000000..fafd874
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef LIBHDFSPP_LIB_FS_NAMENODEOPERATIONS_H_
+#define LIBHDFSPP_LIB_FS_NAMENODEOPERATIONS_H_
+
+#include "rpc/rpc_engine.h"
+#include "hdfspp/statinfo.h"
+#include "ClientNamenodeProtocol.pb.h"
+#include "ClientNamenodeProtocol.hrpc.inl"
+
+
+namespace hdfs {
+
+/**
+* NameNodeConnection: abstracts the details of communicating with a NameNode
+* and the implementation of the communications protocol.
+*
+* Will eventually handle retry and failover.
+*
+* Threading model: thread-safe; all operations can be called concurrently
+* Lifetime: owned by a FileSystemImpl
+*/
+class NameNodeOperations {
+public:
+  MEMCHECKED_CLASS(NameNodeOperations);
+  NameNodeOperations(::asio::io_service *io_service, const Options &options,
+            const std::string &client_name, const std::string &user_name,
+            const char *protocol_name, int protocol_version) :
+  io_service_(io_service),
+  engine_(io_service, options, client_name, user_name, protocol_name, protocol_version),
+  namenode_(& engine_) {}
+
+  void Connect(const std::string &cluster_name,
+               const std::string &server,
+               const std::string &service,
+               std::function<void(const Status &)> &&handler);
+
+  void GetBlockLocations(const std::string & path,
+    std::function<void(const Status &, std::shared_ptr<const struct FileInfo>)> handler);
+
+  void GetFileInfo(const std::string & path,
+      std::function<void(const Status &, const StatInfo &)> handler);
+
+  // start_after="" for initial call
+  void GetListing(const std::string & path,
+        std::function<void(const Status &, std::shared_ptr<std::vector<StatInfo>>&, bool)> handler,
+        const std::string & start_after = "");
+
+  void SetFsEventCallback(fs_event_callback callback);
+
+private:
+  static void HdfsFileStatusProtoToStatInfo(hdfs::StatInfo & si, const ::hadoop::hdfs::HdfsFileStatusProto & fs);
+  static void DirectoryListingProtoToStatInfo(std::shared_ptr<std::vector<StatInfo>> stat_infos, const ::hadoop::hdfs::DirectoryListingProto & dl);
+
+  ::asio::io_service * io_service_;
+  RpcEngine engine_;
+  ClientNamenodeProtocol namenode_;
+};
+}
+
+#endif
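
Everything on this class completes through a handler rather than a return
value. Callers that want blocking semantics typically bridge the callback
to std::promise/std::future; the sketch below does so against a
hypothetical async function in the same style (AsyncConnect and Status here
are stand-ins, not the real Connect signature).

#include <functional>
#include <future>
#include <iostream>
#include <string>

struct Status {  // hypothetical stand-in
  bool ok_ = true;
  std::string msg;
  bool ok() const { return ok_; }
};

// A callback-style async op in the spirit of NameNodeOperations::Connect.
void AsyncConnect(const std::string &server,
                  std::function<void(const Status &)> handler) {
  (void)server;       // unused in this sketch
  handler(Status{});  // pretend the connection succeeded
}

int main() {
  // The promise/future pair turns "call me back" into "wait for it".
  std::promise<Status> prom;
  std::future<Status> fut = prom.get_future();
  AsyncConnect("nn-host", [&prom](const Status &s) { prom.set_value(s); });
  Status result = fut.get();  // blocks until the handler has fired
  std::cout << (result.ok() ? "connected" : result.msg.c_str()) << "\n";
  return 0;
}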

