Posted to commits@mesos.apache.org by gi...@apache.org on 2018/06/07 19:12:55 UTC

[1/6] mesos git commit: Supported host and port in hdfs constructor.

Repository: mesos
Updated Branches:
  refs/heads/master e05342b94 -> c185752b7


Supported host and port in hdfs constructor.

Review: https://reviews.apache.org/r/66651


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/b41a1701
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/b41a1701
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/b41a1701

Branch: refs/heads/master
Commit: b41a1701e5b5922e8bafa30ed7573cd4b9a66a69
Parents: 44d0ef1
Author: Gilbert Song <so...@gmail.com>
Authored: Sun Apr 15 13:32:07 2018 -0700
Committer: Gilbert Song <so...@gmail.com>
Committed: Thu Jun 7 12:11:37 2018 -0700

----------------------------------------------------------------------
 src/uri/schemes/hdfs.hpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/b41a1701/src/uri/schemes/hdfs.hpp
----------------------------------------------------------------------
diff --git a/src/uri/schemes/hdfs.hpp b/src/uri/schemes/hdfs.hpp
index 46b9055..1f9ef45 100644
--- a/src/uri/schemes/hdfs.hpp
+++ b/src/uri/schemes/hdfs.hpp
@@ -29,9 +29,12 @@ namespace uri {
 /**
  * Creates an hdfs URI with the given path.
  */
-inline URI hdfs(const std::string& path)
+inline URI hdfs(
+    const std::string& path,
+    const Option<std::string>& host = None(),
+    const Option<int>& port = None())
 {
-  return construct("hdfs", path);
+  return construct("hdfs", path, host, port);
 }
 
 } // namespace uri {
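
For illustration, a minimal sketch of how the extended helper can now be
called. This is not part of the commit; it assumes it is compiled inside the
Mesos source tree, and the path, host, and port values are made up:

    #include <iostream>
    #include <string>

    #include <stout/stringify.hpp>

    #include <mesos/uri/uri.hpp>

    #include "uri/schemes/hdfs.hpp"

    int main()
    {
      const std::string host = "localhost";

      // The path-only form keeps its old behavior; the new overload also
      // accepts an explicit namenode host and port.
      mesos::URI plain = mesos::uri::hdfs("/archives/alpine.tar");
      mesos::URI remote = mesos::uri::hdfs("/archives/alpine.tar", host, 8020);

      std::cout << stringify(plain) << std::endl;

      // Something like "hdfs://localhost:8020/archives/alpine.tar":
      std::cout << stringify(remote) << std::endl;

      return 0;
    }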


[6/6] mesos git commit: Renamed local_puller to image_tar_puller.

Posted by gi...@apache.org.
Renamed local_puller to image_tar_puller.

Review: https://reviews.apache.org/r/66652


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/c185752b
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/c185752b
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/c185752b

Branch: refs/heads/master
Commit: c185752b71850ac79901bbe441851f8b5bcc3f7c
Parents: af531b6
Author: Gilbert Song <so...@gmail.com>
Authored: Mon Apr 16 22:22:05 2018 -0700
Committer: Gilbert Song <so...@gmail.com>
Committed: Thu Jun 7 12:11:38 2018 -0700

----------------------------------------------------------------------
 src/CMakeLists.txt                              |   2 +-
 src/Makefile.am                                 |   4 +-
 .../provisioner/docker/image_tar_puller.cpp     | 402 +++++++++++++++++++
 .../provisioner/docker/image_tar_puller.hpp     |  73 ++++
 .../mesos/provisioner/docker/local_puller.cpp   | 402 -------------------
 .../mesos/provisioner/docker/local_puller.hpp   |  73 ----
 .../mesos/provisioner/docker/puller.cpp         |   6 +-
 .../containerizer/provisioner_docker_tests.cpp  |   6 +-
 .../containerizer/runtime_isolator_tests.cpp    |  12 +-
 9 files changed, 490 insertions(+), 490 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 773771b..10b0946 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -175,7 +175,7 @@ set(AGENT_SRC
   slave/containerizer/mesos/provisioner/appc/paths.cpp
   slave/containerizer/mesos/provisioner/appc/store.cpp
   slave/containerizer/mesos/provisioner/backends/copy.cpp
-  slave/containerizer/mesos/provisioner/docker/local_puller.cpp
+  slave/containerizer/mesos/provisioner/docker/image_tar_puller.cpp
   slave/containerizer/mesos/provisioner/docker/metadata_manager.cpp
   slave/containerizer/mesos/provisioner/docker/paths.cpp
   slave/containerizer/mesos/provisioner/docker/puller.cpp

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/Makefile.am
----------------------------------------------------------------------
diff --git a/src/Makefile.am b/src/Makefile.am
index 9032453..2bcee1e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1096,7 +1096,7 @@ libmesos_no_3rdparty_la_SOURCES +=					\
   slave/containerizer/mesos/provisioner/appc/paths.cpp			\
   slave/containerizer/mesos/provisioner/appc/store.cpp			\
   slave/containerizer/mesos/provisioner/backends/copy.cpp		\
-  slave/containerizer/mesos/provisioner/docker/local_puller.cpp		\
+  slave/containerizer/mesos/provisioner/docker/image_tar_puller.cpp		\
   slave/containerizer/mesos/provisioner/docker/metadata_manager.cpp	\
   slave/containerizer/mesos/provisioner/docker/paths.cpp		\
   slave/containerizer/mesos/provisioner/docker/puller.cpp		\
@@ -1259,7 +1259,7 @@ libmesos_no_3rdparty_la_SOURCES +=					\
   slave/containerizer/mesos/provisioner/appc/paths.hpp			\
   slave/containerizer/mesos/provisioner/appc/store.hpp			\
   slave/containerizer/mesos/provisioner/backends/copy.hpp		\
-  slave/containerizer/mesos/provisioner/docker/local_puller.hpp		\
+  slave/containerizer/mesos/provisioner/docker/image_tar_puller.hpp		\
   slave/containerizer/mesos/provisioner/docker/message.hpp		\
   slave/containerizer/mesos/provisioner/docker/metadata_manager.hpp	\
   slave/containerizer/mesos/provisioner/docker/paths.hpp		\

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.cpp b/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.cpp
new file mode 100644
index 0000000..6549bf5
--- /dev/null
+++ b/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.cpp
@@ -0,0 +1,402 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+#include <vector>
+
+#include <glog/logging.h>
+
+#include <stout/json.hpp>
+#include <stout/os.hpp>
+#include <stout/result.hpp>
+#include <stout/strings.hpp>
+
+#include <process/collect.hpp>
+#include <process/defer.hpp>
+#include <process/dispatch.hpp>
+#include <process/id.hpp>
+#include <process/process.hpp>
+
+#include "common/command_utils.hpp"
+
+#include "hdfs/hdfs.hpp"
+
+#include "uri/schemes/file.hpp"
+#include "uri/schemes/hdfs.hpp"
+
+#include "slave/containerizer/mesos/provisioner/docker/image_tar_puller.hpp"
+#include "slave/containerizer/mesos/provisioner/docker/paths.hpp"
+
+using namespace process;
+
+namespace spec = docker::spec;
+
+using std::string;
+using std::vector;
+
+namespace mesos {
+namespace internal {
+namespace slave {
+namespace docker {
+
+class ImageTarPullerProcess : public Process<ImageTarPullerProcess>
+{
+public:
+  ImageTarPullerProcess(
+      const string& _storeDir,
+      const URI& _archivesUri,
+      const Shared<uri::Fetcher>& _fetcher)
+    : ProcessBase(process::ID::generate("docker-provisioner-local-puller")),
+      storeDir(_storeDir),
+      archivesUri(_archivesUri),
+      fetcher(_fetcher) {}
+
+  ~ImageTarPullerProcess() {}
+
+  Future<vector<string>> pull(
+      const spec::ImageReference& reference,
+      const string& directory,
+      const string& backend);
+
+private:
+  Future<vector<string>> _pull(
+      const spec::ImageReference& reference,
+      const string& directory,
+      const string& backend);
+
+  Result<string> getParentLayerId(
+      const string& directory,
+      const string& layerId);
+
+  Future<Nothing> extractLayers(
+      const string& directory,
+      const vector<string>& layerIds,
+      const string& backend);
+
+  Future<Nothing> extractLayer(
+      const string& directory,
+      const string& layerId,
+      const string& backend);
+
+  const string storeDir;
+  const URI archivesUri;
+
+  Shared<uri::Fetcher> fetcher;
+};
+
+
+static Try<URI> parseUri(const string& uri)
+{
+  if (strings::startsWith(uri, "/")) {
+    return uri::file(uri);
+  }
+
+  return HDFS::parse(uri);
+}
+
+
+Try<Owned<Puller>> ImageTarPuller::create(
+    const Flags& flags,
+    const Shared<uri::Fetcher>& fetcher)
+{
+  // This should have already been verified in puller.cpp.
+  if (!strings::startsWith(flags.docker_registry, "/") &&
+      !strings::startsWith(flags.docker_registry, "hdfs://")) {
+    return Error("Expecting registry url starting with '/' or 'hdfs'");
+  }
+
+  Try<URI> uri = parseUri(flags.docker_registry);
+  if (uri.isError()) {
+    return Error(
+        "Failed to parse the agent flag --docker_registry '" +
+        flags.docker_registry + "': " + uri.error());
+  }
+
+  VLOG(1) << "Creating image tar puller with docker registry '"
+          << flags.docker_registry << "'";
+
+  Owned<ImageTarPullerProcess> process(
+      new ImageTarPullerProcess(
+          flags.docker_store_dir,
+          uri.get(),
+          fetcher));
+
+  return Owned<Puller>(new ImageTarPuller(process));
+}
+
+
+ImageTarPuller::ImageTarPuller(Owned<ImageTarPullerProcess> _process)
+  : process(_process)
+{
+  spawn(process.get());
+}
+
+
+ImageTarPuller::~ImageTarPuller()
+{
+  terminate(process.get());
+  wait(process.get());
+}
+
+
+Future<vector<string>> ImageTarPuller::pull(
+    const spec::ImageReference& reference,
+    const string& directory,
+    const string& backend,
+    const Option<Secret>& config)
+{
+  return dispatch(
+      process.get(),
+      &ImageTarPullerProcess::pull,
+      reference,
+      directory,
+      backend);
+}
+
+
+Future<vector<string>> ImageTarPullerProcess::pull(
+    const spec::ImageReference& reference,
+    const string& directory,
+    const string& backend)
+{
+  // TODO(jieyu): We need to handle the case where the image reference
+  // contains a slash '/'.
+  const string image = stringify(reference);
+
+  // TODO(gilbert): Support 'http' and 'https'.
+  if (archivesUri.scheme() == "hdfs") {
+    URI uri = archivesUri;
+    uri.set_path(paths::getImageArchiveTarPath(archivesUri.path(), image));
+
+    VLOG(1) << "Fetching image '" << reference
+            << "' from '" << uri
+            << "' to '" << directory << "' using HDFS uri fetcher";
+
+    return fetcher->fetch(uri, directory)
+      .then(defer(self(), [=]() -> Future<vector<string>> {
+        const string source = paths::getImageArchiveTarPath(directory, image);
+
+        VLOG(1) << "Untarring image '" << reference
+                << "' from '" << source
+                << "' to '" << directory << "'";
+
+        return command::untar(Path(source), Path(directory))
+          .then(defer(self(), &Self::_pull, reference, directory, backend));
+      }));
+  }
+
+  const string tarPath = paths::getImageArchiveTarPath(
+      archivesUri.path(), image);
+
+  if (!os::exists(tarPath)) {
+    return Failure(
+        "Failed to find archive for image '" +
+        image + "' at '" + tarPath + "'");
+  }
+
+  VLOG(1) << "Untarring image '" << reference
+          << "' from '" << tarPath
+          << "' to '" << directory << "'";
+
+  return command::untar(Path(tarPath), Path(directory))
+    .then(defer(self(), &Self::_pull, reference, directory, backend));
+}
+
+
+Future<vector<string>> ImageTarPullerProcess::_pull(
+    const spec::ImageReference& reference,
+    const string& directory,
+    const string& backend)
+{
+  // We first parse the 'repositories' JSON file to get the top most
+  // layer id for the image.
+  Try<string> _repositories = os::read(path::join(directory, "repositories"));
+  if (_repositories.isError()) {
+    return Failure("Failed to read 'repositories': " + _repositories.error());
+  }
+
+  VLOG(1) << "The repositories JSON file for image '" << reference
+          << "' is '" << _repositories.get() << "'";
+
+  Try<JSON::Object> repositories =
+    JSON::parse<JSON::Object>(_repositories.get());
+
+  if (repositories.isError()) {
+    return Failure("Failed to parse 'repositories': " + repositories.error());
+  }
+
+  // We are looking for the topmost layer, so we know that it is OK to
+  // use at() rather than find() on the JSON object.
+  Result<JSON::Object> repository =
+    repositories->at<JSON::Object>(reference.repository());
+
+  // If we didn't find the bare repository name, try
+  // with the registry-qualified name. This would look like
+  // "registry.example.com/image".
+  if (repository.isNone() && reference.has_registry()) {
+    repository = repositories->at<JSON::Object>(
+        path::join(reference.registry(), reference.repository()));
+  }
+
+  if (repository.isError()) {
+    return Failure(
+        "Failed to find repository '" + reference.repository() +
+        "' in 'repositories': " + repository.error());
+  } else if (repository.isNone()) {
+    return Failure(
+        "Repository '" + reference.repository() + "' does not "
+        "exist in 'repositories'");
+  }
+
+  const string tag = reference.has_tag()
+    ? reference.tag()
+    : "latest";
+
+  // NOTE: We don't use JSON find here since a tag might contain '.'.
+  Result<JSON::String> layerId = repository->at<JSON::String>(tag);
+
+  if (layerId.isError()) {
+    return Failure(
+        "Failed to access layer id '" + tag + "': " + layerId.error());
+  } else if (layerId.isNone()) {
+    return Failure("Layer id '" + tag + "' is not found");
+  }
+
+  // Do a traverse to find all parent image layer ids. Here, we assume
+  // that all the parent layers are part of the archive tar, thus are
+  // already extracted under 'directory'.
+  vector<string> layerIds = { layerId->value };
+  Result<string> parentLayerId = getParentLayerId(directory, layerId->value);
+  while (parentLayerId.isSome()) {
+    // NOTE: We put parent layer ids in front because that's what the
+    // provisioner backends assume.
+    layerIds.insert(layerIds.begin(), parentLayerId.get());
+    parentLayerId = getParentLayerId(directory, parentLayerId.get());
+  }
+
+  if (parentLayerId.isError()) {
+    return Failure(
+        "Failed to find parent layer id for layer '" + layerId->value +
+        "': " + parentLayerId.error());
+  }
+
+  return extractLayers(directory, layerIds, backend)
+    .then([layerIds]() -> vector<string> { return layerIds; });
+}
+
+
+Result<string> ImageTarPullerProcess::getParentLayerId(
+    const string& directory,
+    const string& layerId)
+{
+  const string path =
+    paths::getImageLayerManifestPath(path::join(directory, layerId));
+
+  Try<string> _manifest = os::read(path);
+  if (_manifest.isError()) {
+    return Error(
+        "Failed to read manifest from '" + path + "': " + _manifest.error());
+  }
+
+  Try<JSON::Object> manifest = JSON::parse<JSON::Object>(_manifest.get());
+  if (manifest.isError()) {
+    return Error(
+        "Failed to parse manifest from '" + path + "': " + manifest.error());
+  }
+
+  Result<JSON::Value> parentLayerId = manifest->find<JSON::Value>("parent");
+  if (parentLayerId.isError()) {
+    return Error(
+        "Failed to parse 'parent' key in manifest from '" + path + "': " +
+        parentLayerId.error());
+  } else if (parentLayerId.isNone()) {
+    return None();
+  } else if (parentLayerId->is<JSON::Null>()) {
+    return None();
+  } else if (!parentLayerId->is<JSON::String>()) {
+    return Error("Unexpected 'parent' type in manifest from '" + path + "'");
+  }
+
+  const string id = parentLayerId->as<JSON::String>().value;
+  if (id == "") {
+    return None();
+  } else {
+    return id;
+  }
+}
+
+
+Future<Nothing> ImageTarPullerProcess::extractLayers(
+    const string& directory,
+    const vector<string>& layerIds,
+    const string& backend)
+{
+  vector<Future<Nothing>> futures;
+  foreach (const string& layerId, layerIds) {
+    // Check if the layer is already in the store. If yes, skip the
+    // unnecessary extracting.
+    if (os::exists(paths::getImageLayerRootfsPath(
+            storeDir,
+            layerId,
+            backend))) {
+      continue;
+    }
+
+    futures.push_back(extractLayer(directory, layerId, backend));
+  }
+
+  return collect(futures)
+    .then([]() { return Nothing(); });
+}
+
+
+Future<Nothing> ImageTarPullerProcess::extractLayer(
+    const string& directory,
+    const string& layerId,
+    const string& backend)
+{
+  const string layerPath = path::join(directory, layerId);
+  const string tar = paths::getImageLayerTarPath(layerPath);
+  const string rootfs = paths::getImageLayerRootfsPath(layerPath, backend);
+
+  VLOG(1) << "Extracting layer tar ball '" << tar
+          << " to rootfs '" << rootfs << "'";
+
+  Try<Nothing> mkdir = os::mkdir(rootfs);
+  if (mkdir.isError()) {
+    return Failure(
+        "Failed to create directory '" + rootfs + "'"
+        ": " + mkdir.error());
+  }
+
+  return command::untar(Path(tar), Path(rootfs))
+    .then([tar]() -> Future<Nothing> {
+      // Remove the tar after the extraction.
+      Try<Nothing> rm = os::rm(tar);
+      if (rm.isError()) {
+        return Failure(
+          "Failed to remove '" + tar + "' "
+          "after extraction: " + rm.error());
+      }
+
+      return Nothing();
+    });
+}
+
+} // namespace docker {
+} // namespace slave {
+} // namespace internal {
+} // namespace mesos {
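
The parent-layer ordering built in ImageTarPullerProcess::_pull() above is
worth calling out: starting from the topmost layer named in 'repositories',
each parent is inserted at the front, so the resulting vector runs base layer
first and topmost layer last, which is the order the provisioner backends
expect. A self-contained toy program (not Mesos code; the layer ids are
invented) showing the same insertion pattern:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
      // layer id -> parent layer id ("" means no parent), as recorded in
      // each layer's manifest.
      std::map<std::string, std::string> parents =
        {{"c3", "b2"}, {"b2", "a1"}, {"a1", ""}};

      // Topmost layer id, as read from the 'repositories' file.
      std::vector<std::string> layerIds = {"c3"};

      std::string parent = parents["c3"];
      while (!parent.empty()) {
        layerIds.insert(layerIds.begin(), parent);
        parent = parents[parent];
      }

      // Prints "a1 b2 c3": base layer first.
      for (const std::string& id : layerIds) {
        std::cout << id << " ";
      }
      std::cout << std::endl;

      return 0;
    }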

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.hpp b/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.hpp
new file mode 100644
index 0000000..f18a758
--- /dev/null
+++ b/src/slave/containerizer/mesos/provisioner/docker/image_tar_puller.hpp
@@ -0,0 +1,73 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef __PROVISIONER_DOCKER_IMAGE_TAR_PULLER_HPP__
+#define __PROVISIONER_DOCKER_IMAGE_TAR_PULLER_HPP__
+
+#include <process/owned.hpp>
+
+#include <mesos/docker/spec.hpp>
+
+#include <mesos/uri/fetcher.hpp>
+
+#include "slave/containerizer/mesos/provisioner/docker/puller.hpp"
+
+#include "slave/flags.hpp"
+
+namespace mesos {
+namespace internal {
+namespace slave {
+namespace docker {
+
+// Forward declaration.
+class ImageTarPullerProcess;
+
+
+/**
+ * ImageTarPuller assumes Docker images are stored in a local directory
+ * (configured with flags.docker_registry), with all the
+ * images saved as tars with file names in the form of <repo>:<tag>.tar.
+ */
+class ImageTarPuller : public Puller
+{
+public:
+  static Try<process::Owned<Puller>> create(
+      const Flags& flags,
+      const process::Shared<uri::Fetcher>& fetcher);
+
+  ~ImageTarPuller();
+
+  process::Future<std::vector<std::string>> pull(
+      const ::docker::spec::ImageReference& reference,
+      const std::string& directory,
+      const std::string& backend,
+      const Option<Secret>& config = None());
+
+private:
+  explicit ImageTarPuller(process::Owned<ImageTarPullerProcess> _process);
+
+  ImageTarPuller(const ImageTarPuller&) = delete; // Not copyable.
+  ImageTarPuller& operator=(const ImageTarPuller&) = delete; // Not assignable.
+
+  process::Owned<ImageTarPullerProcess> process;
+};
+
+} // namespace docker {
+} // namespace slave {
+} // namespace internal {
+} // namespace mesos {
+
+#endif // __PROVISIONER_DOCKER_IMAGE_TAR_PULLER_HPP__
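
To make the class comment above concrete, here is a hypothetical agent flag
setup (values invented, written in the style of the tests further down in
this mail) under which Puller::create() would hand back an ImageTarPuller;
the archive directory is expected to hold the output of `docker save`, named
as the comment describes:

    // Test-fixture style fragment; CreateSlaveFlags() is the usual
    // MesosTest helper.
    slave::Flags flags = CreateSlaveFlags();
    flags.image_providers = "docker";
    flags.docker_store_dir = "/tmp/mesos/store/docker";

    // Either a local directory holding the image tars...
    flags.docker_registry = "/tmp/docker/images";

    // ...or, with the rest of this series applied, an HDFS location:
    // flags.docker_registry = "hdfs://localhost:8020/archives";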

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp b/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp
deleted file mode 100644
index df715e2..0000000
--- a/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp
+++ /dev/null
@@ -1,402 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <string>
-#include <vector>
-
-#include <glog/logging.h>
-
-#include <stout/json.hpp>
-#include <stout/os.hpp>
-#include <stout/result.hpp>
-#include <stout/strings.hpp>
-
-#include <process/collect.hpp>
-#include <process/defer.hpp>
-#include <process/dispatch.hpp>
-#include <process/id.hpp>
-#include <process/process.hpp>
-
-#include "common/command_utils.hpp"
-
-#include "hdfs/hdfs.hpp"
-
-#include "uri/schemes/file.hpp"
-#include "uri/schemes/hdfs.hpp"
-
-#include "slave/containerizer/mesos/provisioner/docker/local_puller.hpp"
-#include "slave/containerizer/mesos/provisioner/docker/paths.hpp"
-
-using namespace process;
-
-namespace spec = docker::spec;
-
-using std::string;
-using std::vector;
-
-namespace mesos {
-namespace internal {
-namespace slave {
-namespace docker {
-
-class LocalPullerProcess : public Process<LocalPullerProcess>
-{
-public:
-  LocalPullerProcess(
-      const string& _storeDir,
-      const URI& _archivesUri,
-      const Shared<uri::Fetcher>& _fetcher)
-    : ProcessBase(process::ID::generate("docker-provisioner-local-puller")),
-      storeDir(_storeDir),
-      archivesUri(_archivesUri),
-      fetcher(_fetcher) {}
-
-  ~LocalPullerProcess() {}
-
-  Future<vector<string>> pull(
-      const spec::ImageReference& reference,
-      const string& directory,
-      const string& backend);
-
-private:
-  Future<vector<string>> _pull(
-      const spec::ImageReference& reference,
-      const string& directory,
-      const string& backend);
-
-  Result<string> getParentLayerId(
-      const string& directory,
-      const string& layerId);
-
-  Future<Nothing> extractLayers(
-      const string& directory,
-      const vector<string>& layerIds,
-      const string& backend);
-
-  Future<Nothing> extractLayer(
-      const string& directory,
-      const string& layerId,
-      const string& backend);
-
-  const string storeDir;
-  const URI archivesUri;
-
-  Shared<uri::Fetcher> fetcher;
-};
-
-
-static Try<URI> parseUri(const string& uri)
-{
-  if (strings::startsWith(uri, "/")) {
-    return uri::file(uri);
-  }
-
-  return HDFS::parse(uri);
-}
-
-
-Try<Owned<Puller>> LocalPuller::create(
-    const Flags& flags,
-    const Shared<uri::Fetcher>& fetcher)
-{
-  // This should already been verified at puller.cpp.
-  if (!strings::startsWith(flags.docker_registry, "/") &&
-      !strings::startsWith(flags.docker_registry, "hdfs://")) {
-    return Error("Expecting registry url starting with '/' or 'hdfs'");
-  }
-
-  Try<URI> uri = parseUri(flags.docker_registry);
-  if (uri.isError()) {
-    return Error(
-        "Failed to parse the agent flag --docker_registry '" +
-        flags.docker_registry + "': " + uri.error());
-  }
-
-  VLOG(1) << "Creating local puller with docker registry '"
-          << flags.docker_registry << "'";
-
-  Owned<LocalPullerProcess> process(
-      new LocalPullerProcess(
-          flags.docker_store_dir,
-          uri.get(),
-          fetcher));
-
-  return Owned<Puller>(new LocalPuller(process));
-}
-
-
-LocalPuller::LocalPuller(Owned<LocalPullerProcess> _process)
-  : process(_process)
-{
-  spawn(process.get());
-}
-
-
-LocalPuller::~LocalPuller()
-{
-  terminate(process.get());
-  wait(process.get());
-}
-
-
-Future<vector<string>> LocalPuller::pull(
-    const spec::ImageReference& reference,
-    const string& directory,
-    const string& backend,
-    const Option<Secret>& config)
-{
-  return dispatch(
-      process.get(),
-      &LocalPullerProcess::pull,
-      reference,
-      directory,
-      backend);
-}
-
-
-Future<vector<string>> LocalPullerProcess::pull(
-    const spec::ImageReference& reference,
-    const string& directory,
-    const string& backend)
-{
-  // TODO(jieyu): We need to handle the case where the image reference
-  // contains a slash '/'.
-  const string image = stringify(reference);
-
-  // TODO(gilbert): Support 'http' and 'https'.
-  if (archivesUri.scheme() == "hdfs") {
-    URI uri = archivesUri;
-    uri.set_path(paths::getImageArchiveTarPath(archivesUri.path(), image));
-
-    VLOG(1) << "Fetching image '" << reference
-            << "' from '" << uri
-            << "' to '" << directory << "' using HDFS uri fetcher";
-
-    return fetcher->fetch(uri, directory)
-      .then(defer(self(), [=]() -> Future<vector<string>> {
-        const string source = paths::getImageArchiveTarPath(directory, image);
-
-        VLOG(1) << "Untarring image '" << reference
-                << "' from '" << source
-                << "' to '" << directory << "'";
-
-        return command::untar(Path(source), Path(directory))
-          .then(defer(self(), &Self::_pull, reference, directory, backend));
-      }));
-  }
-
-  const string tarPath = paths::getImageArchiveTarPath(
-      archivesUri.path(), image);
-
-  if (!os::exists(tarPath)) {
-    return Failure(
-        "Failed to find archive for image '" +
-        image + "' at '" + tarPath + "'");
-  }
-
-  VLOG(1) << "Untarring image '" << reference
-          << "' from '" << tarPath
-          << "' to '" << directory << "'";
-
-  return command::untar(Path(tarPath), Path(directory))
-    .then(defer(self(), &Self::_pull, reference, directory, backend));
-}
-
-
-Future<vector<string>> LocalPullerProcess::_pull(
-    const spec::ImageReference& reference,
-    const string& directory,
-    const string& backend)
-{
-  // We first parse the 'repositories' JSON file to get the top most
-  // layer id for the image.
-  Try<string> _repositories = os::read(path::join(directory, "repositories"));
-  if (_repositories.isError()) {
-    return Failure("Failed to read 'repositories': " + _repositories.error());
-  }
-
-  VLOG(1) << "The repositories JSON file for image '" << reference
-          << "' is '" << _repositories.get() << "'";
-
-  Try<JSON::Object> repositories =
-    JSON::parse<JSON::Object>(_repositories.get());
-
-  if (repositories.isError()) {
-    return Failure("Failed to parse 'repositories': " + repositories.error());
-  }
-
-  // We are looking for the topmost layer, so we know that is it OK to
-  // use at() rather than find() on the JSON object.
-  Result<JSON::Object> repository =
-    repositories->at<JSON::Object>(reference.repository());
-
-  // If we didn't find the bare repository name, try
-  // with the registry-qualified name. This would look like
-  // "registry.example.com/image".
-  if (repository.isNone() && reference.has_registry()) {
-    repository = repositories->at<JSON::Object>(
-        path::join(reference.registry(), reference.repository()));
-  }
-
-  if (repository.isError()) {
-    return Failure(
-        "Failed to find repository '" + reference.repository() +
-        "' in 'repositories': " + repository.error());
-  } else if (repository.isNone()) {
-    return Failure(
-        "Repository '" + reference.repository() + "' does not "
-        "exist in 'repositories'");
-  }
-
-  const string tag = reference.has_tag()
-    ? reference.tag()
-    : "latest";
-
-  // NOTE: We don't use JSON find here since a tag might contain '.'.
-  Result<JSON::String> layerId = repository->at<JSON::String>(tag);
-
-  if (layerId.isError()) {
-    return Failure(
-        "Failed to access layer id '" + tag + "': " + layerId.error());
-  } else if (layerId.isNone()) {
-    return Failure("Layer id '" + tag + "' is not found");
-  }
-
-  // Do a traverse to find all parent image layer ids. Here, we assume
-  // that all the parent layers are part of the archive tar, thus are
-  // already extracted under 'directory'.
-  vector<string> layerIds = { layerId->value };
-  Result<string> parentLayerId = getParentLayerId(directory, layerId->value);
-  while (parentLayerId.isSome()) {
-    // NOTE: We put parent layer ids in front because that's what the
-    // provisioner backends assume.
-    layerIds.insert(layerIds.begin(), parentLayerId.get());
-    parentLayerId = getParentLayerId(directory, parentLayerId.get());
-  }
-
-  if (parentLayerId.isError()) {
-    return Failure(
-        "Failed to find parent layer id for layer '" + layerId->value +
-        "': " + parentLayerId.error());
-  }
-
-  return extractLayers(directory, layerIds, backend)
-    .then([layerIds]() -> vector<string> { return layerIds; });
-}
-
-
-Result<string> LocalPullerProcess::getParentLayerId(
-    const string& directory,
-    const string& layerId)
-{
-  const string path =
-    paths::getImageLayerManifestPath(path::join(directory, layerId));
-
-  Try<string> _manifest = os::read(path);
-  if (_manifest.isError()) {
-    return Error(
-        "Failed to read manifest from '" + path + "': " + _manifest.error());
-  }
-
-  Try<JSON::Object> manifest = JSON::parse<JSON::Object>(_manifest.get());
-  if (manifest.isError()) {
-    return Error(
-        "Failed to parse manifest from '" + path + "': " + manifest.error());
-  }
-
-  Result<JSON::Value> parentLayerId = manifest->find<JSON::Value>("parent");
-  if (parentLayerId.isError()) {
-    return Error(
-        "Failed to parse 'parent' key in manifest from '" + path + "': " +
-        parentLayerId.error());
-  } else if (parentLayerId.isNone()) {
-    return None();
-  } else if (parentLayerId->is<JSON::Null>()) {
-    return None();
-  } else if (!parentLayerId->is<JSON::String>()) {
-    return Error("Unexpected 'parent' type in manifest from '" + path + "'");
-  }
-
-  const string id = parentLayerId->as<JSON::String>().value;
-  if (id == "") {
-    return None();
-  } else {
-    return id;
-  }
-}
-
-
-Future<Nothing> LocalPullerProcess::extractLayers(
-    const string& directory,
-    const vector<string>& layerIds,
-    const string& backend)
-{
-  vector<Future<Nothing>> futures;
-  foreach (const string& layerId, layerIds) {
-    // Check if the layer is already in the store. If yes, skip the
-    // unnecessary extracting.
-    if (os::exists(paths::getImageLayerRootfsPath(
-            storeDir,
-            layerId,
-            backend))) {
-      continue;
-    }
-
-    futures.push_back(extractLayer(directory, layerId, backend));
-  }
-
-  return collect(futures)
-    .then([]() { return Nothing(); });
-}
-
-
-Future<Nothing> LocalPullerProcess::extractLayer(
-    const string& directory,
-    const string& layerId,
-    const string& backend)
-{
-  const string layerPath = path::join(directory, layerId);
-  const string tar = paths::getImageLayerTarPath(layerPath);
-  const string rootfs = paths::getImageLayerRootfsPath(layerPath, backend);
-
-  VLOG(1) << "Extracting layer tar ball '" << tar
-          << " to rootfs '" << rootfs << "'";
-
-  Try<Nothing> mkdir = os::mkdir(rootfs);
-  if (mkdir.isError()) {
-    return Failure(
-        "Failed to create directory '" + rootfs + "'"
-        ": " + mkdir.error());
-  }
-
-  return command::untar(Path(tar), Path(rootfs))
-    .then([tar]() -> Future<Nothing> {
-      // Remove the tar after the extraction.
-      Try<Nothing> rm = os::rm(tar);
-      if (rm.isError()) {
-        return Failure(
-          "Failed to remove '" + tar + "' "
-          "after extraction: " + rm.error());
-      }
-
-      return Nothing();
-    });
-}
-
-} // namespace docker {
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp b/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp
deleted file mode 100644
index 37f2510..0000000
--- a/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp
+++ /dev/null
@@ -1,73 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef __PROVISIONER_DOCKER_LOCAL_PULLER_HPP__
-#define __PROVISIONER_DOCKER_LOCAL_PULLER_HPP__
-
-#include <process/owned.hpp>
-
-#include <mesos/docker/spec.hpp>
-
-#include <mesos/uri/fetcher.hpp>
-
-#include "slave/containerizer/mesos/provisioner/docker/puller.hpp"
-
-#include "slave/flags.hpp"
-
-namespace mesos {
-namespace internal {
-namespace slave {
-namespace docker {
-
-// Forward declaration.
-class LocalPullerProcess;
-
-
-/**
- * LocalPuller assumes Docker images are stored in a local directory
- * (configured with flags.docker_registry), with all the
- * images saved as tars with file names in the form of <repo>:<tag>.tar.
- */
-class LocalPuller : public Puller
-{
-public:
-  static Try<process::Owned<Puller>> create(
-      const Flags& flags,
-      const process::Shared<uri::Fetcher>& fetcher);
-
-  ~LocalPuller();
-
-  process::Future<std::vector<std::string>> pull(
-      const ::docker::spec::ImageReference& reference,
-      const std::string& directory,
-      const std::string& backend,
-      const Option<Secret>& config = None());
-
-private:
-  explicit LocalPuller(process::Owned<LocalPullerProcess> _process);
-
-  LocalPuller(const LocalPuller&) = delete; // Not copyable.
-  LocalPuller& operator=(const LocalPuller&) = delete; // Not assignable.
-
-  process::Owned<LocalPullerProcess> process;
-};
-
-} // namespace docker {
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {
-
-#endif // __PROVISIONER_DOCKER_LOCAL_PULLER_HPP__

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/puller.cpp b/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
index cb4248b..d013c9d 100644
--- a/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
+++ b/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
@@ -19,7 +19,7 @@
 #include <stout/strings.hpp>
 #include <stout/try.hpp>
 
-#include "slave/containerizer/mesos/provisioner/docker/local_puller.hpp"
+#include "slave/containerizer/mesos/provisioner/docker/image_tar_puller.hpp"
 #include "slave/containerizer/mesos/provisioner/docker/puller.hpp"
 #include "slave/containerizer/mesos/provisioner/docker/registry_puller.hpp"
 
@@ -45,9 +45,9 @@ Try<Owned<Puller>> Puller::create(
   // image tarballs or the remote docker registry.
   if (strings::startsWith(flags.docker_registry, "/") ||
       strings::startsWith(flags.docker_registry, "hdfs://")) {
-    Try<Owned<Puller>> puller = LocalPuller::create(flags, fetcher);
+    Try<Owned<Puller>> puller = ImageTarPuller::create(flags, fetcher);
     if (puller.isError()) {
-      return Error("Failed to create local puller: " + puller.error());
+      return Error("Failed to create image tar puller " + puller.error());
     }
 
     return puller.get();

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/tests/containerizer/provisioner_docker_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/provisioner_docker_tests.cpp b/src/tests/containerizer/provisioner_docker_tests.cpp
index bb84f3f..71247c3 100644
--- a/src/tests/containerizer/provisioner_docker_tests.cpp
+++ b/src/tests/containerizer/provisioner_docker_tests.cpp
@@ -378,7 +378,7 @@ class ProvisionerDockerTest
 
 // This test verifies that local docker image can be pulled and
 // provisioned correctly, and shell command should be executed.
-TEST_F(ProvisionerDockerTest, ROOT_LocalPullerSimpleCommand)
+TEST_F(ProvisionerDockerTest, ROOT_ImageTarPullerSimpleCommand)
 {
   Try<Owned<cluster::Master>> master = StartMaster();
   ASSERT_SOME(master);
@@ -473,9 +473,9 @@ INSTANTIATE_TEST_CASE_P(
         "hdfs://"})));
 
 
-// This test verifies that the local puller could pull image
+// This test verifies that the image tar puller could pull image
 // with the hdfs uri fetcher plugin.
-TEST_P(ProvisionerDockerHdfsTest, ROOT_LocalPullerHdfsFetcherSimpleCommand)
+TEST_P(ProvisionerDockerHdfsTest, ROOT_ImageTarPullerHdfsFetcherSimpleCommand)
 {
   string hadoopPath = os::getcwd();
   ASSERT_TRUE(os::exists(hadoopPath));

http://git-wip-us.apache.org/repos/asf/mesos/blob/c185752b/src/tests/containerizer/runtime_isolator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/runtime_isolator_tests.cpp b/src/tests/containerizer/runtime_isolator_tests.cpp
index b703b82..539ae68 100644
--- a/src/tests/containerizer/runtime_isolator_tests.cpp
+++ b/src/tests/containerizer/runtime_isolator_tests.cpp
@@ -84,7 +84,7 @@ class DockerRuntimeIsolatorTest : public MesosTest {};
 // This test verifies that docker image default cmd is executed correctly.
 // This corresponds to the case in runtime isolator logic table: sh=0,
 // value=0, argv=1, entrypoint=0, cmd=1.
-TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultCmdLocalPuller)
+TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultCmdImageTarPuller)
 {
   Try<Owned<cluster::Master>> master = StartMaster();
   ASSERT_SOME(master);
@@ -176,9 +176,9 @@ TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultCmdLocalPuller)
 
 
 // This test verifies that docker image default entrypoint is executed
-// correctly using local puller. This corresponds to the case in runtime
-// isolator logic table: sh=0, value=0, argv=1, entrypoint=1, cmd=0.
-TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultEntryptLocalPuller)
+// correctly using image tar puller. This corresponds to the case in
+// runtime isolator logic table: sh=0, value=0, argv=1, entrypoint=1, cmd=0.
+TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultEntryptImageTarPuller)
 {
   Try<Owned<cluster::Master>> master = StartMaster();
   ASSERT_SOME(master);
@@ -456,7 +456,7 @@ TEST_F(DockerRuntimeIsolatorTest, ROOT_INTERNET_CURL_NestedSimpleCommand)
 // This is a regression test for MESOS-6852. It corresponds to the
 // following case in runtime isolator logic table for nested
 // container: sh=0, value=0, argv=1, entrypoint=0, cmd=1.
-TEST_F(DockerRuntimeIsolatorTest, ROOT_NestedDockerDefaultCmdLocalPuller)
+TEST_F(DockerRuntimeIsolatorTest, ROOT_NestedDockerDefaultCmdImageTarPuller)
 {
   Try<Owned<cluster::Master>> master = StartMaster();
   ASSERT_SOME(master);
@@ -579,7 +579,7 @@ TEST_F(DockerRuntimeIsolatorTest, ROOT_NestedDockerDefaultCmdLocalPuller)
 // It corresponds to the following case in runtime isolator
 // logic table for nested container: sh=0, value=0, argv=1,
 // entrypoint=1, cmd=0.
-TEST_F(DockerRuntimeIsolatorTest, ROOT_NestedDockerDefaultEntryptLocalPuller)
+TEST_F(DockerRuntimeIsolatorTest, ROOT_NestedDockerDefaultEntryptImageTarPuller)
 {
   Try<Owned<cluster::Master>> master = StartMaster();
   ASSERT_SOME(master);


[5/6] mesos git commit: Added test for local puller hdfs uri fetcher plugin.

Posted by gi...@apache.org.
Added test for local puller hdfs uri fetcher plugin.

Review: https://reviews.apache.org/r/66562


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/af531b6d
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/af531b6d
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/af531b6d

Branch: refs/heads/master
Commit: af531b6d3b83b55c81a0fa66bdd005fafa8220cb
Parents: 044aa6f
Author: Gilbert Song <so...@gmail.com>
Authored: Tue Apr 10 23:39:20 2018 -0700
Committer: Gilbert Song <so...@gmail.com>
Committed: Thu Jun 7 12:11:38 2018 -0700

----------------------------------------------------------------------
 .../containerizer/provisioner_docker_tests.cpp  | 142 +++++++++++++++++++
 1 file changed, 142 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/af531b6d/src/tests/containerizer/provisioner_docker_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/provisioner_docker_tests.cpp b/src/tests/containerizer/provisioner_docker_tests.cpp
index 3cb1a7e..bb84f3f 100644
--- a/src/tests/containerizer/provisioner_docker_tests.cpp
+++ b/src/tests/containerizer/provisioner_docker_tests.cpp
@@ -459,6 +459,148 @@ TEST_F(ProvisionerDockerTest, ROOT_LocalPullerSimpleCommand)
 }
 
 
+class ProvisionerDockerHdfsTest
+  : public MesosTest,
+    public WithParamInterface<string> {};
+
+
+// The HDFS URI may specify a remote host or refer to a local directory.
+INSTANTIATE_TEST_CASE_P(
+    HdfsHost,
+    ProvisionerDockerHdfsTest,
+    ::testing::ValuesIn(vector<string>({
+        "hdfs://localhost:8020",
+        "hdfs://"})));
+
+
+// This test verifies that the local puller could pull image
+// with the hdfs uri fetcher plugin.
+TEST_P(ProvisionerDockerHdfsTest, ROOT_LocalPullerHdfsFetcherSimpleCommand)
+{
+  string hadoopPath = os::getcwd();
+  ASSERT_TRUE(os::exists(hadoopPath));
+
+  string hadoopBinPath = path::join(hadoopPath, "bin");
+  ASSERT_SOME(os::mkdir(hadoopBinPath));
+  ASSERT_SOME(os::chmod(hadoopBinPath, S_IRWXU | S_IRWXG | S_IRWXO));
+
+  const string& proof = path::join(hadoopPath, "proof");
+
+  // This acts exactly as "hadoop" for testing purposes. On some platforms, the
+  // "hadoop" wrapper command will emit a warning that Hadoop installation has
+  // no native code support. We always emit that here to make sure it is parsed
+  // correctly.
+  string mockHadoopScript =
+    "#!/usr/bin/env bash\n"
+    "\n"
+    "touch " + proof + "\n"
+    "\n"
+    "now=$(date '+%y/%m/%d %I:%M:%S')\n"
+    "echo \"$now WARN util.NativeCodeLoader: "
+      "Unable to load native-hadoop library for your platform...\" 1>&2\n"
+    "\n"
+    "if [[ 'version' == $1 ]]; then\n"
+    "  echo $0 'for Mesos testing'\n"
+    "fi\n"
+    "\n"
+    "# hadoop fs -copyToLocal $3 $4\n"
+    "if [[ 'fs' == $1 && '-copyToLocal' == $2 ]]; then\n"
+    "  if [[ $3 == 'hdfs://'* ]]; then\n"
+    "    # Remove 'hdfs://<host>/' and use just the (absolute) path.\n"
+    "    withoutProtocol=${3/'hdfs:'\\/\\//}\n"
+    "    withoutHost=${withoutProtocol#*\\/}\n"
+    "    absolutePath='/'$withoutHost\n"
+    "   cp $absolutePath $4\n"
+    "  else\n"
+    "    cp $3 $4\n"
+    "  fi\n"
+    "fi\n";
+
+  string hadoopCommand = path::join(hadoopBinPath, "hadoop");
+  ASSERT_SOME(os::write(hadoopCommand, mockHadoopScript));
+  ASSERT_SOME(os::chmod(hadoopCommand,
+                        S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH));
+
+  Try<Owned<cluster::Master>> master = StartMaster();
+  ASSERT_SOME(master);
+
+  const string directory = path::join(os::getcwd(), "archives");
+
+  Future<Nothing> testImage = DockerArchive::create(directory, "alpine");
+  AWAIT_READY(testImage);
+
+  ASSERT_TRUE(os::exists(path::join(directory, "alpine.tar")));
+
+  slave::Flags flags = CreateSlaveFlags();
+  flags.isolation = "docker/runtime,filesystem/linux";
+  flags.image_providers = "docker";
+  flags.docker_registry = GetParam() + directory;
+  flags.docker_store_dir = path::join(os::getcwd(), "store");
+  flags.hadoop_home = hadoopPath;
+
+  Owned<MasterDetector> detector = master.get()->createDetector();
+
+  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
+  ASSERT_SOME(slave);
+
+  MockScheduler sched;
+  MesosSchedulerDriver driver(
+      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
+
+  EXPECT_CALL(sched, registered(&driver, _, _));
+
+  Future<vector<Offer>> offers;
+  EXPECT_CALL(sched, resourceOffers(&driver, _))
+    .WillOnce(FutureArg<1>(&offers))
+    .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+  driver.start();
+
+  AWAIT_READY(offers);
+  ASSERT_EQ(1u, offers->size());
+
+  const Offer& offer = offers.get()[0];
+
+  TaskInfo task = createTask(
+      offer.slave_id(),
+      Resources::parse("cpus:1;mem:128").get(),
+      "ls -al /");
+
+  Image image;
+  image.set_type(Image::DOCKER);
+  image.mutable_docker()->set_name("alpine");
+
+  ContainerInfo* container = task.mutable_container();
+  container->set_type(ContainerInfo::MESOS);
+  container->mutable_mesos()->mutable_image()->CopyFrom(image);
+
+  Future<TaskStatus> statusStarting;
+  Future<TaskStatus> statusRunning;
+  Future<TaskStatus> statusFinished;
+  EXPECT_CALL(sched, statusUpdate(&driver, _))
+    .WillOnce(FutureArg<1>(&statusStarting))
+    .WillOnce(FutureArg<1>(&statusRunning))
+    .WillOnce(FutureArg<1>(&statusFinished));
+
+  driver.launchTasks(offer.id(), {task});
+
+  AWAIT_READY_FOR(statusStarting, Seconds(60));
+  EXPECT_EQ(task.task_id(), statusStarting->task_id());
+  EXPECT_EQ(TASK_STARTING, statusStarting->state());
+
+  AWAIT_READY_FOR(statusRunning, Seconds(60));
+  EXPECT_EQ(task.task_id(), statusRunning->task_id());
+  EXPECT_EQ(TASK_RUNNING, statusRunning->state());
+
+  AWAIT_READY(statusFinished);
+  EXPECT_EQ(task.task_id(), statusFinished->task_id());
+  EXPECT_EQ(TASK_FINISHED, statusFinished->state());
+
+  driver.stop();
+  driver.join();
+}
+
+
 // For official Docker images, users can omit the 'library/' prefix
 // when specifying the repository name (e.g., 'busybox'). The registry
 // puller normalize docker official images if necessary.


[2/6] mesos git commit: Made agent flag '--hadoop_home' optional.

Posted by gi...@apache.org.
Made agent flag '--hadoop_home' optional.

Review: https://reviews.apache.org/r/66559


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/d24ef174
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/d24ef174
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/d24ef174

Branch: refs/heads/master
Commit: d24ef1741f5bef69be95319cd29893a3ce4a6d73
Parents: e05342b
Author: Gilbert Song <so...@gmail.com>
Authored: Wed Apr 4 13:39:59 2018 -0700
Committer: Gilbert Song <so...@gmail.com>
Committed: Thu Jun 7 12:11:37 2018 -0700

----------------------------------------------------------------------
 docs/configuration/agent.md         | 2 +-
 docs/operator-http-api.md           | 4 ----
 src/slave/containerizer/fetcher.cpp | 4 ++--
 src/slave/flags.cpp                 | 3 +--
 src/slave/flags.hpp                 | 2 +-
 src/slave/http.cpp                  | 1 -
 6 files changed, 5 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/d24ef174/docs/configuration/agent.md
----------------------------------------------------------------------
diff --git a/docs/configuration/agent.md b/docs/configuration/agent.md
index e0aaf2c..c8e9e6c 100644
--- a/docs/configuration/agent.md
+++ b/docs/configuration/agent.md
@@ -890,7 +890,7 @@ be a value between 0.0 and 1.0 (default: 0.1)
 Path to find Hadoop installed (for
 fetching framework executors from HDFS)
 (no default, look for <code>HADOOP_HOME</code> in
-environment or find hadoop on <code>PATH</code>) (default: )
+environment or find hadoop on <code>PATH</code>)
   </td>
 </tr>
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/d24ef174/docs/operator-http-api.md
----------------------------------------------------------------------
diff --git a/docs/operator-http-api.md b/docs/operator-http-api.md
index c0f20c0..4acbee9 100644
--- a/docs/operator-http-api.md
+++ b/docs/operator-http-api.md
@@ -2953,10 +2953,6 @@ Content-Type: application/json
         "value": "0.1"
       },
       {
-        "name": "hadoop_home",
-        "value": ""
-      },
-      {
         "name": "help",
         "value": "false"
       },

http://git-wip-us.apache.org/repos/asf/mesos/blob/d24ef174/src/slave/containerizer/fetcher.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/fetcher.cpp b/src/slave/containerizer/fetcher.cpp
index d6b4270..7d67a90 100644
--- a/src/slave/containerizer/fetcher.cpp
+++ b/src/slave/containerizer/fetcher.cpp
@@ -861,8 +861,8 @@ Future<Nothing> FetcherProcess::run(
 
   environment["MESOS_FETCHER_INFO"] = stringify(JSON::protobuf(info));
 
-  if (!flags.hadoop_home.empty()) {
-    environment["HADOOP_HOME"] = flags.hadoop_home;
+  if (flags.hadoop_home.isSome()) {
+    environment["HADOOP_HOME"] = flags.hadoop_home.get();
   }
 
   // TODO(jieyu): This is to make sure the libprocess of the fetcher

http://git-wip-us.apache.org/repos/asf/mesos/blob/d24ef174/src/slave/flags.cpp
----------------------------------------------------------------------
diff --git a/src/slave/flags.cpp b/src/slave/flags.cpp
index 23d9bb1..06c5421 100644
--- a/src/slave/flags.cpp
+++ b/src/slave/flags.cpp
@@ -307,8 +307,7 @@ mesos::internal::slave::Flags::Flags()
       "Path to find Hadoop installed (for\n"
       "fetching framework executors from HDFS)\n"
       "(no default, look for `HADOOP_HOME` in\n"
-      "environment or find hadoop on `PATH`)",
-      "");
+      "environment or find hadoop on `PATH`)");
 
 #ifndef __WINDOWS__
   add(&Flags::switch_user,

http://git-wip-us.apache.org/repos/asf/mesos/blob/d24ef174/src/slave/flags.hpp
----------------------------------------------------------------------
diff --git a/src/slave/flags.hpp b/src/slave/flags.hpp
index ae09e19..eeb9708 100644
--- a/src/slave/flags.hpp
+++ b/src/slave/flags.hpp
@@ -69,7 +69,7 @@ public:
   std::string work_dir;
   std::string runtime_dir;
   std::string launcher_dir;
-  std::string hadoop_home; // TODO(benh): Make an Option.
+  Option<std::string> hadoop_home;
   size_t max_completed_executors_per_framework;
 
 #ifndef __WINDOWS__
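
A small, hand-written sketch (not part of the commit) of what the
Option-typed flag buys callers: it can be handed directly to APIs that
already take an Option<std::string>, such as HDFS::create() shown in the
next commit, and a concrete default only has to be chosen where one is
really needed:

    // 'flags' is a slave::Flags instance. HDFS::create() falls back to
    // $HADOOP_HOME or the hadoop found on PATH when given None(), matching
    // the flag's help text.
    Try<process::Owned<HDFS>> hdfs = HDFS::create(flags.hadoop_home);

    // Where a plain string is required, make the default explicit.
    const std::string hadoopHome = flags.hadoop_home.getOrElse("");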

http://git-wip-us.apache.org/repos/asf/mesos/blob/d24ef174/src/slave/http.cpp
----------------------------------------------------------------------
diff --git a/src/slave/http.cpp b/src/slave/http.cpp
index ba43086..a6739e1 100644
--- a/src/slave/http.cpp
+++ b/src/slave/http.cpp
@@ -1217,7 +1217,6 @@ string Http::STATE_HELP() {
         "         \"docker_kill_orphans\" : \"true\",",
         "         \"switch_user\" : \"true\",",
         "         \"logging_level\" : \"INFO\",",
-        "         \"hadoop_home\" : \"\",",
         "         \"strict\" : \"true\",",
         "         \"executor_registration_timeout\" : \"1mins\",",
         "         \"recovery_timeout\" : \"15mins\",",


[4/6] mesos git commit: Supported hdfs fetching in local puller.

Posted by gi...@apache.org.
Supported hdfs fetching in local puller.

Review: https://reviews.apache.org/r/66561


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/044aa6f9
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/044aa6f9
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/044aa6f9

Branch: refs/heads/master
Commit: 044aa6f90def775d9d2e7172320119b181e71575
Parents: b41a170
Author: Gilbert Song <so...@gmail.com>
Authored: Tue Apr 10 19:28:43 2018 -0700
Committer: Gilbert Song <so...@gmail.com>
Committed: Thu Jun 7 12:11:38 2018 -0700

----------------------------------------------------------------------
 docs/configuration/agent.md                     | 10 ++-
 src/hdfs/hdfs.cpp                               | 56 ++++++++++++++
 src/hdfs/hdfs.hpp                               |  8 ++
 .../mesos/provisioner/docker/local_puller.cpp   | 77 +++++++++++++++++---
 .../mesos/provisioner/docker/local_puller.hpp   |  6 +-
 .../mesos/provisioner/docker/puller.cpp         | 12 ++-
 .../mesos/provisioner/docker/store.cpp          |  4 +
 src/slave/flags.cpp                             |  8 +-
 8 files changed, 161 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/docs/configuration/agent.md
----------------------------------------------------------------------
diff --git a/docs/configuration/agent.md b/docs/configuration/agent.md
index c8e9e6c..d2b6b82 100644
--- a/docs/configuration/agent.md
+++ b/docs/configuration/agent.md
@@ -621,10 +621,12 @@ recovers.
   <td>
 The default url for Mesos containerizer to pull Docker images. It could
 either be a Docker registry server url (i.e: <code>https://registry.docker.io</code>),
-or a local path (i.e: <code>/tmp/docker/images</code>) in which Docker
-image archives (result of <code>docker save</code>) are stored. Note
-that this option won't change the default registry server for Docker
-containerizer. (default: https://registry-1.docker.io)
+or a source in which Docker image archives (result of <code>docker save</code>)
+are stored. The Docker archive source could be specified either as a local
+path (i.e: <code>/tmp/docker/images</code>), or as an HDFS URI
+(i.e: <code>hdfs://localhost:8020/archives/</code>). Note that this option won't
+change the default registry server for the Docker containerizer.
+(default: https://registry-1.docker.io)
   </td>
 </tr>
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/src/hdfs/hdfs.cpp
----------------------------------------------------------------------
diff --git a/src/hdfs/hdfs.cpp b/src/hdfs/hdfs.cpp
index 726925f..3947b69 100644
--- a/src/hdfs/hdfs.cpp
+++ b/src/hdfs/hdfs.cpp
@@ -38,6 +38,8 @@
 #include "common/status_utils.hpp"
 #include "hdfs/hdfs.hpp"
 
+#include "uri/schemes/hdfs.hpp"
+
 using namespace process;
 
 using std::string;
@@ -137,6 +139,60 @@ Try<Owned<HDFS>> HDFS::create(const Option<string>& _hadoop)
 }
 
 
+Try<mesos::URI> HDFS::parse(const string& uri)
+{
+  size_t schemePos = uri.find("://");
+  if (schemePos == string::npos) {
+    return Error("Missing scheme in url string");
+  }
+
+  const string uriPath = uri.substr(schemePos + 3);
+
+  size_t pathPos = uriPath.find_first_of('/');
+  if (pathPos == 0) {
+    return mesos::uri::hdfs(uriPath);
+  }
+
+  // If path is specified in the URL, try to capture the host and path
+  // separately.
+  string host = uriPath;
+  string path = "/";
+  if (pathPos != string::npos) {
+    host = host.substr(0, pathPos);
+    path = uriPath.substr(pathPos);
+  }
+
+  if (host.empty()) {
+    return mesos::uri::hdfs(path);
+  }
+
+  const vector<string> tokens = strings::tokenize(host, ":");
+
+  if (tokens[0].empty()) {
+    return Error("Host not found in url");
+  }
+
+  if (tokens.size() > 2) {
+    return Error("Found multiple ports in url");
+  }
+
+  Option<int> port;
+  if (tokens.size() == 2) {
+    Try<int> numifyPort = numify<int>(tokens[1]);
+    if (numifyPort.isError()) {
+      return Error("Failed to parse port: " + numifyPort.error());
+    }
+
+    port = numifyPort.get();
+  } else {
+    // Default port for HDFS.
+    port = 8020;
+  }
+
+  return mesos::uri::hdfs(path, tokens[0], port.get());
+}
+
+
 // An HDFS client path must be either a full URI or an absolute path. If it is
 // a relative path, prepend "/" to make it absolute. (Note that all URI schemes
 // supported by the HDFS client contain "://" whereas file paths never do.)
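
The new HDFS::parse() helper above splits an "hdfs://" style URL into host,
port, and path, defaulting the port to 8020. A minimal usage sketch
(editorial, not part of the commit), assuming only the declarations shown in
this diff; the example URIs and the main() wrapper are illustrative:

#include <iostream>
#include <string>

#include <stout/try.hpp>

#include <mesos/uri/uri.hpp>

#include "hdfs/hdfs.hpp"

int main()
{
  // Host, port, and path are all captured: scheme "hdfs", host "localhost",
  // port 8020, path "/archives".
  Try<mesos::URI> full = HDFS::parse("hdfs://localhost:8020/archives");
  if (full.isSome()) {
    std::cout << full.get() << std::endl;
  }

  // A host without an explicit port falls back to the HDFS default, 8020.
  Try<mesos::URI> defaulted = HDFS::parse("hdfs://namenode/images");

  // No host at all yields a path-only URI, while a string without "://"
  // is rejected with an error.
  Try<mesos::URI> hostless = HDFS::parse("hdfs:///images");
  Try<mesos::URI> invalid = HDFS::parse("/not/a/uri");

  return invalid.isError() ? 0 : 1;
}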

http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/src/hdfs/hdfs.hpp
----------------------------------------------------------------------
diff --git a/src/hdfs/hdfs.hpp b/src/hdfs/hdfs.hpp
index 716d13f..66fd226 100644
--- a/src/hdfs/hdfs.hpp
+++ b/src/hdfs/hdfs.hpp
@@ -28,6 +28,8 @@
 #include <stout/option.hpp>
 #include <stout/try.hpp>
 
+#include <mesos/uri/uri.hpp>
+
 
 // TODO(benh): We should get the hostname:port (or ip:port) of the
 // server via:
@@ -49,6 +51,12 @@ public:
   static Try<process::Owned<HDFS>> create(
       const Option<std::string>& hadoop = None());
 
+  // TODO(gilbert): Remove this helper function once we have URI Parser
+  // support (see MESOS-5254 for details). Ideally, we should support
+  // other schemes (e.g., hftp, s3, s3n etc) with hadoop plugin. It is
+  // hard coded for HDFS for now.
+  static Try<mesos::URI> parse(const std::string& uri);
+
   process::Future<bool> exists(const std::string& path);
   process::Future<Bytes> du(const std::string& path);
   process::Future<Nothing> rm(const std::string& path);

http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp b/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp
index 509be63..df715e2 100644
--- a/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp
+++ b/src/slave/containerizer/mesos/provisioner/docker/local_puller.cpp
@@ -32,6 +32,11 @@
 
 #include "common/command_utils.hpp"
 
+#include "hdfs/hdfs.hpp"
+
+#include "uri/schemes/file.hpp"
+#include "uri/schemes/hdfs.hpp"
+
 #include "slave/containerizer/mesos/provisioner/docker/local_puller.hpp"
 #include "slave/containerizer/mesos/provisioner/docker/paths.hpp"
 
@@ -50,10 +55,14 @@ namespace docker {
 class LocalPullerProcess : public Process<LocalPullerProcess>
 {
 public:
-  LocalPullerProcess(const string& _storeDir, const string& _archivesDir)
+  LocalPullerProcess(
+      const string& _storeDir,
+      const URI& _archivesUri,
+      const Shared<uri::Fetcher>& _fetcher)
     : ProcessBase(process::ID::generate("docker-provisioner-local-puller")),
       storeDir(_storeDir),
-      archivesDir(_archivesDir) {}
+      archivesUri(_archivesUri),
+      fetcher(_fetcher) {}
 
   ~LocalPullerProcess() {}
 
@@ -83,22 +92,47 @@ private:
       const string& backend);
 
   const string storeDir;
-  const string archivesDir;
+  const URI archivesUri;
+
+  Shared<uri::Fetcher> fetcher;
 };
 
 
-Try<Owned<Puller>> LocalPuller::create(const Flags& flags)
+static Try<URI> parseUri(const string& uri)
+{
+  if (strings::startsWith(uri, "/")) {
+    return uri::file(uri);
+  }
+
+  return HDFS::parse(uri);
+}
+
+
+Try<Owned<Puller>> LocalPuller::create(
+    const Flags& flags,
+    const Shared<uri::Fetcher>& fetcher)
 {
   // This should already been verified at puller.cpp.
-  if (!strings::startsWith(flags.docker_registry, "/")) {
-    return Error("Expecting registry url starting with '/'");
+  if (!strings::startsWith(flags.docker_registry, "/") &&
+      !strings::startsWith(flags.docker_registry, "hdfs://")) {
+    return Error("Expecting registry url starting with '/' or 'hdfs'");
+  }
+
+  Try<URI> uri = parseUri(flags.docker_registry);
+  if (uri.isError()) {
+    return Error(
+        "Failed to parse the agent flag --docker_registry '" +
+        flags.docker_registry + "': " + uri.error());
   }
 
   VLOG(1) << "Creating local puller with docker registry '"
           << flags.docker_registry << "'";
 
   Owned<LocalPullerProcess> process(
-      new LocalPullerProcess(flags.docker_store_dir, flags.docker_registry));
+      new LocalPullerProcess(
+          flags.docker_store_dir,
+          uri.get(),
+          fetcher));
 
   return Owned<Puller>(new LocalPuller(process));
 }
@@ -140,14 +174,37 @@ Future<vector<string>> LocalPullerProcess::pull(
 {
   // TODO(jieyu): We need to handle the case where the image reference
   // contains a slash '/'.
+  const string image = stringify(reference);
+
+  // TODO(gilbert): Support 'http' and 'https'.
+  if (archivesUri.scheme() == "hdfs") {
+    URI uri = archivesUri;
+    uri.set_path(paths::getImageArchiveTarPath(archivesUri.path(), image));
+
+    VLOG(1) << "Fetching image '" << reference
+            << "' from '" << uri
+            << "' to '" << directory << "' using HDFS uri fetcher";
+
+    return fetcher->fetch(uri, directory)
+      .then(defer(self(), [=]() -> Future<vector<string>> {
+        const string source = paths::getImageArchiveTarPath(directory, image);
+
+        VLOG(1) << "Untarring image '" << reference
+                << "' from '" << source
+                << "' to '" << directory << "'";
+
+        return command::untar(Path(source), Path(directory))
+          .then(defer(self(), &Self::_pull, reference, directory, backend));
+      }));
+  }
+
   const string tarPath = paths::getImageArchiveTarPath(
-      archivesDir,
-      stringify(reference));
+      archivesUri.path(), image);
 
   if (!os::exists(tarPath)) {
     return Failure(
         "Failed to find archive for image '" +
-        stringify(reference) + "' at '" + tarPath + "'");
+        image + "' at '" + tarPath + "'");
   }
 
   VLOG(1) << "Untarring image '" << reference

http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp b/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp
index 4d2e497..37f2510 100644
--- a/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp
+++ b/src/slave/containerizer/mesos/provisioner/docker/local_puller.hpp
@@ -21,6 +21,8 @@
 
 #include <mesos/docker/spec.hpp>
 
+#include <mesos/uri/fetcher.hpp>
+
 #include "slave/containerizer/mesos/provisioner/docker/puller.hpp"
 
 #include "slave/flags.hpp"
@@ -42,7 +44,9 @@ class LocalPullerProcess;
 class LocalPuller : public Puller
 {
 public:
-  static Try<process::Owned<Puller>> create(const Flags& flags);
+  static Try<process::Owned<Puller>> create(
+      const Flags& flags,
+      const process::Shared<uri::Fetcher>& fetcher);
 
   ~LocalPuller();
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/puller.cpp b/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
index 647cf05..cb4248b 100644
--- a/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
+++ b/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
@@ -36,8 +36,16 @@ Try<Owned<Puller>> Puller::create(
     const Shared<uri::Fetcher>& fetcher,
     SecretResolver* secretResolver)
 {
-  if (strings::startsWith(flags.docker_registry, "/")) {
-    Try<Owned<Puller>> puller = LocalPuller::create(flags);
+  // TODO(gilbert): Consider introducing a new protobuf API to
+  // represent a docker image by an optional URI in Image::Docker,
+  // so that the source of docker images does not necessarily come
+  // from the agent flag.
+  // TODO(gilbert): Support multiple pullers simultaneously in the
+  // docker store, so that users could choose to pull from either
+  // image tarballs or the remote docker registry.
+  if (strings::startsWith(flags.docker_registry, "/") ||
+      strings::startsWith(flags.docker_registry, "hdfs://")) {
+    Try<Owned<Puller>> puller = LocalPuller::create(flags, fetcher);
     if (puller.isError()) {
       return Error("Failed to create local puller: " + puller.error());
     }
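
With this change the local puller is selected for two kinds of
--docker_registry values rather than one. A minimal sketch (editorial, not
part of the commit) of the dispatch rule, assuming only what the hunk above
shows; the helper name isImageArchiveSource() is hypothetical:

#include <string>

#include <stout/strings.hpp>

// An absolute path or an "hdfs://" URI selects the local (image archive)
// puller; any other value is treated as a remote Docker registry URL.
static bool isImageArchiveSource(const std::string& dockerRegistry)
{
  return strings::startsWith(dockerRegistry, "/") ||
         strings::startsWith(dockerRegistry, "hdfs://");
}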

http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/src/slave/containerizer/mesos/provisioner/docker/store.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/store.cpp b/src/slave/containerizer/mesos/provisioner/docker/store.cpp
index f6b8f39..6e7dc44 100644
--- a/src/slave/containerizer/mesos/provisioner/docker/store.cpp
+++ b/src/slave/containerizer/mesos/provisioner/docker/store.cpp
@@ -144,6 +144,10 @@ Try<Owned<slave::Store>> Store::create(
   _flags.docker_stall_timeout = flags.fetcher_stall_timeout;
 #endif
 
+  if (flags.hadoop_home.isSome()) {
+    _flags.hadoop_client = path::join(flags.hadoop_home.get(), "bin", "hadoop");
+  }
+
   Try<Owned<uri::Fetcher>> fetcher = uri::fetcher::create(_flags);
   if (fetcher.isError()) {
     return Error("Failed to create the URI fetcher: " + fetcher.error());

http://git-wip-us.apache.org/repos/asf/mesos/blob/044aa6f9/src/slave/flags.cpp
----------------------------------------------------------------------
diff --git a/src/slave/flags.cpp b/src/slave/flags.cpp
index 06c5421..8e448d8 100644
--- a/src/slave/flags.cpp
+++ b/src/slave/flags.cpp
@@ -200,9 +200,11 @@ mesos::internal::slave::Flags::Flags()
       "docker_registry",
       "The default url for Mesos containerizer to pull Docker images. It\n"
       "could either be a Docker registry server url (i.e: `https://registry.docker.io`),\n" // NOLINT(whitespace/line_length)
-      "or a local path (i.e: `/tmp/docker/images`) in which Docker image\n"
-      "archives (result of `docker save`) are stored. Note that this option\n"
-      "won't change the default registry server for Docker containerizer.",
+      "or a source that Docker image archives (result of `docker save`) are\n"
+      "stored. The Docker archive source could be specified either as a local\n"
+      "path (i.e: `/tmp/docker/images`), or as an HDFS URI\n"
+      "(i.e: `hdfs://localhost:8020/archives/`) that this option won't change\n"
+      "the default registry server for Docker containerizer.",
       "https://registry-1.docker.io");
 
   add(&Flags::docker_store_dir,


[3/6] mesos git commit: Removed an invalid TODO in puller.cpp.

Posted by gi...@apache.org.
Removed an invalid TODO in puller.cpp.

Review: https://reviews.apache.org/r/66650


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/44d0ef14
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/44d0ef14
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/44d0ef14

Branch: refs/heads/master
Commit: 44d0ef1484c111750b519d5f3210d8fa6aee9c85
Parents: d24ef17
Author: Gilbert Song <so...@gmail.com>
Authored: Wed Apr 11 14:42:16 2018 -0700
Committer: Gilbert Song <so...@gmail.com>
Committed: Thu Jun 7 12:11:37 2018 -0700

----------------------------------------------------------------------
 src/slave/containerizer/mesos/provisioner/docker/puller.cpp | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/44d0ef14/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/mesos/provisioner/docker/puller.cpp b/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
index d7d8987..647cf05 100644
--- a/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
+++ b/src/slave/containerizer/mesos/provisioner/docker/puller.cpp
@@ -36,7 +36,6 @@ Try<Owned<Puller>> Puller::create(
     const Shared<uri::Fetcher>& fetcher,
     SecretResolver* secretResolver)
 {
-  // TODO(tnachen): Support multiple registries in the puller.
   if (strings::startsWith(flags.docker_registry, "/")) {
     Try<Owned<Puller>> puller = LocalPuller::create(flags);
     if (puller.isError()) {