Posted to commits@mesos.apache.org by ne...@apache.org on 2017/04/11 20:35:33 UTC

[4/6] mesos git commit: Cleaned up `strings::tokenize` and `strings::split`.

Cleaned up `strings::tokenize` and `strings::split`.

The previous code used a `while` loop condition that was invariant
over the body of the loop; it is clearer to write this as an `if`
statement instead. Also clarified comments.

Review: https://reviews.apache.org/r/57513
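
For illustration, a minimal usage sketch of the semantics described
above (not part of this commit; it assumes a C++11 compiler with
stout's <stout/strings.hpp> on the include path, and relies on
`Option<size_t>` converting implicitly from an integer, as stout's
`Option` does):

    #include <cassert>
    #include <string>
    #include <vector>

    #include <stout/strings.hpp>

    int main()
    {
      // `tokenize` drops empty tokens, so runs of delimiters collapse
      // and leading/trailing delimiters are ignored.
      std::vector<std::string> t = strings::tokenize("/a//b/", "/");
      assert((t == std::vector<std::string>{"a", "b"}));

      // `split` keeps empty tokens, including leading/trailing ones.
      std::vector<std::string> s = strings::split("/a//b/", "/");
      assert((s == std::vector<std::string>{"", "a", "", "b", ""}));

      // With `maxTokens`, the last token returned contains the
      // remainder of the input string.
      std::vector<std::string> m = strings::tokenize("a b c", " ", 2);
      assert((m == std::vector<std::string>{"a", "b c"}));

      // `maxTokens == 0` yields an empty result; with this commit that
      // case returns early instead of being tested in the loop
      // condition on every iteration.
      assert(strings::tokenize("a b c", " ", 0).empty());

      return 0;
    }

Returning the remainder in the last token lets a caller peel off a
fixed number of leading fields without losing the tail of the input.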


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/6cee2a14
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/6cee2a14
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/6cee2a14

Branch: refs/heads/master
Commit: 6cee2a143f9fc47661c33525646c9d4968f955c9
Parents: f7c5dd1
Author: Neil Conway <ne...@gmail.com>
Authored: Fri Mar 10 11:54:51 2017 -0500
Committer: Neil Conway <ne...@gmail.com>
Committed: Tue Apr 11 13:11:25 2017 -0700

----------------------------------------------------------------------
 3rdparty/stout/include/stout/strings.hpp | 42 +++++++++++++++++----------
 1 file changed, 27 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/6cee2a14/3rdparty/stout/include/stout/strings.hpp
----------------------------------------------------------------------
diff --git a/3rdparty/stout/include/stout/strings.hpp b/3rdparty/stout/include/stout/strings.hpp
index fe469fa..067a792 100644
--- a/3rdparty/stout/include/stout/strings.hpp
+++ b/3rdparty/stout/include/stout/strings.hpp
@@ -129,23 +129,29 @@ inline std::string replace(
 }
 
 
-// Tokenizes the string using the delimiters.
-// Empty tokens will not be included in the result.
-// Optionally, maximum number of tokens to be returned
-// can be specified.
+// Tokenizes the string using the delimiters. Empty tokens will not be
+// included in the result.
+//
+// Optionally, the maximum number of tokens to be returned can be
+// specified. If the maximum number of tokens is reached, the last
+// token returned contains the remainder of the input string.
 inline std::vector<std::string> tokenize(
     const std::string& s,
     const std::string& delims,
     const Option<size_t>& maxTokens = None())
 {
-  size_t offset = 0;
+  if (maxTokens.isSome() && maxTokens.get() == 0) {
+    return {};
+  }
+
   std::vector<std::string> tokens;
+  size_t offset = 0;
 
-  while (maxTokens.isNone() || maxTokens.get() > 0) {
+  while (true) {
     size_t nonDelim = s.find_first_not_of(delims, offset);
 
     if (nonDelim == std::string::npos) {
-      break; // Nothing left
+      break; // Nothing left.
     }
 
     size_t delim = s.find_first_of(delims, nonDelim);
@@ -166,21 +172,27 @@ inline std::vector<std::string> tokenize(
 }
 
 
-// Splits the string using the provided delimiters.
-// The string is split each time at the first character
-// that matches any of the characters specified in delims.
-// Empty tokens are allowed in the result.
-// Optionally, maximum number of tokens to be returned
-// can be specified.
+// Splits the string using the provided delimiters. The string is
+// split each time at the first character that matches any of the
+// characters specified in delims.  Empty tokens are allowed in the
+// result.
+//
+// Optionally, the maximum number of tokens to be returned can be
+// specified. If the maximum number of tokens is reached, the last
+// token returned contains the remainder of the input string.
 inline std::vector<std::string> split(
     const std::string& s,
     const std::string& delims,
     const Option<size_t>& maxTokens = None())
 {
-  size_t offset = 0;
+  if (maxTokens.isSome() && maxTokens.get() == 0) {
+    return {};
+  }
+
   std::vector<std::string> tokens;
+  size_t offset = 0;
 
-  while (maxTokens.isNone() || maxTokens.get() > 0) {
+  while (true) {
     size_t next = s.find_first_of(delims, offset);
 
     // Finish splitting if this is the last token,