Posted to commits@buildstream.apache.org by tv...@apache.org on 2021/02/04 07:58:36 UTC

[buildstream] branch traveltissues/mr4 created (now 7e7e2e6)

This is an automated email from the ASF dual-hosted git repository.

tvb pushed a change to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git.


      at 7e7e2e6  element: always attempt to save the workspace config

This branch includes the following new commits:

     new 80df8e2  remote_execution: Update proto
     new f3fe105  local_cas: Update proto
     new 03007e1  element: Do not use deterministic times if sources support mtimes
     new 4f7a310  cascache: Add properties to CaptureTree requests
     new 7acfddd  Support properties in import_files()
     new a45c765  _casbaseddirectory: Add properties to IndexEntry
     new f735652  Add utils for file timestamp support
     new 0d1b6d0  cascache: parse timestamp and update mtimes in checkout
     new bbab470  _filebaseddirectory: support mtimes in `_import_files_from_cas`
     new 26aaf72  Use specified mtime in import_files
     new af17fe9  element: pass arbitrary timestamp to import_files
     new ce6b6c5  workspace: import mtimes when staging
     new d88fc7d  tests: test mtimes in storage
     new 955526a  tests: remove xfail for non-incremental-time RE workspace
     new 1120ad8  tests: improve RE workspace test
     new c3c0b04  tests: avoid testing utimes along with umask
     new 529e9b8  DEBUG: use new buildbox image
     new cc20ce3  Reference node properties specification in comments
     new 24eb3a2  tests: remove xfails for incremental RE workspaces
     new 4d0d43d  Replace workspace.prepared callback with attribute marking
     new b65a664  _artifact: import workspaced artifacts with mtimes
     new 8816ffa  cascache: append MTimes to capturefiles request
     new 0283e6b  _sandboxreapi: append MTimes to Action output properties
     new 3c35a5a  element: fix bug causing workspaces to always reprepare
     new b97e326  Rename workspace.last_successful to workspace.last_build
     new 7e7e2e6  element: always attempt to save the workspace config

The 26 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[buildstream] 06/26: _casbaseddirectory: Add properties to IndexEntry

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit a45c76586c590493cbe80de5e94661ec750f9eb3
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Dec 2 14:41:30 2019 +0000

    _casbaseddirectory: Add properties to IndexEntry
    
    supports properties in `_partial_import_cas_into_cas`
    
    _casbaseddirectory: initialise entrynode properties
    
    Copy node properties from/to IndexEntry
    
    _casbaseddirectory: sort properties before copying to filenode
---
 src/buildstream/storage/_casbaseddirectory.py  | 37 +++++++++++++++++++++-----
 src/buildstream/storage/_filebaseddirectory.py |  2 +-
 src/buildstream/storage/directory.py           |  2 +-
 3 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/src/buildstream/storage/_casbaseddirectory.py b/src/buildstream/storage/_casbaseddirectory.py
index bfa3f82..377a461 100644
--- a/src/buildstream/storage/_casbaseddirectory.py
+++ b/src/buildstream/storage/_casbaseddirectory.py
@@ -29,13 +29,14 @@ See also: :ref:`sandboxing`.
 
 import os
 import stat
+import copy
 import tarfile as tarfilelib
 from io import StringIO
 
 from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
 from .directory import Directory, VirtualDirectoryError, _FileType
 from ._filebaseddirectory import FileBasedDirectory
-from ..utils import FileListResult, BST_ARBITRARY_TIMESTAMP
+from ..utils import FileListResult, BST_ARBITRARY_TIMESTAMP, _get_file_mtimestamp
 
 
 class IndexEntry:
@@ -50,7 +51,8 @@ class IndexEntry:
         target=None,
         is_executable=False,
         buildstream_object=None,
-        modified=False
+        modified=False,
+        node_properties=None
     ):
         self.name = name
         self.type = entrytype
@@ -59,6 +61,7 @@ class IndexEntry:
         self.is_executable = is_executable
         self.buildstream_object = buildstream_object
         self.modified = modified
+        self.node_properties = copy.deepcopy(node_properties)
 
     def get_directory(self, parent):
         if not self.buildstream_object:
@@ -126,7 +129,11 @@ class CasBasedDirectory(Directory):
             self.index[entry.name] = IndexEntry(entry.name, _FileType.DIRECTORY, digest=entry.digest)
         for entry in pb2_directory.files:
             self.index[entry.name] = IndexEntry(
-                entry.name, _FileType.REGULAR_FILE, digest=entry.digest, is_executable=entry.is_executable
+                entry.name,
+                _FileType.REGULAR_FILE,
+                digest=entry.digest,
+                is_executable=entry.is_executable,
+                node_properties=entry.node_properties,
             )
         for entry in pb2_directory.symlinks:
             self.index[entry.name] = IndexEntry(entry.name, _FileType.SYMLINK, target=entry.target)
@@ -150,11 +157,18 @@ class CasBasedDirectory(Directory):
 
         return newdir
 
-    def _add_file(self, basename, filename, modified=False, can_link=False):
+    def _add_file(self, basename, filename, modified=False, can_link=False, properties=None):
         entry = IndexEntry(filename, _FileType.REGULAR_FILE, modified=modified or filename in self.index)
         path = os.path.join(basename, filename)
         entry.digest = self.cas_cache.add_object(path=path, link_directly=can_link)
         entry.is_executable = os.access(path, os.X_OK)
+        properties = properties or []
+        entry.node_properties = []
+        if "MTime" in properties:
+            node_property = remote_execution_pb2.NodeProperty()
+            node_property.name = "MTime"
+            node_property.value = _get_file_mtimestamp(path)
+            entry.node_properties.append(node_property)
         self.index[filename] = entry
 
         self.__invalidate_digest()
@@ -333,6 +347,7 @@ class CasBasedDirectory(Directory):
                             digest=entry.digest,
                             is_executable=entry.is_executable,
                             modified=True,
+                            node_properties=entry.node_properties,
                         )
                         self.__invalidate_digest()
                     else:
@@ -341,7 +356,14 @@ class CasBasedDirectory(Directory):
                     result.files_written.append(relative_pathname)
 
     def import_files(
-        self, external_pathspec, *, filter_callback=None, report_written=True, update_mtime=False, can_link=False
+        self,
+        external_pathspec,
+        *,
+        filter_callback=None,
+        report_written=True,
+        update_mtime=False,
+        can_link=False,
+        properties=None
     ):
         """ See superclass Directory for arguments """
 
@@ -368,13 +390,14 @@ class CasBasedDirectory(Directory):
 
         return result
 
-    def import_single_file(self, external_pathspec):
+    def import_single_file(self, external_pathspec, properties=None):
         result = FileListResult()
         if self._check_replacement(os.path.basename(external_pathspec), os.path.dirname(external_pathspec), result):
             self._add_file(
                 os.path.dirname(external_pathspec),
                 os.path.basename(external_pathspec),
                 modified=os.path.basename(external_pathspec) in result.overwritten,
+                properties=properties,
             )
             result.files_written.append(external_pathspec)
         return result
@@ -639,6 +662,8 @@ class CasBasedDirectory(Directory):
                     filenode.name = name
                     filenode.digest.CopyFrom(entry.digest)
                     filenode.is_executable = entry.is_executable
+                    if entry.node_properties:
+                        filenode.node_properties.extend(copy.deepcopy(sorted(entry.node_properties)))
                 elif entry.type == _FileType.SYMLINK:
                     symlinknode = pb2_directory.symlinks.add()
                     symlinknode.name = name
diff --git a/src/buildstream/storage/_filebaseddirectory.py b/src/buildstream/storage/_filebaseddirectory.py
index 95d113e..d01b5ce 100644
--- a/src/buildstream/storage/_filebaseddirectory.py
+++ b/src/buildstream/storage/_filebaseddirectory.py
@@ -130,7 +130,7 @@ class FileBasedDirectory(Directory):
                 os.utime(os.path.join(self.external_directory, f), times=(cur_time, cur_time))
         return import_result
 
-    def import_single_file(self, external_pathspec):
+    def import_single_file(self, external_pathspec, properties=None):
         dstpath = os.path.join(self.external_directory, os.path.basename(external_pathspec))
         result = FileListResult()
         if os.path.exists(dstpath):
diff --git a/src/buildstream/storage/directory.py b/src/buildstream/storage/directory.py
index 4cec772..92388b1 100644
--- a/src/buildstream/storage/directory.py
+++ b/src/buildstream/storage/directory.py
@@ -114,7 +114,7 @@ class Directory:
 
         raise NotImplementedError()
 
-    def import_single_file(self, external_pathspec):
+    def import_single_file(self, external_pathspec, properties=None):
         """Imports a single file from an external path"""
         raise NotImplementedError()
 

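For illustration, the round trip this commit enables looks roughly like the
following sketch (Python; `_get_file_mtimestamp` is the private helper added
earlier in this series, assumed to return an RFC 3339 timestamp string):

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    from buildstream.utils import _get_file_mtimestamp

    def mtime_property(path):
        # Build the "MTime" NodeProperty that _add_file() attaches to an
        # IndexEntry when "MTime" appears in the requested properties.
        prop = remote_execution_pb2.NodeProperty()
        prop.name = "MTime"
        prop.value = _get_file_mtimestamp(path)  # e.g. "2017-01-15T01:30:15.01Z"
        return prop

On serialization, these entries are deep-copied and sorted before being
extended onto the FileNode, in line with the canonical-form requirement that
node properties be sorted by name.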

[buildstream] 03/26: element: Do not use deterministic times if sources support mtimes

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 03007e14efa710cd913c02b0bb177906e01a85f8
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Dec 2 12:41:53 2019 +0000

    element: Do not use deterministic times if sources support mtimes
---
 src/buildstream/element.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index 595724f..f634537 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -1417,6 +1417,7 @@ class Element(Plugin):
     def _stage_sources_at(self, vdirectory, usebuildtree=False):
 
         context = self._get_context()
+        set_deterministic_mtimes = True
 
         # It's advantageous to have this temporary directory on
         # the same file system as the rest of our cache.
@@ -1455,6 +1456,8 @@ class Element(Plugin):
                         for source in self.__sources[last_required_previous_ix:]:
                             source_dir = sourcecache.export(source)
                             import_dir.import_files(source_dir)
+                            if source.BST_STAGE_VIRTUAL_DIRECTORY:
+                                set_deterministic_mtimes = False
 
                     except SourceCacheError as e:
                         raise ElementError("Error trying to export source for {}: {}".format(self.name, e))
@@ -1468,7 +1471,8 @@ class Element(Plugin):
                 vdirectory.import_files(import_dir)
 
         # Ensure deterministic mtime of sources at build time
-        vdirectory.set_deterministic_mtime()
+        if set_deterministic_mtimes:
+            vdirectory.set_deterministic_mtime()
         # Ensure deterministic owners of sources at build time
         vdirectory.set_deterministic_user()
 

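In outline, the staging logic after this change reads as follows (a
simplified sketch of `_stage_sources_at`, not the verbatim implementation):

    set_deterministic_mtimes = True
    for source in sources:
        import_dir.import_files(sourcecache.export(source))
        if source.BST_STAGE_VIRTUAL_DIRECTORY:
            # This source stages through a virtual directory and can carry
            # real mtimes, so don't flatten them to a fixed timestamp.
            set_deterministic_mtimes = False

    vdirectory.import_files(import_dir)
    if set_deterministic_mtimes:
        vdirectory.set_deterministic_mtime()
    vdirectory.set_deterministic_user()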

[buildstream] 22/26: cascache: append MTimes to capturefiles request

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 8816ffaa64e6e9de248c02231fa2bcc791dab018
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Tue Dec 31 12:05:30 2019 +0000

    cascache: append MTimes to capturefiles request
---
 src/buildstream/_cas/cascache.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/buildstream/_cas/cascache.py b/src/buildstream/_cas/cascache.py
index bb2abc6..725ce6a 100644
--- a/src/buildstream/_cas/cascache.py
+++ b/src/buildstream/_cas/cascache.py
@@ -321,6 +321,7 @@ class CASCache:
                 path = tmp.name
 
             request = local_cas_pb2.CaptureFilesRequest()
+            request.node_properties.append("MTime")
             if instance_name:
                 request.instance_name = instance_name
 

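A condensed sketch of the request this one-liner amends (taken from the
surrounding context in cascache.py; the upload logic is omitted):

    request = local_cas_pb2.CaptureFilesRequest()
    request.node_properties.append("MTime")  # ask LocalCAS to record mtimes
    if instance_name:
        request.instance_name = instance_name

The checkout side (commit 0d1b6d0 in this series) then parses the MTime
property returned for each captured file and restores it on disk.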

[buildstream] 10/26: Use specified mtime in import_files

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 26aaf72d5efd9b9e74ae6d7220169552595e1a03
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Jan 8 12:40:43 2020 +0000

    Use specified mtime in import_files
    
    If the mtime is specified, try to copy files in file-to-file
    transfers and reset the mtimes to the parameter value.
---
 src/buildstream/storage/_casbaseddirectory.py  |  2 +-
 src/buildstream/storage/_filebaseddirectory.py | 48 +++++++++++++++-----------
 src/buildstream/storage/directory.py           |  7 ++--
 3 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/src/buildstream/storage/_casbaseddirectory.py b/src/buildstream/storage/_casbaseddirectory.py
index 377a461..6fdca1a 100644
--- a/src/buildstream/storage/_casbaseddirectory.py
+++ b/src/buildstream/storage/_casbaseddirectory.py
@@ -361,7 +361,7 @@ class CasBasedDirectory(Directory):
         *,
         filter_callback=None,
         report_written=True,
-        update_mtime=False,
+        update_mtime=None,
         can_link=False,
         properties=None
     ):
diff --git a/src/buildstream/storage/_filebaseddirectory.py b/src/buildstream/storage/_filebaseddirectory.py
index 4c6c92f..a38e2ac 100644
--- a/src/buildstream/storage/_filebaseddirectory.py
+++ b/src/buildstream/storage/_filebaseddirectory.py
@@ -30,7 +30,6 @@ See also: :ref:`sandboxing`.
 import os
 import shutil
 import stat
-import time
 
 from .directory import Directory, VirtualDirectoryError, _FileType
 from .. import utils
@@ -84,7 +83,7 @@ class FileBasedDirectory(Directory):
         *,
         filter_callback=None,
         report_written=True,
-        update_mtime=False,
+        update_mtime=None,
         can_link=False,
         properties=None
     ):
@@ -93,13 +92,15 @@ class FileBasedDirectory(Directory):
         from ._casbaseddirectory import CasBasedDirectory  # pylint: disable=cyclic-import
 
         if isinstance(external_pathspec, CasBasedDirectory):
-            if can_link and not update_mtime:
+            if can_link:
                 actionfunc = utils.safe_link
             else:
                 actionfunc = utils.safe_copy
 
             import_result = FileListResult()
-            self._import_files_from_cas(external_pathspec, actionfunc, filter_callback, result=import_result)
+            self._import_files_from_cas(
+                external_pathspec, actionfunc, filter_callback, update_mtime=update_mtime, result=import_result,
+            )
         else:
             if isinstance(external_pathspec, Directory):
                 source_directory = external_pathspec.external_directory
@@ -122,14 +123,10 @@ class FileBasedDirectory(Directory):
                     ignore_missing=False,
                     report_written=report_written,
                 )
+                if update_mtime:
+                    for f in import_result.files_written:
+                        os.utime(os.path.join(self.external_directory, f), times=(update_mtime, update_mtime))
 
-        # do not update times if these were set via nodes
-        properties = properties or []
-        if update_mtime and "MTime" not in properties:
-            cur_time = time.time()
-
-            for f in import_result.files_written:
-                os.utime(os.path.join(self.external_directory, f), times=(cur_time, cur_time))
         return import_result
 
     def import_single_file(self, external_pathspec, properties=None):
@@ -250,7 +247,9 @@ class FileBasedDirectory(Directory):
         else:
             return _FileType.SPECIAL_FILE
 
-    def _import_files_from_cas(self, source_directory, actionfunc, filter_callback, *, path_prefix="", result):
+    def _import_files_from_cas(
+        self, source_directory, actionfunc, filter_callback, *, path_prefix="", update_mtime=None, result
+    ):
         """ Import files from a CAS-based directory. """
 
         for name, entry in source_directory.index.items():
@@ -275,7 +274,12 @@ class FileBasedDirectory(Directory):
                     )
 
                 dest_subdir._import_files_from_cas(
-                    src_subdir, actionfunc, filter_callback, path_prefix=relative_pathname, result=result
+                    src_subdir,
+                    actionfunc,
+                    filter_callback,
+                    path_prefix=relative_pathname,
+                    result=result,
+                    update_mtime=update_mtime,
                 )
 
             if filter_callback and not filter_callback(relative_pathname):
@@ -300,18 +304,20 @@ class FileBasedDirectory(Directory):
                     src_path = source_directory.cas_cache.objpath(entry.digest)
 
                     # fallback to copying if we require mtime support on this file
-                    if entry.node_properties:
+                    if update_mtime or entry.node_properties:
                         utils.safe_copy(src_path, dest_path, result=result)
-                        mtime = None
-                        for prop in entry.node_properties:
-                            if prop.name == "MTime" and prop.value:
-                                mtime = prop.value
-                            else:
-                                raise ImplError("{} is not a supported node property.".format(prop.name))
+                        mtime = update_mtime
+                        # XXX mtime property will override specified mtime
+                        if entry.node_properties:
+                            for prop in entry.node_properties:
+                                if prop.name == "MTime" and prop.value:
+                                    mtime = utils._parse_timestamp(prop.value)
+                                else:
+                                    raise ImplError("{} is not a supported node property.".format(prop.name))
                         if mtime:
                             utils._set_file_mtime(dest_path, mtime)
                     else:
-                        utils.safe_link(src_path, dest_path, result=result)
+                        actionfunc(src_path, dest_path, result=result)
 
                     if entry.is_executable:
                         os.chmod(
diff --git a/src/buildstream/storage/directory.py b/src/buildstream/storage/directory.py
index 92388b1..674c0b1 100644
--- a/src/buildstream/storage/directory.py
+++ b/src/buildstream/storage/directory.py
@@ -81,7 +81,7 @@ class Directory:
         *,
         filter_callback: Optional[Callable[[str], bool]] = None,
         report_written: bool = True,
-        update_mtime: bool = False,
+        update_mtime: Optional[float] = None,
         can_link: bool = False,
         properties: Optional[List[str]] = None
     ) -> FileListResult:
@@ -98,12 +98,11 @@ class Directory:
             written. Defaults to true. If false, only a list of
             overwritten files is returned.
           update_mtime: Update the access and modification time
-            of each file copied to the current time.
+            of each file copied to the time specified in seconds.
           can_link: Whether it's OK to create a hard link to the
             original content, meaning the stored copy will change when the
             original files change. Setting this doesn't guarantee hard
-            links will be made. can_link will never be used if
-            update_mtime is set.
+            links will be made.
           properties: Optional list of strings representing file properties
             to capture when importing.
 

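As a usage sketch of the changed signature (a hypothetical caller; `vdir` is
any Directory implementation):

    import time

    # update_mtime is now an optional timestamp in seconds rather than a
    # boolean: every file written by the import is stamped with this value,
    # unless an MTime node property on a source entry overrides it.
    result = vdir.import_files(source_dir, update_mtime=time.time())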

[buildstream] 21/26: _artifact: import workspaced artifacts with mtimes

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit b65a6645c29c35896f9370fa3a61f06138289981
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Dec 30 09:42:02 2019 +0000

    _artifact: import workspaced artifacts with mtimes
---
 src/buildstream/_artifact.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/_artifact.py b/src/buildstream/_artifact.py
index feba389..7d91a3a 100644
--- a/src/buildstream/_artifact.py
+++ b/src/buildstream/_artifact.py
@@ -153,11 +153,12 @@ class Artifact:
         artifact.weak_key = self._weak_cache_key
 
         artifact.was_workspaced = bool(element._get_workspace())
+        properties = ["MTime"] if artifact.was_workspaced else []
 
         # Store files
         if collectvdir:
             filesvdir = CasBasedDirectory(cas_cache=self._cas)
-            filesvdir.import_files(collectvdir)
+            filesvdir.import_files(collectvdir, properties=properties)
             artifact.files.CopyFrom(filesvdir._get_digest())
             size += filesvdir.get_size()
 
@@ -189,7 +190,7 @@ class Artifact:
         # Store build tree
         if sandbox_build_dir:
             buildtreevdir = CasBasedDirectory(cas_cache=self._cas)
-            buildtreevdir.import_files(sandbox_build_dir)
+            buildtreevdir.import_files(sandbox_build_dir, properties=properties)
             artifact.buildtree.CopyFrom(buildtreevdir._get_digest())
             size += buildtreevdir.get_size()
 

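In effect, artifact creation now opts into mtime capture only for workspaced
elements; condensed from the diff above:

    properties = ["MTime"] if artifact.was_workspaced else []
    filesvdir.import_files(collectvdir, properties=properties)
    buildtreevdir.import_files(sandbox_build_dir, properties=properties)

Non-workspaced builds continue to be stored without mtime properties.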

[buildstream] 01/26: remote_execution: Update proto

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 80df8e27e7f5b0ec82baa44de33d770f3830eea1
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Dec 11 14:27:52 2019 +0000

    remote_execution: Update proto
---
 src/buildstream/_cas/casserver.py                  |   4 +-
 .../remote/execution/v2/remote_execution.proto     | 463 ++++++++++++----
 .../remote/execution/v2/remote_execution_pb2.py    | 613 ++++++++++++++++-----
 .../execution/v2/remote_execution_pb2_grpc.py      |  60 +-
 4 files changed, 873 insertions(+), 267 deletions(-)

diff --git a/src/buildstream/_cas/casserver.py b/src/buildstream/_cas/casserver.py
index 882e7e6..dd822d5 100644
--- a/src/buildstream/_cas/casserver.py
+++ b/src/buildstream/_cas/casserver.py
@@ -268,10 +268,10 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
         response = remote_execution_pb2.ServerCapabilities()
 
         cache_capabilities = response.cache_capabilities
-        cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
+        cache_capabilities.digest_function.append(remote_execution_pb2.DigestFunction.SHA256)
         cache_capabilities.action_cache_update_capabilities.update_enabled = False
         cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
-        cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
+        cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.SymlinkAbsolutePathStrategy.ALLOWED
 
         response.deprecated_api_version.major = 2
         response.low_api_version.major = 2
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
index 7edbce3..efbf513 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
@@ -81,6 +81,7 @@ service Execution {
   // action will be reported in the `status` field of the `ExecuteResponse`. The
   // server MUST NOT set the `error` field of the `Operation` proto.
   // The possible errors include:
+  //
   // * `INVALID_ARGUMENT`: One or more arguments are invalid.
   // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
   //   action requested, such as a missing input or command or no worker being
@@ -93,6 +94,9 @@ service Execution {
   // * `INTERNAL`: An internal error occurred in the execution engine or the
   //   worker.
   // * `DEADLINE_EXCEEDED`: The execution timed out.
+  // * `CANCELLED`: The operation was cancelled by the client. This status is
+  //   only possible if the server implements the Operations API CancelOperation
+  //   method, and it was called for the current execution.
   //
   // In the case of a missing input or command, the server SHOULD additionally
   // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
@@ -124,10 +128,7 @@ service Execution {
 //
 // The lifetime of entries in the action cache is implementation-specific, but
 // the server SHOULD assume that more recently used entries are more likely to
-// be used again. Additionally, action cache implementations SHOULD ensure that
-// any blobs referenced in the
-// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
-// are still valid when returning a result.
+// be used again.
 //
 // As with other services in the Remote Execution API, any call may return an
 // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
@@ -136,7 +137,15 @@ service Execution {
 service ActionCache {
   // Retrieve a cached execution result.
   //
+  // Implementations SHOULD ensure that any blobs referenced from the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+  // are available at the time of returning the
+  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be
+  // for some period of time afterwards. The TTLs of the referenced blobs SHOULD be increased
+  // if necessary and applicable.
+  //
   // Errors:
+  //
   // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
   rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
     option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
@@ -144,11 +153,6 @@ service ActionCache {
 
   // Upload a new execution result.
   //
-  // This method is intended for servers which implement the distributed cache
-  // independently of the
-  // [Execution][build.bazel.remote.execution.v2.Execution] API. As a
-  // result, it is OPTIONAL for servers to implement.
-  //
   // In order to allow the server to perform access control based on the type of
   // action, and to assist with client debugging, the client MUST first upload
   // the [Action][build.bazel.remote.execution.v2.Execution] that produced the
@@ -157,7 +161,10 @@ service ActionCache {
   // `ContentAddressableStorage`.
   //
   // Errors:
-  // * `NOT_IMPLEMENTED`: This method is not supported by the server.
+  //
+  // * `INVALID_ARGUMENT`: One or more arguments are invalid.
+  // * `FAILED_PRECONDITION`: One or more errors occurred in updating the
+  //   action result, such as a missing command or action.
   // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
   //   entry to the cache.
   rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
@@ -181,8 +188,8 @@ service ActionCache {
 // hierarchy, which must also each be uploaded on their own.
 //
 // For small file uploads the client should group them together and call
-// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
-// on chunks of no more than 10 MiB. For large uploads, the client must use the
+// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+// For large uploads, the client must use the
 // [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
 // `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
 // where `instance_name` is as described in the next paragraph, `uuid` is a
@@ -204,6 +211,9 @@ service ActionCache {
 // by the server. For servers which do not support multiple instances, then the
 // `instance_name` is the empty path and the leading slash is omitted, so that
 // the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+// To simplify parsing, a path segment cannot equal any of the following
+// keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+// `capabilities`.
 //
 // When attempting an upload, if another client has already completed the upload
 // (which may occur in the middle of a single upload if another client uploads
@@ -255,10 +265,12 @@ service ContentAddressableStorage {
   // independently.
   //
   // Errors:
+  //
   // * `INVALID_ARGUMENT`: The client attempted to upload more than the
   //   server supported limit.
   //
   // Individual requests may return the following errors, additionally:
+  //
   // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
   // * `INVALID_ARGUMENT`: The
   // [Digest][build.bazel.remote.execution.v2.Digest] does not match the
@@ -281,6 +293,7 @@ service ContentAddressableStorage {
   // independently.
   //
   // Errors:
+  //
   // * `INVALID_ARGUMENT`: The client attempted to read more than the
   //   server supported limit.
   //
@@ -310,6 +323,8 @@ service ContentAddressableStorage {
   // If part of the tree is missing from the CAS, the server will return the
   // portion present and omit the rest.
   //
+  // Errors:
+  //
   // * `NOT_FOUND`: The requested tree root is not present in the CAS.
   rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) {
     option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" };
@@ -323,7 +338,14 @@ service ContentAddressableStorage {
 // The query may include a particular `instance_name`, in which case the values
 // returned will pertain to that instance.
 service Capabilities {
-  // GetCapabilities returns the server capabilities configuration.
+  // GetCapabilities returns the server capabilities configuration of the
+  // remote endpoint.
+  // Only the capabilities of the services supported by the endpoint will
+  // be returned:
+  // * Execution + CAS + Action Cache endpoints should return both
+  //   CacheCapabilities and ExecutionCapabilities.
+  // * Execution only endpoints should return ExecutionCapabilities.
+  // * CAS + Action Cache only endpoints should return CacheCapabilities.
   rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
     option (google.api.http) = {
       get: "/v2/{instance_name=**}/capabilities"
@@ -387,8 +409,19 @@ message Action {
   // immediately, rather than whenever the cache entry gets evicted.
   google.protobuf.Duration timeout = 6;
 
-  // If true, then the `Action`'s result cannot be cached.
+  // If true, then the `Action`'s result cannot be cached, and in-flight
+  // requests for the same `Action` may not be merged.
   bool do_not_cache = 7;
+
+  // List of required supported [NodeProperty][build.bazel.remote.execution.v2.NodeProperty]
+  // keys. In order to ensure that equivalent `Action`s always hash to the same
+  // value, the supported node properties MUST be lexicographically sorted by name.
+  // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
+  //
+  // The interpretation of these properties is server-dependent. If a property is
+  // not recognized by the server, the server will return an `INVALID_ARGUMENT`
+  // error.
+  repeated string output_node_properties = 8;
 }
 
 // A `Command` is the actual command executed by a worker running an
@@ -418,7 +451,8 @@ message Command {
   // provide its own default environment variables; these defaults can be
   // overridden using this field. Additional variables can also be specified.
   //
-  // In order to ensure that equivalent `Command`s always hash to the same
+  // In order to ensure that equivalent
+  // [Command][build.bazel.remote.execution.v2.Command]s always hash to the same
   // value, the environment variables MUST be lexicographically sorted by name.
   // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
   repeated EnvironmentVariable environment_variables = 2;
@@ -426,7 +460,8 @@ message Command {
   // A list of the output files that the client expects to retrieve from the
   // action. Only the listed files, as well as directories listed in
   // `output_directories`, will be returned to the client as output.
-  // Other files that may be created during command execution are discarded.
+  // Other files or directories that may be created during command execution
+  // are discarded.
   //
   // The paths are relative to the working directory of the action execution.
   // The paths are specified using a single forward slash (`/`) as a path
@@ -438,16 +473,22 @@ message Command {
   // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
   // bytes).
   //
-  // An output file cannot be duplicated, be a parent of another output file, be
-  // a child of a listed output directory, or have the same path as any of the
-  // listed output directories.
+  // An output file cannot be duplicated, be a parent of another output file, or
+  // have the same path as any of the listed output directories.
+  //
+  // Directories leading up to the output files are created by the worker prior
+  // to execution, even if they are not explicitly part of the input root.
+  //
+  // DEPRECATED since v2.1: Use `output_paths` instead.
   repeated string output_files = 3;
 
   // A list of the output directories that the client expects to retrieve from
-  // the action. Only the contents of the indicated directories (recursively
-  // including the contents of their subdirectories) will be
-  // returned, as well as files listed in `output_files`. Other files that may
-  // be created during command execution are discarded.
+  // the action. Only the listed directories will be returned (an entire
+  // directory structure will be returned as a
+  // [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
+  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]), as
+  // well as files listed in `output_files`. Other files or directories that
+  // may be created during command execution are discarded.
   //
   // The paths are relative to the working directory of the action execution.
   // The paths are specified using a single forward slash (`/`) as a path
@@ -461,15 +502,52 @@ message Command {
   // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
   // bytes).
   //
-  // An output directory cannot be duplicated, be a parent of another output
-  // directory, be a parent of a listed output file, or have the same path as
-  // any of the listed output files.
+  // An output directory cannot be duplicated or have the same path as any of
+  // the listed output files. An output directory is allowed to be a parent of
+  // another output directory.
+  //
+  // Directories leading up to the output directories (but not the output
+  // directories themselves) are created by the worker prior to execution, even
+  // if they are not explicitly part of the input root.
+  //
+  // DEPRECATED since 2.1: Use `output_paths` instead.
   repeated string output_directories = 4;
 
+  // A list of the output paths that the client expects to retrieve from the
+  // action. Only the listed paths will be returned to the client as output.
+  // The type of the output (file or directory) is not specified, and will be
+  // determined by the server after action execution. If the resulting path is
+  // a file, it will be returned in an
+  // [OutputFile][build.bazel.remote.execution.v2.OutputFile]) typed field.
+  // If the path is a directory, the entire directory structure will be returned
+  // as a [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
+  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory])
+  // Other files or directories that may be created during command execution
+  // are discarded.
+  //
+  // The paths are relative to the working directory of the action execution.
+  // The paths are specified using a single forward slash (`/`) as a path
+  // separator, even if the execution platform natively uses a different
+  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+  // being a relative path.
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be deduplicated and sorted lexicographically by code point (or,
+  // equivalently, by UTF-8 bytes).
+  //
+  // Directories leading up to the output paths are created by the worker prior
+  // to execution, even if they are not explicitly part of the input root.
+  //
+  // New in v2.1: this field supersedes the DEPRECATED `output_files` and
+  // `output_directories` fields. If `output_paths` is used, `output_files` and
+  // `output_directories` will be ignored!
+  repeated string output_paths = 7;
+
   // The platform requirements for the execution environment. The server MAY
   // choose to execute the action on any worker satisfying the requirements, so
   // the client SHOULD ensure that running the action on any such worker will
   // have the same result.
+  // A detailed lexicon for this can be found in the accompanying platform.md.
   Platform platform = 5;
 
   // The working directory, relative to the input root, for the command to run
@@ -527,12 +605,21 @@ message Platform {
 // In order to ensure that two equivalent directory trees hash to the same
 // value, the following restrictions MUST be obeyed when constructing a
 // a `Directory`:
-//   - Every child in the directory must have a path of exactly one segment.
-//     Multiple levels of directory hierarchy may not be collapsed.
-//   - Each child in the directory must have a unique path segment (file name).
-//   - The files, directories and symlinks in the directory must each be sorted
-//     in lexicographical order by path. The path strings must be sorted by code
-//     point, equivalently, by UTF-8 bytes.
+//
+// * Every child in the directory must have a path of exactly one segment.
+//   Multiple levels of directory hierarchy may not be collapsed.
+// * Each child in the directory must have a unique path segment (file name).
+//   Note that while the API itself is case-sensitive, the environment where
+//   the Action is executed may or may not be case-sensitive. That is, it is
+//   legal to call the API with a Directory that has both "Foo" and "foo" as
+//   children, but the Action may be rejected by the remote system upon
+//   execution.
+// * The files, directories and symlinks in the directory must each be sorted
+//   in lexicographical order by path. The path strings must be sorted by code
+//   point, equivalently, by UTF-8 bytes.
+// * The [NodeProperties][build.bazel.remote.execution.v2.NodeProperty] of files,
+//   directories, and symlinks must be sorted in lexicographical order by
+//   property name.
 //
 // A `Directory` that obeys the restrictions is said to be in canonical form.
 //
@@ -549,7 +636,13 @@ message Platform {
 //       digest: {
 //         hash: "4a73bc9d03...",
 //         size: 65534
-//       }
+//       },
+//       node_properties: [
+//         {
+//           "name": "MTime",
+//           "value": "2017-01-15T01:30:15.01Z"
+//         }
+//       ]
 //     }
 //   ],
 //   directories: [
@@ -586,6 +679,22 @@ message Directory {
 
   // The symlinks in the directory.
   repeated SymlinkNode symlinks = 3;
+
+  // The node properties of the Directory.
+  repeated NodeProperty node_properties = 4;
+}
+
+// A single property for [FileNodes][build.bazel.remote.execution.v2.FileNode],
+// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode], and
+// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. The server is
+// responsible for specifying the property `name`s that it accepts. If
+// permitted by the server, the same `name` may occur multiple times.
+message NodeProperty {
+    // The property name.
+    string name = 1;
+
+    // The property value.
+    string value = 2;
 }
 
 // A `FileNode` represents a single file and associated metadata.
@@ -600,6 +709,9 @@ message FileNode {
 
   // True if file is executable, false otherwise.
   bool is_executable = 4;
+
+  // The node properties of the FileNode.
+  repeated NodeProperty node_properties = 5;
 }
 
 // A `DirectoryNode` represents a child of a
@@ -628,11 +740,13 @@ message SymlinkNode {
   // API. The canonical form forbids the substrings `/./` and `//` in the target
   // path. `..` components are allowed anywhere in the target path.
   string target = 2;
+
+  // The node properties of the SymlinkNode.
+  repeated NodeProperty node_properties = 3;
 }
 
 // A content digest. A digest for a given blob consists of the size of the blob
-// and its hash. The hash algorithm to use is defined by the server, but servers
-// SHOULD use SHA-256.
+// and its hash. The hash algorithm to use is defined by the server.
 //
 // The size is considered to be an integral part of the digest and cannot be
 // separated. That is, even if the `hash` field is correctly specified but
@@ -652,11 +766,12 @@ message SymlinkNode {
 // When a `Digest` is used to refer to a proto message, it always refers to the
 // message in binary encoded form. To ensure consistent hashing, clients and
 // servers MUST ensure that they serialize messages according to the following
-// rules, even if there are alternate valid encodings for the same message.
-// - Fields are serialized in tag order.
-// - There are no unknown fields.
-// - There are no duplicate fields.
-// - Fields are serialized according to the default semantics for their type.
+// rules, even if there are alternate valid encodings for the same message:
+//
+// * Fields are serialized in tag order.
+// * There are no unknown fields.
+// * There are no duplicate fields.
+// * Fields are serialized according to the default semantics for their type.
 //
 // Most protocol buffer implementations will always follow these rules when
 // serializing, but care should be taken to avoid shortcuts. For instance,
@@ -709,19 +824,58 @@ message ActionResult {
   reserved 1; // Reserved for use as the resource name.
 
   // The output files of the action. For each output file requested in the
-  // `output_files` field of the Action, if the corresponding file existed after
-  // the action completed, a single entry will be present in the output list.
+  // `output_files` or `output_paths` field of the Action, if the corresponding
+  // file existed after the action completed, a single entry will be present
+  // either in this field, or the `output_file_symlinks` field if the file was
+  // a symbolic link to another file (`output_symlinks` field after v2.1).
   //
-  // If the action does not produce the requested output, or produces a
-  // directory where a regular file is expected or vice versa, then that output
+  // If an output listed in `output_files` was found, but was a directory rather
+  // than a regular file, the server will return a FAILED_PRECONDITION.
+  // If the action does not produce the requested output, then that output
   // will be omitted from the list. The server is free to arrange the output
   // list as desired; clients MUST NOT assume that the output list is sorted.
   repeated OutputFile output_files = 2;
 
+  // The output files of the action that are symbolic links to other files. Those
+  // may be links to other output files, or input files, or even absolute paths
+  // outside of the working directory, if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+  // For each output file requested in the `output_files` or `output_paths`
+  // field of the Action, if the corresponding file existed after
+  // the action completed, a single entry will be present either in this field,
+  // or in the `output_files` field, if the file was not a symbolic link.
+  //
+  // If an output symbolic link of the same name as listed in `output_files` of
+  // the Command was found, but its target type was not a regular file, the
+  // server will return a FAILED_PRECONDITION.
+  // If the action does not produce the requested output, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  //
+  // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
+  // should still populate this field in addition to `output_symlinks`.
+  repeated OutputSymlink output_file_symlinks = 10;
+
+  // New in v2.1: this field will only be populated if the command
+  // `output_paths` field was used, and not the pre v2.1 `output_files` or
+  // `output_directories` fields.
+  // The output paths of the action that are symbolic links to other paths. Those
+  // may be links to other outputs, or inputs, or even absolute paths
+  // outside of the working directory, if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+  // A single entry for each output requested in `output_paths`
+  // field of the Action, if the corresponding path existed after
+  // the action completed and was a symbolic link.
+  //
+  // If the action does not produce a requested output, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  repeated OutputSymlink output_symlinks = 12;
+
   // The output directories of the action. For each output directory requested
-  // in the `output_directories` field of the Action, if the corresponding
-  // directory existed after the action completed, a single entry will be
-  // present in the output list, which will contain the digest of a
+  // in the `output_directories` or `output_paths` field of the Action, if the
+  // corresponding directory existed after the action completed, a single entry
+  // will be present in the output list, which will contain the digest of a
   // [Tree][build.bazel.remote.execution.v2.Tree] message containing the
   // directory tree, and the path equal exactly to the corresponding Action
   // output_directories member.
@@ -777,37 +931,56 @@ message ActionResult {
   //   }
   // }
   // ```
+  // If an output of the same name as listed in `output_files` of
+  // the Command was found in `output_directories`, but was not a directory, the
+  // server will return a FAILED_PRECONDITION.
   repeated OutputDirectory output_directories = 3;
 
+  // The output directories of the action that are symbolic links to other
+  // directories. Those may be links to other output directories, or input
+  // directories, or even absolute paths outside of the working directory,
+  // if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+  // For each output directory requested in the `output_directories` field of
+  // the Action, if the directory existed after the action completed, a
+  // single entry will be present either in this field, or in the
+  // `output_directories` field, if the directory was not a symbolic link.
+  //
+  // If an output of the same name was found, but was a symbolic link to a file
+  // instead of a directory, the server will return a FAILED_PRECONDITION.
+  // If the action does not produce the requested output, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  //
+  // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
+  // should still populate this field in addition to `output_symlinks`.
+  repeated OutputSymlink output_directory_symlinks = 11;
+
   // The exit code of the command.
   int32 exit_code = 4;
 
-  // The standard output buffer of the action. The server will determine, based
-  // on the size of the buffer, whether to return it in raw form or to return
-  // a digest in `stdout_digest` that points to the buffer. If neither is set,
-  // then the buffer is empty. The client SHOULD NOT assume it will get one of
-  // the raw buffer or a digest on any given request and should be prepared to
-  // handle either.
+  // The standard output buffer of the action. The server SHOULD NOT inline
+  // stdout unless requested by the client in the
+  // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
+  // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
+  // would cause the response to exceed message size limits.
   bytes stdout_raw = 5;
 
   // The digest for a blob containing the standard output of the action, which
   // can be retrieved from the
   // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
-  // See `stdout_raw` for when this will be set.
   Digest stdout_digest = 6;
 
-  // The standard error buffer of the action. The server will determine, based
-  // on the size of the buffer, whether to return it in raw form or to return
-  // a digest in `stderr_digest` that points to the buffer. If neither is set,
-  // then the buffer is empty. The client SHOULD NOT assume it will get one of
-  // the raw buffer or a digest on any given request and should be prepared to
-  // handle either.
+  // The standard error buffer of the action. The server SHOULD NOT inline
+  // stderr unless requested by the client in the
+  // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
+  // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
+  // would cause the response to exceed message size limits.
   bytes stderr_raw = 7;
 
   // The digest for a blob containing the standard error of the action, which
   // can be retrieved from the
   // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
-  // See `stderr_raw` for when this will be set.
   Digest stderr_digest = 8;
 
   // The details of the execution that originally produced this result.
@@ -818,10 +991,8 @@ message ActionResult {
 // [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an
 // output in an `ActionResult`. It allows a full file path rather than
 // only a name.
-//
-// `OutputFile` is binary-compatible with `FileNode`.
 message OutputFile {
-  // The full path of the file relative to the input root, including the
+  // The full path of the file relative to the working directory, including the
   // filename. The path separator is a forward slash `/`. Since this is a
   // relative path, it MUST NOT begin with a leading forward slash.
   string path = 1;
@@ -833,6 +1004,16 @@ message OutputFile {
 
   // True if file is executable, false otherwise.
   bool is_executable = 4;
+
+  // The contents of the file if inlining was requested. The server SHOULD NOT inline
+  // file contents unless requested by the client in the
+  // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
+  // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
+  // would cause the response to exceed message size limits.
+  bytes contents = 5;
+
+  // The supported node properties of the OutputFile, if requested by the Action.
+  repeated NodeProperty node_properties = 6;
 }
 
 // A `Tree` contains all the
@@ -866,6 +1047,30 @@ message OutputDirectory {
   Digest tree_digest = 3;
 }
 
+// An `OutputSymlink` is similar to a
+// [Symlink][build.bazel.remote.execution.v2.SymlinkNode], but it is used as an
+// output in an `ActionResult`.
+//
+// `OutputSymlink` is binary-compatible with `SymlinkNode`.
+message OutputSymlink {
+  // The full path of the symlink relative to the working directory, including the
+  // filename. The path separator is a forward slash `/`. Since this is a
+  // relative path, it MUST NOT begin with a leading forward slash.
+  string path = 1;
+
+  // The target path of the symlink. The path separator is a forward slash `/`.
+  // The target path can be relative to the parent directory of the symlink or
+  // it can be an absolute path starting with `/`. Support for absolute paths
+  // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
+  // API. The canonical form forbids the substrings `/./` and `//` in the target
+  // path. `..` components are allowed anywhere in the target path.
+  string target = 2;
+
+  // The supported node properties of the OutputSymlink, if requested by the
+  // Action.
+  repeated NodeProperty node_properties = 3;
+}
+
 // An `ExecutionPolicy` can be used to control the scheduling of the action.
 message ExecutionPolicy {
   // The priority (relative importance) of this action. Generally, a lower value
@@ -904,9 +1109,19 @@ message ExecuteRequest {
   // omitted.
   string instance_name = 1;
 
-  // If true, the action will be executed anew even if its result was already
-  // present in the cache. If false, the result may be served from the
-  // [ActionCache][build.bazel.remote.execution.v2.ActionCache].
+  // If true, the action will be executed even if its result is already
+  // present in the [ActionCache][build.bazel.remote.execution.v2.ActionCache].
+  // The execution is still allowed to be merged with other in-flight executions
+  // of the same action, however - semantically, the service MUST only guarantee
+  // that the results of an execution with this field set were not visible
+  // before the corresponding execution request was sent.
+  // Note that actions from execution requests setting this field set are still
+  // eligible to be entered into the action cache upon completion, and services
+  // SHOULD overwrite any existing entries that may exist. This allows
+  // skip_cache_lookup requests to be used as a mechanism for replacing action
+  // cache entries that reference outputs no longer available or that are
+  // poisoned in any way.
+  // If false, the result may be served from the action cache.
   bool skip_cache_lookup = 3;
 
   reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.
@@ -970,16 +1185,16 @@ message ExecuteResponse {
   // phase. The keys SHOULD be human readable so that a client can display them
   // to a user.
   map<string, LogFile> server_logs = 4;
+
+  // Freeform informational message with details on the execution of the action
+  // that may be displayed to the user upon failure or when requested explicitly.
+  string message = 5;
 }
 
-// Metadata about an ongoing
-// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
-// will be contained in the [metadata
-// field][google.longrunning.Operation.response] of the
-// [Operation][google.longrunning.Operation].
-message ExecuteOperationMetadata {
-  // The current stage of execution.
-  enum Stage {
+// The current stage of action execution.
+message ExecutionStage {
+  enum Value {
+    // Invalid value.
     UNKNOWN = 0;
 
     // Checking the result against the cache.
@@ -994,8 +1209,16 @@ message ExecuteOperationMetadata {
     // Finished execution.
     COMPLETED = 4;
   }
+}
 
-  Stage stage = 1;
+// Metadata about an ongoing
+// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
+// will be contained in the [metadata
+// field][google.longrunning.Operation.response] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteOperationMetadata {
+  // The current stage of execution.
+  ExecutionStage.Value stage = 1;
 
   // The digest of the [Action][build.bazel.remote.execution.v2.Action]
   // being executed.
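
A client polling the returned long-running Operation now reads the stage through the wrapper message. A minimal sketch, assuming `operation` is a `google.longrunning.Operation` obtained from Execute or WaitExecution:

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    metadata = remote_execution_pb2.ExecuteOperationMetadata()
    operation.metadata.Unpack(metadata)  # metadata field is a protobuf Any
    if metadata.stage == remote_execution_pb2.ExecutionStage.COMPLETED:
        print("action finished:", metadata.action_digest.hash)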
@@ -1015,7 +1238,7 @@ message ExecuteOperationMetadata {
 // A request message for
 // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
 message WaitExecutionRequest {
-  // The name of the [Operation][google.longrunning.operations.v1.Operation]
+  // The name of the [Operation][google.longrunning.Operation]
   // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute].
   string name = 1;
 }
@@ -1033,6 +1256,19 @@ message GetActionResultRequest {
   // The digest of the [Action][build.bazel.remote.execution.v2.Action]
   // whose result is requested.
   Digest action_digest = 2;
+
+  // A hint to the server to request inlining stdout in the
+  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
+  bool inline_stdout = 3;
+
+  // A hint to the server to request inlining stderr in the
+  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
+  bool inline_stderr = 4;
+
+  // A hint to the server to inline the contents of the listed output files.
+  // Each path needs to exactly match one path in `output_files` in the
+  // [Command][build.bazel.remote.execution.v2.Command] message.
+  repeated string inline_output_files = 5;
 }
 
 // A request message for
@@ -1136,7 +1372,7 @@ message BatchReadBlobsRequest {
 // A response message for
 // [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
 message BatchReadBlobsResponse {
-  // A response corresponding to a single blob that the client tried to upload.
+  // A response corresponding to a single blob that the client tried to download.
   message Response {
     // The digest to which this response corresponds.
     Digest digest = 1;
@@ -1176,7 +1412,8 @@ message GetTreeRequest {
 
   // A page token, which must be a value received in a previous
   // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse].
-  // If present, the server will use it to return the following page of results.
+  // If present, the server will use that token as an offset, returning only
+  // that page and the ones that succeed it.
   string page_token = 4;
 }
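
GetTree is a server-streaming call, so a client normally just drains the stream; the token only matters when resuming an interrupted walk. A sketch, assuming a CAS stub and a `root_digest` for the root Directory:

    saved_token = ""  # empty token starts from the first page
    directories = []
    for response in cas_stub.GetTree(remote_execution_pb2.GetTreeRequest(
            instance_name="main",
            root_digest=root_digest,
            page_token=saved_token)):
        directories.extend(response.directories)
        saved_token = response.next_page_token  # persist to resume later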
 
@@ -1194,7 +1431,7 @@ message GetTreeResponse {
 }
 
 // A request message for
-// [Capabilities.GetCapabilities][google.devtools.remoteexecution.v2.Capabilities.GetCapabilities].
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
 message GetCapabilitiesRequest {
   // The instance of the execution system to operate against. A server may
   // support multiple instances of the execution system (with their own workers,
@@ -1205,7 +1442,7 @@ message GetCapabilitiesRequest {
 }
 
 // A response message for
-// [Capabilities.GetCapabilities][google.devtools.remoteexecution.v2.Capabilities.GetCapabilities].
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
 message ServerCapabilities {
   // Capabilities of the remote cache system.
   CacheCapabilities cache_capabilities = 1;
@@ -1225,11 +1462,30 @@ message ServerCapabilities {
 
 // The digest function used for converting values into keys for CAS and Action
 // Cache.
-enum DigestFunction {
-  UNKNOWN = 0;
-  SHA256 = 1;
-  SHA1 = 2;
-  MD5 = 3;
+message DigestFunction {
+  enum Value {
+    // It is an error for the server to return this value.
+    UNKNOWN = 0;
+
+    // The SHA-256 digest function.
+    SHA256 = 1;
+
+    // The SHA-1 digest function.
+    SHA1 = 2;
+
+    // The MD5 digest function.
+    MD5 = 3;
+
+    // The Microsoft "VSO-Hash" paged SHA256 digest function.
+    // See https://github.com/microsoft/BuildXL/blob/master/Documentation/Specs/PagedHash.md .
+    VSO = 4;
+
+    // The SHA-384 digest function.
+    SHA384 = 5;
+
+    // The SHA-512 digest function.
+    SHA512 = 6;
+  }
 }
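
Whichever function the server advertises, the client must hash blobs consistently with it when forming digests; for the common SHA-256 case:

    import hashlib
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    blob = b"example contents"
    digest = remote_execution_pb2.Digest(
        hash=hashlib.sha256(blob).hexdigest(),  # lowercase hex digest
        size_bytes=len(blob),
    )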
 
 // Describes the server/instance capabilities for updating the action cache.
@@ -1249,25 +1505,29 @@ message PriorityCapabilities {
   repeated PriorityRange priorities = 1;
 }
 
-// Capabilities of the remote cache system.
-message CacheCapabilities {
-  // Describes how the server treats absolute symlink targets.
-  enum SymlinkAbsolutePathStrategy {
+// Describes how the server treats absolute symlink targets.
+message SymlinkAbsolutePathStrategy {
+  enum Value {
+    // Invalid value.
     UNKNOWN = 0;
 
-    // Server will return an INVALID_ARGUMENT on input symlinks with absolute targets.
+    // Server will return an `INVALID_ARGUMENT` on input symlinks with absolute
+    // targets.
     // If an action tries to create an output symlink with an absolute target, a
-    // FAILED_PRECONDITION will be returned.
+    // `FAILED_PRECONDITION` will be returned.
     DISALLOWED = 1;
 
     // Server will allow symlink targets to escape the input root tree, possibly
     // resulting in non-hermetic builds.
     ALLOWED = 2;
   }
+}
 
+// Capabilities of the remote cache system.
+message CacheCapabilities {
   // All the digest functions supported by the remote cache.
   // Remote cache may support multiple digest functions simultaneously.
-  repeated DigestFunction digest_function = 1;
+  repeated DigestFunction.Value digest_function = 1;
 
   // Capabilities for updating the action cache.
   ActionCacheUpdateCapabilities action_cache_update_capabilities = 2;
@@ -1282,19 +1542,22 @@ message CacheCapabilities {
   int64 max_batch_total_size_bytes = 4;
 
   // Whether absolute symlink targets are supported.
-  SymlinkAbsolutePathStrategy symlink_absolute_path_strategy = 5;
+  SymlinkAbsolutePathStrategy.Value symlink_absolute_path_strategy = 5;
 }
 
 // Capabilities of the remote execution system.
 message ExecutionCapabilities {
   // Remote execution may only support a single digest function.
-  DigestFunction digest_function = 1;
+  DigestFunction.Value digest_function = 1;
 
   // Whether remote execution is enabled for the particular server/instance.
   bool exec_enabled = 2;
 
   // Supported execution priority range.
   PriorityCapabilities execution_priority_capabilities = 3;
+
+  // Supported node properties.
+  repeated string supported_node_properties = 4;
 }
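
A client can discover all of the above in a single round trip. A sketch that checks the digest function and probes for mtime-style node property support (the property name here is illustrative, not fixed by this proto):

    caps = capabilities_stub.GetCapabilities(
        remote_execution_pb2.GetCapabilitiesRequest(instance_name="main"))
    exec_caps = caps.execution_capabilities
    if exec_caps.digest_function != remote_execution_pb2.DigestFunction.SHA256:
        raise RuntimeError("server uses an unsupported digest function")
    mtimes_ok = "MTime" in exec_caps.supported_node_properties  # assumed name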
 
 // Details for the tool used to call the API.
@@ -1310,8 +1573,14 @@ message ToolDetails {
 // external context of the request. The server may use this for logging or other
 // purposes. To use it, the client attaches the header to the call using the
 // canonical proto serialization:
-// name: build.bazel.remote.execution.v2.requestmetadata-bin
-// contents: the base64 encoded binary RequestMetadata message.
+//
+// * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
+// * contents: the base64 encoded binary `RequestMetadata` message.
+// Note: the gRPC library serializes binary headers encoded in base 64 by
+// default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests).
+// Therefore, if the gRPC library is used to pass/retrieve this
+// metadata, the user may ignore the base64 encoding and assume it is simply
+// serialized as a binary message.
 message RequestMetadata {
   // The details for the tool invoking the requests.
   ToolDetails tool_details = 1;
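
With grpc's Python bindings the base64 step is handled automatically for `-bin` keys, so attaching the metadata reduces to (a sketch):

    metadata = remote_execution_pb2.RequestMetadata(
        tool_details=remote_execution_pb2.ToolDetails(tool_name="buildstream"),
    )
    call_metadata = [
        ("build.bazel.remote.execution.v2.requestmetadata-bin",
         metadata.SerializeToString()),
    ]
    # passed as e.g. execution_stub.Execute(request, metadata=call_metadata)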
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
index ac42b7b..97ec30c 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
@@ -4,7 +4,6 @@
 
 import sys
 _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
@@ -27,13 +26,15 @@ DESCRIPTOR = _descriptor.FileDescriptor(
   package='build.bazel.remote.execution.v2',
   syntax='proto3',
   serialized_options=_b('\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001Z\017remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'),
-  serialized_pb=_b('\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xd5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_dig [...]
+  serialized_pb=_b('\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xf5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_dig [...]
   ,
   dependencies=[build_dot_bazel_dot_semver_dot_semver__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
 
-_DIGESTFUNCTION = _descriptor.EnumDescriptor(
-  name='DigestFunction',
-  full_name='build.bazel.remote.execution.v2.DigestFunction',
+
+
+_EXECUTIONSTAGE_VALUE = _descriptor.EnumDescriptor(
+  name='Value',
+  full_name='build.bazel.remote.execution.v2.ExecutionStage.Value',
   filename=None,
   file=DESCRIPTOR,
   values=[
@@ -42,35 +43,32 @@ _DIGESTFUNCTION = _descriptor.EnumDescriptor(
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='SHA256', index=1, number=1,
+      name='CACHE_CHECK', index=1, number=1,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='SHA1', index=2, number=2,
+      name='QUEUED', index=2, number=2,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='MD5', index=3, number=3,
+      name='EXECUTING', index=3, number=3,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='COMPLETED', index=4, number=4,
       serialized_options=None,
       type=None),
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=7213,
-  serialized_end=7273,
+  serialized_start=4432,
+  serialized_end=4511,
 )
-_sym_db.RegisterEnumDescriptor(_DIGESTFUNCTION)
-
-DigestFunction = enum_type_wrapper.EnumTypeWrapper(_DIGESTFUNCTION)
-UNKNOWN = 0
-SHA256 = 1
-SHA1 = 2
-MD5 = 3
-
+_sym_db.RegisterEnumDescriptor(_EXECUTIONSTAGE_VALUE)
 
-_EXECUTEOPERATIONMETADATA_STAGE = _descriptor.EnumDescriptor(
-  name='Stage',
-  full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage',
+_DIGESTFUNCTION_VALUE = _descriptor.EnumDescriptor(
+  name='Value',
+  full_name='build.bazel.remote.execution.v2.DigestFunction.Value',
   filename=None,
   file=DESCRIPTOR,
   values=[
@@ -79,32 +77,40 @@ _EXECUTEOPERATIONMETADATA_STAGE = _descriptor.EnumDescriptor(
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='CACHE_CHECK', index=1, number=1,
+      name='SHA256', index=1, number=1,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='QUEUED', index=2, number=2,
+      name='SHA1', index=2, number=2,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='EXECUTING', index=3, number=3,
+      name='MD5', index=3, number=3,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='COMPLETED', index=4, number=4,
+      name='VSO', index=4, number=4,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SHA384', index=5, number=5,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SHA512', index=6, number=6,
       serialized_options=None,
       type=None),
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=3866,
-  serialized_end=3945,
+  serialized_start=6893,
+  serialized_end=6977,
 )
-_sym_db.RegisterEnumDescriptor(_EXECUTEOPERATIONMETADATA_STAGE)
+_sym_db.RegisterEnumDescriptor(_DIGESTFUNCTION_VALUE)
 
-_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY = _descriptor.EnumDescriptor(
-  name='SymlinkAbsolutePathStrategy',
-  full_name='build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy',
+_SYMLINKABSOLUTEPATHSTRATEGY_VALUE = _descriptor.EnumDescriptor(
+  name='Value',
+  full_name='build.bazel.remote.execution.v2.SymlinkAbsolutePathStrategy.Value',
   filename=None,
   file=DESCRIPTOR,
   values=[
@@ -123,10 +129,10 @@ _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY = _descriptor.EnumDescriptor(
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=6696,
-  serialized_end=6767,
+  serialized_start=7242,
+  serialized_end=7291,
 )
-_sym_db.RegisterEnumDescriptor(_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY)
+_sym_db.RegisterEnumDescriptor(_SYMLINKABSOLUTEPATHSTRATEGY_VALUE)
 
 
 _ACTION = _descriptor.Descriptor(
@@ -164,6 +170,13 @@ _ACTION = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='output_node_properties', full_name='build.bazel.remote.execution.v2.Action.output_node_properties', index=4,
+      number=8, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -177,7 +190,7 @@ _ACTION = _descriptor.Descriptor(
   oneofs=[
   ],
   serialized_start=282,
-  serialized_end=495,
+  serialized_end=527,
 )
 
 
@@ -214,8 +227,8 @@ _COMMAND_ENVIRONMENTVARIABLE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=759,
-  serialized_end=809,
+  serialized_start=813,
+  serialized_end=863,
 )
 
 _COMMAND = _descriptor.Descriptor(
@@ -254,14 +267,21 @@ _COMMAND = _descriptor.Descriptor(
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='platform', full_name='build.bazel.remote.execution.v2.Command.platform', index=4,
+      name='output_paths', full_name='build.bazel.remote.execution.v2.Command.output_paths', index=4,
+      number=7, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='platform', full_name='build.bazel.remote.execution.v2.Command.platform', index=5,
       number=5, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='working_directory', full_name='build.bazel.remote.execution.v2.Command.working_directory', index=5,
+      name='working_directory', full_name='build.bazel.remote.execution.v2.Command.working_directory', index=6,
       number=6, type=9, cpp_type=9, label=1,
       has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
@@ -279,8 +299,8 @@ _COMMAND = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=498,
-  serialized_end=809,
+  serialized_start=530,
+  serialized_end=863,
 )
 
 
@@ -317,8 +337,8 @@ _PLATFORM_PROPERTY = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=895,
-  serialized_end=934,
+  serialized_start=949,
+  serialized_end=988,
 )
 
 _PLATFORM = _descriptor.Descriptor(
@@ -347,8 +367,8 @@ _PLATFORM = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=811,
-  serialized_end=934,
+  serialized_start=865,
+  serialized_end=988,
 )
 
 
@@ -380,6 +400,13 @@ _DIRECTORY = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.bazel.remote.execution.v2.Directory.node_properties', index=3,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -392,8 +419,46 @@ _DIRECTORY = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=937,
-  serialized_end=1139,
+  serialized_start=991,
+  serialized_end=1265,
+)
+
+
+_NODEPROPERTY = _descriptor.Descriptor(
+  name='NodeProperty',
+  full_name='build.bazel.remote.execution.v2.NodeProperty',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='build.bazel.remote.execution.v2.NodeProperty.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='build.bazel.remote.execution.v2.NodeProperty.value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1267,
+  serialized_end=1310,
 )
 
 
@@ -425,6 +490,13 @@ _FILENODE = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.bazel.remote.execution.v2.FileNode.node_properties', index=3,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -437,8 +509,8 @@ _FILENODE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1141,
-  serialized_end=1251,
+  serialized_start=1313,
+  serialized_end=1495,
 )
 
 
@@ -475,8 +547,8 @@ _DIRECTORYNODE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1253,
-  serialized_end=1339,
+  serialized_start=1497,
+  serialized_end=1583,
 )
 
 
@@ -501,6 +573,13 @@ _SYMLINKNODE = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.bazel.remote.execution.v2.SymlinkNode.node_properties', index=2,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -513,8 +592,8 @@ _SYMLINKNODE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1341,
-  serialized_end=1384,
+  serialized_start=1585,
+  serialized_end=1700,
 )
 
 
@@ -551,8 +630,8 @@ _DIGEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1386,
-  serialized_end=1428,
+  serialized_start=1702,
+  serialized_end=1744,
 )
 
 
@@ -645,8 +724,8 @@ _EXECUTEDACTIONMETADATA = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1431,
-  serialized_end=2051,
+  serialized_start=1747,
+  serialized_end=2367,
 )
 
 
@@ -665,49 +744,70 @@ _ACTIONRESULT = _descriptor.Descriptor(
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='output_directories', full_name='build.bazel.remote.execution.v2.ActionResult.output_directories', index=1,
+      name='output_file_symlinks', full_name='build.bazel.remote.execution.v2.ActionResult.output_file_symlinks', index=1,
+      number=10, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='output_symlinks', full_name='build.bazel.remote.execution.v2.ActionResult.output_symlinks', index=2,
+      number=12, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='output_directories', full_name='build.bazel.remote.execution.v2.ActionResult.output_directories', index=3,
       number=3, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='exit_code', full_name='build.bazel.remote.execution.v2.ActionResult.exit_code', index=2,
+      name='output_directory_symlinks', full_name='build.bazel.remote.execution.v2.ActionResult.output_directory_symlinks', index=4,
+      number=11, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='exit_code', full_name='build.bazel.remote.execution.v2.ActionResult.exit_code', index=5,
       number=4, type=5, cpp_type=1, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='stdout_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_raw', index=3,
+      name='stdout_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_raw', index=6,
       number=5, type=12, cpp_type=9, label=1,
       has_default_value=False, default_value=_b(""),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='stdout_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_digest', index=4,
+      name='stdout_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_digest', index=7,
       number=6, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='stderr_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_raw', index=5,
+      name='stderr_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_raw', index=8,
       number=7, type=12, cpp_type=9, label=1,
       has_default_value=False, default_value=_b(""),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='stderr_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_digest', index=6,
+      name='stderr_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_digest', index=9,
       number=8, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='execution_metadata', full_name='build.bazel.remote.execution.v2.ActionResult.execution_metadata', index=7,
+      name='execution_metadata', full_name='build.bazel.remote.execution.v2.ActionResult.execution_metadata', index=10,
       number=9, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
@@ -725,8 +825,8 @@ _ACTIONRESULT = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2054,
-  serialized_end=2491,
+  serialized_start=2370,
+  serialized_end=3041,
 )
 
 
@@ -758,6 +858,20 @@ _OUTPUTFILE = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='contents', full_name='build.bazel.remote.execution.v2.OutputFile.contents', index=3,
+      number=5, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.bazel.remote.execution.v2.OutputFile.node_properties', index=4,
+      number=6, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -770,8 +884,8 @@ _OUTPUTFILE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2493,
-  serialized_end=2605,
+  serialized_start=3044,
+  serialized_end=3246,
 )
 
 
@@ -808,8 +922,8 @@ _TREE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2607,
-  serialized_end=2733,
+  serialized_start=3248,
+  serialized_end=3374,
 )
 
 
@@ -846,8 +960,53 @@ _OUTPUTDIRECTORY = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2735,
-  serialized_end=2834,
+  serialized_start=3376,
+  serialized_end=3475,
+)
+
+
+_OUTPUTSYMLINK = _descriptor.Descriptor(
+  name='OutputSymlink',
+  full_name='build.bazel.remote.execution.v2.OutputSymlink',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='path', full_name='build.bazel.remote.execution.v2.OutputSymlink.path', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='target', full_name='build.bazel.remote.execution.v2.OutputSymlink.target', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.bazel.remote.execution.v2.OutputSymlink.node_properties', index=2,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3477,
+  serialized_end=3594,
 )
 
 
@@ -877,8 +1036,8 @@ _EXECUTIONPOLICY = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2836,
-  serialized_end=2871,
+  serialized_start=3596,
+  serialized_end=3631,
 )
 
 
@@ -908,8 +1067,8 @@ _RESULTSCACHEPOLICY = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2873,
-  serialized_end=2911,
+  serialized_start=3633,
+  serialized_end=3671,
 )
 
 
@@ -967,8 +1126,8 @@ _EXECUTEREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2914,
-  serialized_end=3221,
+  serialized_start=3674,
+  serialized_end=3981,
 )
 
 
@@ -1005,8 +1164,8 @@ _LOGFILE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3223,
-  serialized_end=3313,
+  serialized_start=3983,
+  serialized_end=4073,
 )
 
 
@@ -1043,8 +1202,8 @@ _EXECUTERESPONSE_SERVERLOGSENTRY = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3544,
-  serialized_end=3635,
+  serialized_start=4321,
+  serialized_end=4412,
 )
 
 _EXECUTERESPONSE = _descriptor.Descriptor(
@@ -1082,6 +1241,13 @@ _EXECUTERESPONSE = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='message', full_name='build.bazel.remote.execution.v2.ExecuteResponse.message', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -1094,8 +1260,33 @@ _EXECUTERESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3316,
-  serialized_end=3635,
+  serialized_start=4076,
+  serialized_end=4412,
+)
+
+
+_EXECUTIONSTAGE = _descriptor.Descriptor(
+  name='ExecutionStage',
+  full_name='build.bazel.remote.execution.v2.ExecutionStage',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _EXECUTIONSTAGE_VALUE,
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4414,
+  serialized_end=4511,
 )
 
 
@@ -1139,7 +1330,6 @@ _EXECUTEOPERATIONMETADATA = _descriptor.Descriptor(
   ],
   nested_types=[],
   enum_types=[
-    _EXECUTEOPERATIONMETADATA_STAGE,
   ],
   serialized_options=None,
   is_extendable=False,
@@ -1147,8 +1337,8 @@ _EXECUTEOPERATIONMETADATA = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3638,
-  serialized_end=3945,
+  serialized_start=4514,
+  serialized_end=4730,
 )
 
 
@@ -1178,8 +1368,8 @@ _WAITEXECUTIONREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3947,
-  serialized_end=3983,
+  serialized_start=4732,
+  serialized_end=4768,
 )
 
 
@@ -1204,6 +1394,27 @@ _GETACTIONRESULTREQUEST = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='inline_stdout', full_name='build.bazel.remote.execution.v2.GetActionResultRequest.inline_stdout', index=2,
+      number=3, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='inline_stderr', full_name='build.bazel.remote.execution.v2.GetActionResultRequest.inline_stderr', index=3,
+      number=4, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='inline_output_files', full_name='build.bazel.remote.execution.v2.GetActionResultRequest.inline_output_files', index=4,
+      number=5, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -1216,8 +1427,8 @@ _GETACTIONRESULTREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3985,
-  serialized_end=4096,
+  serialized_start=4771,
+  serialized_end=4957,
 )
 
 
@@ -1268,8 +1479,8 @@ _UPDATEACTIONRESULTREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4099,
-  serialized_end=4366,
+  serialized_start=4960,
+  serialized_end=5227,
 )
 
 
@@ -1306,8 +1517,8 @@ _FINDMISSINGBLOBSREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4368,
-  serialized_end=4479,
+  serialized_start=5229,
+  serialized_end=5340,
 )
 
 
@@ -1337,8 +1548,8 @@ _FINDMISSINGBLOBSRESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4481,
-  serialized_end=4578,
+  serialized_start=5342,
+  serialized_end=5439,
 )
 
 
@@ -1375,8 +1586,8 @@ _BATCHUPDATEBLOBSREQUEST_REQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4715,
-  serialized_end=4795,
+  serialized_start=5576,
+  serialized_end=5656,
 )
 
 _BATCHUPDATEBLOBSREQUEST = _descriptor.Descriptor(
@@ -1412,8 +1623,8 @@ _BATCHUPDATEBLOBSREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4581,
-  serialized_end=4795,
+  serialized_start=5442,
+  serialized_end=5656,
 )
 
 
@@ -1450,8 +1661,8 @@ _BATCHUPDATEBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4913,
-  serialized_end=5016,
+  serialized_start=5774,
+  serialized_end=5877,
 )
 
 _BATCHUPDATEBLOBSRESPONSE = _descriptor.Descriptor(
@@ -1480,8 +1691,8 @@ _BATCHUPDATEBLOBSRESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4798,
-  serialized_end=5016,
+  serialized_start=5659,
+  serialized_end=5877,
 )
 
 
@@ -1518,8 +1729,8 @@ _BATCHREADBLOBSREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5018,
-  serialized_end=5122,
+  serialized_start=5879,
+  serialized_end=5983,
 )
 
 
@@ -1563,8 +1774,8 @@ _BATCHREADBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5236,
-  serialized_end=5353,
+  serialized_start=6097,
+  serialized_end=6214,
 )
 
 _BATCHREADBLOBSRESPONSE = _descriptor.Descriptor(
@@ -1593,8 +1804,8 @@ _BATCHREADBLOBSRESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5125,
-  serialized_end=5353,
+  serialized_start=5986,
+  serialized_end=6214,
 )
 
 
@@ -1645,8 +1856,8 @@ _GETTREEREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5356,
-  serialized_end=5496,
+  serialized_start=6217,
+  serialized_end=6357,
 )
 
 
@@ -1683,8 +1894,8 @@ _GETTREERESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5498,
-  serialized_end=5605,
+  serialized_start=6359,
+  serialized_end=6466,
 )
 
 
@@ -1714,8 +1925,8 @@ _GETCAPABILITIESREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5607,
-  serialized_end=5654,
+  serialized_start=6468,
+  serialized_end=6515,
 )
 
 
@@ -1773,8 +1984,33 @@ _SERVERCAPABILITIES = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5657,
-  serialized_end=6012,
+  serialized_start=6518,
+  serialized_end=6873,
+)
+
+
+_DIGESTFUNCTION = _descriptor.Descriptor(
+  name='DigestFunction',
+  full_name='build.bazel.remote.execution.v2.DigestFunction',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _DIGESTFUNCTION_VALUE,
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=6875,
+  serialized_end=6977,
 )
 
 
@@ -1804,8 +2040,8 @@ _ACTIONCACHEUPDATECAPABILITIES = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=6014,
-  serialized_end=6069,
+  serialized_start=6979,
+  serialized_end=7034,
 )
 
 
@@ -1842,8 +2078,8 @@ _PRIORITYCAPABILITIES_PRIORITYRANGE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=6185,
-  serialized_end=6244,
+  serialized_start=7150,
+  serialized_end=7209,
 )
 
 _PRIORITYCAPABILITIES = _descriptor.Descriptor(
@@ -1872,8 +2108,33 @@ _PRIORITYCAPABILITIES = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=6072,
-  serialized_end=6244,
+  serialized_start=7037,
+  serialized_end=7209,
+)
+
+
+_SYMLINKABSOLUTEPATHSTRATEGY = _descriptor.Descriptor(
+  name='SymlinkAbsolutePathStrategy',
+  full_name='build.bazel.remote.execution.v2.SymlinkAbsolutePathStrategy',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _SYMLINKABSOLUTEPATHSTRATEGY_VALUE,
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=7211,
+  serialized_end=7291,
 )
 
 
@@ -1924,7 +2185,6 @@ _CACHECAPABILITIES = _descriptor.Descriptor(
   ],
   nested_types=[],
   enum_types=[
-    _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY,
   ],
   serialized_options=None,
   is_extendable=False,
@@ -1932,8 +2192,8 @@ _CACHECAPABILITIES = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=6247,
-  serialized_end=6767,
+  serialized_start=7294,
+  serialized_end=7735,
 )
 
 
@@ -1965,6 +2225,13 @@ _EXECUTIONCAPABILITIES = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='supported_node_properties', full_name='build.bazel.remote.execution.v2.ExecutionCapabilities.supported_node_properties', index=3,
+      number=4, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -1977,8 +2244,8 @@ _EXECUTIONCAPABILITIES = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=6770,
-  serialized_end=6985,
+  serialized_start=7738,
+  serialized_end=7994,
 )
 
 
@@ -2015,8 +2282,8 @@ _TOOLDETAILS = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=6987,
-  serialized_end=7041,
+  serialized_start=7996,
+  serialized_end=8050,
 )
 
 
@@ -2067,8 +2334,8 @@ _REQUESTMETADATA = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=7044,
-  serialized_end=7211,
+  serialized_start=8053,
+  serialized_end=8220,
 )
 
 _ACTION.fields_by_name['command_digest'].message_type = _DIGEST
@@ -2082,8 +2349,11 @@ _PLATFORM.fields_by_name['properties'].message_type = _PLATFORM_PROPERTY
 _DIRECTORY.fields_by_name['files'].message_type = _FILENODE
 _DIRECTORY.fields_by_name['directories'].message_type = _DIRECTORYNODE
 _DIRECTORY.fields_by_name['symlinks'].message_type = _SYMLINKNODE
+_DIRECTORY.fields_by_name['node_properties'].message_type = _NODEPROPERTY
 _FILENODE.fields_by_name['digest'].message_type = _DIGEST
+_FILENODE.fields_by_name['node_properties'].message_type = _NODEPROPERTY
 _DIRECTORYNODE.fields_by_name['digest'].message_type = _DIGEST
+_SYMLINKNODE.fields_by_name['node_properties'].message_type = _NODEPROPERTY
 _EXECUTEDACTIONMETADATA.fields_by_name['queued_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
 _EXECUTEDACTIONMETADATA.fields_by_name['worker_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
 _EXECUTEDACTIONMETADATA.fields_by_name['worker_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
@@ -2094,14 +2364,19 @@ _EXECUTEDACTIONMETADATA.fields_by_name['execution_completed_timestamp'].message_
 _EXECUTEDACTIONMETADATA.fields_by_name['output_upload_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
 _EXECUTEDACTIONMETADATA.fields_by_name['output_upload_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
 _ACTIONRESULT.fields_by_name['output_files'].message_type = _OUTPUTFILE
+_ACTIONRESULT.fields_by_name['output_file_symlinks'].message_type = _OUTPUTSYMLINK
+_ACTIONRESULT.fields_by_name['output_symlinks'].message_type = _OUTPUTSYMLINK
 _ACTIONRESULT.fields_by_name['output_directories'].message_type = _OUTPUTDIRECTORY
+_ACTIONRESULT.fields_by_name['output_directory_symlinks'].message_type = _OUTPUTSYMLINK
 _ACTIONRESULT.fields_by_name['stdout_digest'].message_type = _DIGEST
 _ACTIONRESULT.fields_by_name['stderr_digest'].message_type = _DIGEST
 _ACTIONRESULT.fields_by_name['execution_metadata'].message_type = _EXECUTEDACTIONMETADATA
 _OUTPUTFILE.fields_by_name['digest'].message_type = _DIGEST
+_OUTPUTFILE.fields_by_name['node_properties'].message_type = _NODEPROPERTY
 _TREE.fields_by_name['root'].message_type = _DIRECTORY
 _TREE.fields_by_name['children'].message_type = _DIRECTORY
 _OUTPUTDIRECTORY.fields_by_name['tree_digest'].message_type = _DIGEST
+_OUTPUTSYMLINK.fields_by_name['node_properties'].message_type = _NODEPROPERTY
 _EXECUTEREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
 _EXECUTEREQUEST.fields_by_name['execution_policy'].message_type = _EXECUTIONPOLICY
 _EXECUTEREQUEST.fields_by_name['results_cache_policy'].message_type = _RESULTSCACHEPOLICY
@@ -2111,9 +2386,9 @@ _EXECUTERESPONSE_SERVERLOGSENTRY.containing_type = _EXECUTERESPONSE
 _EXECUTERESPONSE.fields_by_name['result'].message_type = _ACTIONRESULT
 _EXECUTERESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
 _EXECUTERESPONSE.fields_by_name['server_logs'].message_type = _EXECUTERESPONSE_SERVERLOGSENTRY
-_EXECUTEOPERATIONMETADATA.fields_by_name['stage'].enum_type = _EXECUTEOPERATIONMETADATA_STAGE
+_EXECUTIONSTAGE_VALUE.containing_type = _EXECUTIONSTAGE
+_EXECUTEOPERATIONMETADATA.fields_by_name['stage'].enum_type = _EXECUTIONSTAGE_VALUE
 _EXECUTEOPERATIONMETADATA.fields_by_name['action_digest'].message_type = _DIGEST
-_EXECUTEOPERATIONMETADATA_STAGE.containing_type = _EXECUTEOPERATIONMETADATA
 _GETACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
 _UPDATEACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
 _UPDATEACTIONRESULTREQUEST.fields_by_name['action_result'].message_type = _ACTIONRESULT
@@ -2139,20 +2414,22 @@ _SERVERCAPABILITIES.fields_by_name['execution_capabilities'].message_type = _EXE
 _SERVERCAPABILITIES.fields_by_name['deprecated_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
 _SERVERCAPABILITIES.fields_by_name['low_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
 _SERVERCAPABILITIES.fields_by_name['high_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
+_DIGESTFUNCTION_VALUE.containing_type = _DIGESTFUNCTION
 _PRIORITYCAPABILITIES_PRIORITYRANGE.containing_type = _PRIORITYCAPABILITIES
 _PRIORITYCAPABILITIES.fields_by_name['priorities'].message_type = _PRIORITYCAPABILITIES_PRIORITYRANGE
-_CACHECAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION
+_SYMLINKABSOLUTEPATHSTRATEGY_VALUE.containing_type = _SYMLINKABSOLUTEPATHSTRATEGY
+_CACHECAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION_VALUE
 _CACHECAPABILITIES.fields_by_name['action_cache_update_capabilities'].message_type = _ACTIONCACHEUPDATECAPABILITIES
 _CACHECAPABILITIES.fields_by_name['cache_priority_capabilities'].message_type = _PRIORITYCAPABILITIES
-_CACHECAPABILITIES.fields_by_name['symlink_absolute_path_strategy'].enum_type = _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY
-_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY.containing_type = _CACHECAPABILITIES
-_EXECUTIONCAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION
+_CACHECAPABILITIES.fields_by_name['symlink_absolute_path_strategy'].enum_type = _SYMLINKABSOLUTEPATHSTRATEGY_VALUE
+_EXECUTIONCAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION_VALUE
 _EXECUTIONCAPABILITIES.fields_by_name['execution_priority_capabilities'].message_type = _PRIORITYCAPABILITIES
 _REQUESTMETADATA.fields_by_name['tool_details'].message_type = _TOOLDETAILS
 DESCRIPTOR.message_types_by_name['Action'] = _ACTION
 DESCRIPTOR.message_types_by_name['Command'] = _COMMAND
 DESCRIPTOR.message_types_by_name['Platform'] = _PLATFORM
 DESCRIPTOR.message_types_by_name['Directory'] = _DIRECTORY
+DESCRIPTOR.message_types_by_name['NodeProperty'] = _NODEPROPERTY
 DESCRIPTOR.message_types_by_name['FileNode'] = _FILENODE
 DESCRIPTOR.message_types_by_name['DirectoryNode'] = _DIRECTORYNODE
 DESCRIPTOR.message_types_by_name['SymlinkNode'] = _SYMLINKNODE
@@ -2162,11 +2439,13 @@ DESCRIPTOR.message_types_by_name['ActionResult'] = _ACTIONRESULT
 DESCRIPTOR.message_types_by_name['OutputFile'] = _OUTPUTFILE
 DESCRIPTOR.message_types_by_name['Tree'] = _TREE
 DESCRIPTOR.message_types_by_name['OutputDirectory'] = _OUTPUTDIRECTORY
+DESCRIPTOR.message_types_by_name['OutputSymlink'] = _OUTPUTSYMLINK
 DESCRIPTOR.message_types_by_name['ExecutionPolicy'] = _EXECUTIONPOLICY
 DESCRIPTOR.message_types_by_name['ResultsCachePolicy'] = _RESULTSCACHEPOLICY
 DESCRIPTOR.message_types_by_name['ExecuteRequest'] = _EXECUTEREQUEST
 DESCRIPTOR.message_types_by_name['LogFile'] = _LOGFILE
 DESCRIPTOR.message_types_by_name['ExecuteResponse'] = _EXECUTERESPONSE
+DESCRIPTOR.message_types_by_name['ExecutionStage'] = _EXECUTIONSTAGE
 DESCRIPTOR.message_types_by_name['ExecuteOperationMetadata'] = _EXECUTEOPERATIONMETADATA
 DESCRIPTOR.message_types_by_name['WaitExecutionRequest'] = _WAITEXECUTIONREQUEST
 DESCRIPTOR.message_types_by_name['GetActionResultRequest'] = _GETACTIONRESULTREQUEST
@@ -2181,13 +2460,14 @@ DESCRIPTOR.message_types_by_name['GetTreeRequest'] = _GETTREEREQUEST
 DESCRIPTOR.message_types_by_name['GetTreeResponse'] = _GETTREERESPONSE
 DESCRIPTOR.message_types_by_name['GetCapabilitiesRequest'] = _GETCAPABILITIESREQUEST
 DESCRIPTOR.message_types_by_name['ServerCapabilities'] = _SERVERCAPABILITIES
+DESCRIPTOR.message_types_by_name['DigestFunction'] = _DIGESTFUNCTION
 DESCRIPTOR.message_types_by_name['ActionCacheUpdateCapabilities'] = _ACTIONCACHEUPDATECAPABILITIES
 DESCRIPTOR.message_types_by_name['PriorityCapabilities'] = _PRIORITYCAPABILITIES
+DESCRIPTOR.message_types_by_name['SymlinkAbsolutePathStrategy'] = _SYMLINKABSOLUTEPATHSTRATEGY
 DESCRIPTOR.message_types_by_name['CacheCapabilities'] = _CACHECAPABILITIES
 DESCRIPTOR.message_types_by_name['ExecutionCapabilities'] = _EXECUTIONCAPABILITIES
 DESCRIPTOR.message_types_by_name['ToolDetails'] = _TOOLDETAILS
 DESCRIPTOR.message_types_by_name['RequestMetadata'] = _REQUESTMETADATA
-DESCRIPTOR.enum_types_by_name['DigestFunction'] = _DIGESTFUNCTION
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
 Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), {
@@ -2234,6 +2514,13 @@ Directory = _reflection.GeneratedProtocolMessageType('Directory', (_message.Mess
   })
 _sym_db.RegisterMessage(Directory)
 
+NodeProperty = _reflection.GeneratedProtocolMessageType('NodeProperty', (_message.Message,), {
+  'DESCRIPTOR' : _NODEPROPERTY,
+  '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.NodeProperty)
+  })
+_sym_db.RegisterMessage(NodeProperty)
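+
+# With the new message registered, client code can attach arbitrary
+# name/value pairs to nodes. A sketch of a file node carrying a
+# modification time; the property name and timestamp format are
+# assumptions here, following the node properties specification
+# referenced elsewhere in this series:
+#
+#     file_node = remote_execution_pb2.FileNode(
+#         name="config.h",
+#         digest=digest,  # Digest of the file contents
+#         node_properties=[
+#             remote_execution_pb2.NodeProperty(
+#                 name="MTime",                  # assumed property name
+#                 value="2019-12-02T14:41:30Z",  # assumed RFC 3339 value
+#             ),
+#         ],
+#     )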
+
 FileNode = _reflection.GeneratedProtocolMessageType('FileNode', (_message.Message,), {
   'DESCRIPTOR' : _FILENODE,
   '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
@@ -2297,6 +2584,13 @@ OutputDirectory = _reflection.GeneratedProtocolMessageType('OutputDirectory', (_
   })
 _sym_db.RegisterMessage(OutputDirectory)
 
+OutputSymlink = _reflection.GeneratedProtocolMessageType('OutputSymlink', (_message.Message,), {
+  'DESCRIPTOR' : _OUTPUTSYMLINK,
+  '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputSymlink)
+  })
+_sym_db.RegisterMessage(OutputSymlink)
+
 ExecutionPolicy = _reflection.GeneratedProtocolMessageType('ExecutionPolicy', (_message.Message,), {
   'DESCRIPTOR' : _EXECUTIONPOLICY,
   '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
@@ -2340,6 +2634,13 @@ ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_
 _sym_db.RegisterMessage(ExecuteResponse)
 _sym_db.RegisterMessage(ExecuteResponse.ServerLogsEntry)
 
+ExecutionStage = _reflection.GeneratedProtocolMessageType('ExecutionStage', (_message.Message,), {
+  'DESCRIPTOR' : _EXECUTIONSTAGE,
+  '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionStage)
+  })
+_sym_db.RegisterMessage(ExecutionStage)
+
 ExecuteOperationMetadata = _reflection.GeneratedProtocolMessageType('ExecuteOperationMetadata', (_message.Message,), {
   'DESCRIPTOR' : _EXECUTEOPERATIONMETADATA,
   '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
@@ -2462,6 +2763,13 @@ ServerCapabilities = _reflection.GeneratedProtocolMessageType('ServerCapabilitie
   })
 _sym_db.RegisterMessage(ServerCapabilities)
 
+DigestFunction = _reflection.GeneratedProtocolMessageType('DigestFunction', (_message.Message,), {
+  'DESCRIPTOR' : _DIGESTFUNCTION,
+  '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.DigestFunction)
+  })
+_sym_db.RegisterMessage(DigestFunction)
+
 ActionCacheUpdateCapabilities = _reflection.GeneratedProtocolMessageType('ActionCacheUpdateCapabilities', (_message.Message,), {
   'DESCRIPTOR' : _ACTIONCACHEUPDATECAPABILITIES,
   '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
@@ -2484,6 +2792,13 @@ PriorityCapabilities = _reflection.GeneratedProtocolMessageType('PriorityCapabil
 _sym_db.RegisterMessage(PriorityCapabilities)
 _sym_db.RegisterMessage(PriorityCapabilities.PriorityRange)
 
+SymlinkAbsolutePathStrategy = _reflection.GeneratedProtocolMessageType('SymlinkAbsolutePathStrategy', (_message.Message,), {
+  'DESCRIPTOR' : _SYMLINKABSOLUTEPATHSTRATEGY,
+  '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.SymlinkAbsolutePathStrategy)
+  })
+_sym_db.RegisterMessage(SymlinkAbsolutePathStrategy)
+
 CacheCapabilities = _reflection.GeneratedProtocolMessageType('CacheCapabilities', (_message.Message,), {
   'DESCRIPTOR' : _CACHECAPABILITIES,
   '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
@@ -2522,8 +2837,8 @@ _EXECUTION = _descriptor.ServiceDescriptor(
   file=DESCRIPTOR,
   index=0,
   serialized_options=None,
-  serialized_start=7276,
-  serialized_end=7589,
+  serialized_start=8223,
+  serialized_end=8536,
   methods=[
   _descriptor.MethodDescriptor(
     name='Execute',
@@ -2555,8 +2870,8 @@ _ACTIONCACHE = _descriptor.ServiceDescriptor(
   file=DESCRIPTOR,
   index=1,
   serialized_options=None,
-  serialized_start=7592,
-  serialized_end=8062,
+  serialized_start=8539,
+  serialized_end=9009,
   methods=[
   _descriptor.MethodDescriptor(
     name='GetActionResult',
@@ -2588,8 +2903,8 @@ _CONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor(
   file=DESCRIPTOR,
   index=2,
   serialized_options=None,
-  serialized_start=8065,
-  serialized_end=8860,
+  serialized_start=9012,
+  serialized_end=9807,
   methods=[
   _descriptor.MethodDescriptor(
     name='FindMissingBlobs',
@@ -2639,8 +2954,8 @@ _CAPABILITIES = _descriptor.ServiceDescriptor(
   file=DESCRIPTOR,
   index=3,
   serialized_options=None,
-  serialized_start=8863,
-  serialized_end=9052,
+  serialized_start=9810,
+  serialized_end=9999,
   methods=[
   _descriptor.MethodDescriptor(
     name='GetCapabilities',
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
index 3769a68..5a30549 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
@@ -87,6 +87,7 @@ class ExecutionServicer(object):
     action will be reported in the `status` field of the `ExecuteResponse`. The
     server MUST NOT set the `error` field of the `Operation` proto.
     The possible errors include:
+
     * `INVALID_ARGUMENT`: One or more arguments are invalid.
     * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
     action requested, such as a missing input or command or no worker being
@@ -99,6 +100,9 @@ class ExecutionServicer(object):
     * `INTERNAL`: An internal error occurred in the execution engine or the
     worker.
     * `DEADLINE_EXCEEDED`: The execution timed out.
+    * `CANCELLED`: The operation was cancelled by the client. This status is
+    only possible if the server implements the Operations API CancelOperation
+    method, and it was called for the current execution.
 
     In the case of a missing input or command, the server SHOULD additionally
     send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
@@ -152,10 +156,7 @@ class ActionCacheStub(object):
 
   The lifetime of entries in the action cache is implementation-specific, but
   the server SHOULD assume that more recently used entries are more likely to
-  be used again. Additionally, action cache implementations SHOULD ensure that
-  any blobs referenced in the
-  [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
-  are still valid when returning a result.
+  be used again.
 
   As with other services in the Remote Execution API, any call may return an
   error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
@@ -192,10 +193,7 @@ class ActionCacheServicer(object):
 
   The lifetime of entries in the action cache is implementation-specific, but
   the server SHOULD assume that more recently used entries are more likely to
-  be used again. Additionally, action cache implementations SHOULD ensure that
-  any blobs referenced in the
-  [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
-  are still valid when returning a result.
+  be used again.
 
   As with other services in the Remote Execution API, any call may return an
   error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
@@ -206,7 +204,15 @@ class ActionCacheServicer(object):
   def GetActionResult(self, request, context):
     """Retrieve a cached execution result.
 
+    Implementations SHOULD ensure that any blobs referenced from the
+    [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+    are available at the time of returning the
+    [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be
+    for some period of time afterwards. The TTLs of the referenced blobs SHOULD be increased
+    if necessary and applicable.
+
     Errors:
+
     * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
     """
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
@@ -216,11 +222,6 @@ class ActionCacheServicer(object):
   def UpdateActionResult(self, request, context):
     """Upload a new execution result.
 
-    This method is intended for servers which implement the distributed cache
-    independently of the
-    [Execution][build.bazel.remote.execution.v2.Execution] API. As a
-    result, it is OPTIONAL for servers to implement.
-
     In order to allow the server to perform access control based on the type of
     action, and to assist with client debugging, the client MUST first upload
     the [Action][build.bazel.remote.execution.v2.Execution] that produced the
@@ -229,7 +230,10 @@ class ActionCacheServicer(object):
     `ContentAddressableStorage`.
 
     Errors:
-    * `NOT_IMPLEMENTED`: This method is not supported by the server.
+
+    * `INVALID_ARGUMENT`: One or more arguments are invalid.
+    * `FAILED_PRECONDITION`: One or more errors occurred in updating the
+    action result, such as a missing command or action.
     * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
     entry to the cache.
     """
@@ -273,8 +277,8 @@ class ContentAddressableStorageStub(object):
   hierarchy, which must also each be uploaded on their own.
 
   For small file uploads the client should group them together and call
-  [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
-  on chunks of no more than 10 MiB. For large uploads, the client must use the
+  [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+  For large uploads, the client must use the
   [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
   `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
   where `instance_name` is as described in the next paragraph, `uuid` is a
@@ -296,6 +300,9 @@ class ContentAddressableStorageStub(object):
   by the server. For servers which do not support multiple instances, then the
   `instance_name` is the empty path and the leading slash is omitted, so that
   the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+  To simplify parsing, a path segment cannot equal any of the following
+  keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+  `capabilities`.
 
   When attempting an upload, if another client has already completed the upload
   (which may occur in the middle of a single upload if another client uploads
@@ -369,8 +376,8 @@ class ContentAddressableStorageServicer(object):
   hierarchy, which must also each be uploaded on their own.
 
   For small file uploads the client should group them together and call
-  [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
-  on chunks of no more than 10 MiB. For large uploads, the client must use the
+  [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+  For large uploads, the client must use the
   [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
   `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
   where `instance_name` is as described in the next paragraph, `uuid` is a
@@ -392,6 +399,9 @@ class ContentAddressableStorageServicer(object):
   by the server. For servers which do not support multiple instances, then the
   `instance_name` is the empty path and the leading slash is omitted, so that
   the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+  To simplify parsing, a path segment cannot equal any of the following
+  keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+  `capabilities`.
 
   When attempting an upload, if another client has already completed the upload
   (which may occur in the middle of a single upload if another client uploads
@@ -447,10 +457,12 @@ class ContentAddressableStorageServicer(object):
     independently.
 
     Errors:
+
     * `INVALID_ARGUMENT`: The client attempted to upload more than the
     server supported limit.
 
     Individual requests may return the following errors, additionally:
+
     * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
     * `INVALID_ARGUMENT`: The
     [Digest][build.bazel.remote.execution.v2.Digest] does not match the
@@ -475,6 +487,7 @@ class ContentAddressableStorageServicer(object):
     independently.
 
     Errors:
+
     * `INVALID_ARGUMENT`: The client attempted to read more than the
     server supported limit.
 
@@ -506,6 +519,8 @@ class ContentAddressableStorageServicer(object):
     If part of the tree is missing from the CAS, the server will return the
     portion present and omit the rest.
 
+    Errors:
+
     * `NOT_FOUND`: The requested tree root is not present in the CAS.
     """
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
@@ -573,7 +588,14 @@ class CapabilitiesServicer(object):
   """
 
   def GetCapabilities(self, request, context):
-    """GetCapabilities returns the server capabilities configuration.
+    """GetCapabilities returns the server capabilities configuration of the
+    remote endpoint.
+    Only the capabilities of the services supported by the endpoint will
+    be returned:
+    * Execution + CAS + Action Cache endpoints should return both
+    CacheCapabilities and ExecutionCapabilities.
+    * Execution only endpoints should return ExecutionCapabilities.
+    * CAS + Action Cache only endpoints should return CacheCapabilities.
     """
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')
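
As a rough illustration of the Capabilities service described in the
docstrings above, a client can probe what a remote endpoint supports via the
generated stub. This is a minimal sketch only; the endpoint address and
instance name are assumptions for illustration, not part of this change:

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2,
        remote_execution_pb2_grpc,
    )

    # Hypothetical endpoint and instance name, for illustration only.
    channel = grpc.insecure_channel("re.example.com:50051")
    stub = remote_execution_pb2_grpc.CapabilitiesStub(channel)

    request = remote_execution_pb2.GetCapabilitiesRequest(instance_name="main")
    capabilities = stub.GetCapabilities(request)

    # Per the docstring above, only the capabilities of the services the
    # endpoint actually supports are populated.
    if capabilities.HasField("cache_capabilities"):
        print("CAS / Action Cache supported")
    if capabilities.HasField("execution_capabilities"):
        print("Remote execution supported")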


[buildstream] 26/26: element: always attempt to save the workspace config


commit 7e7e2e65b22204d989993d0dcddfef7fe008f6f8
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Thu Jan 16 12:10:38 2020 +0000

    element: always attempt to save the workspace config
    
    Save the workspace config if the element is cached and workspaced,
    regardless of the success of the build.
---
 src/buildstream/element.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index 6ba3595..6e263f7 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -1570,7 +1570,7 @@ class Element(Plugin):
         self.__update_cache_key_non_strict()
         self._update_ready_for_runtime_and_cached()
 
-        if self._get_workspace() and self._cached_success():
+        if self._get_workspace() and self._cached():
             assert utils._is_main_process(), "Attempted to save workspace configuration from child process"
             #
             # Note that this block can only happen in the


[buildstream] 25/26: Rename workspace.last_successful to workspace.last_build


commit b97e32612da29addcadf2e1aa2d11b2ab5908b25
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Jan 13 09:06:11 2020 +0000

    Rename workspace.last_successful to workspace.last_build
    
    Add last_dep and built_incrementally, and expand the criteria for
    can_build_incrementally. Also add several FIXMEs for incremental
    support.
---
 src/buildstream/_loader/loader.py            |  2 +-
 src/buildstream/_stream.py                   |  2 +-
 src/buildstream/_workspaces.py               | 37 ++++++++++---
 src/buildstream/element.py                   | 79 +++++++++++++++++++---------
 src/buildstream/plugins/sources/workspace.py |  8 +--
 5 files changed, 90 insertions(+), 38 deletions(-)

diff --git a/src/buildstream/_loader/loader.py b/src/buildstream/_loader/loader.py
index 0c6c725..bbc16d2 100644
--- a/src/buildstream/_loader/loader.py
+++ b/src/buildstream/_loader/loader.py
@@ -456,7 +456,7 @@ class Loader:
         if workspace:
             workspace_node = {"kind": "workspace"}
             workspace_node["path"] = workspace.get_absolute_path()
-            workspace_node["last_successful"] = str(workspace.to_dict().get("last_successful", ""))
+            workspace_node["last_build"] = str(workspace.to_dict().get("last_build", ""))
             node[Symbol.SOURCES] = [workspace_node]
             skip_workspace = False
 
diff --git a/src/buildstream/_stream.py b/src/buildstream/_stream.py
index db9794c..b2a888c 100644
--- a/src/buildstream/_stream.py
+++ b/src/buildstream/_stream.py
@@ -931,7 +931,7 @@ class Stream:
 
             if soft:
                 workspace.prepared = False
-                workspace.last_successful = None
+                workspace.last_build = None
                 self._message(
                     MessageType.INFO, "Reset workspace state for {} at: {}".format(element.name, workspace_path)
                 )
diff --git a/src/buildstream/_workspaces.py b/src/buildstream/_workspaces.py
index 49b76a7..9c383c7 100644
--- a/src/buildstream/_workspaces.py
+++ b/src/buildstream/_workspaces.py
@@ -232,22 +232,34 @@ class WorkspaceProjectCache:
 # An object to contain various helper functions and data required for
 # workspaces.
 #
-# last_successful, path and running_files are intended to be public
+# last_build, path and running_files are intended to be public
 # properties, but may be best accessed using this class's helper
 # methods.
 #
 # Args:
 #    toplevel_project (Project): Top project. Will be used for resolving relative workspace paths.
 #    path (str): The path that should host this workspace
-#    last_successful (str): The key of the last successful build of this workspace
+#    last_build (str): The key of the last attempted build of this workspace
 #    running_files (dict): A dict mapping dependency elements to files
 #                          changed between failed builds. Should be
 #                          made obsolete with failed build artifacts.
 #
 class Workspace:
-    def __init__(self, toplevel_project, *, last_successful=None, path=None, prepared=False, running_files=None):
+    def __init__(
+        self,
+        toplevel_project,
+        *,
+        last_build=None,
+        last_dep=None,
+        path=None,
+        built_incrementally=False,
+        prepared=False,
+        running_files=None
+    ):
         self.prepared = prepared
-        self.last_successful = last_successful
+        self.built_incrementally = built_incrementally
+        self.last_build = last_build
+        self.last_dep = last_dep
         self._path = path
         self.running_files = running_files if running_files is not None else {}
 
@@ -262,9 +274,16 @@ class Workspace:
     #     (dict) A dict representation of the workspace
     #
     def to_dict(self):
-        ret = {"prepared": self.prepared, "path": self._path, "running_files": self.running_files}
-        if self.last_successful is not None:
-            ret["last_successful"] = self.last_successful
+        ret = {
+            "built_incrementally": self.built_incrementally,
+            "prepared": self.prepared,
+            "path": self._path,
+            "running_files": self.running_files,
+        }
+        if self.last_build is not None:
+            ret["last_build"] = self.last_build
+        if self.last_dep is not None:
+            ret["last_dep"] = self.last_dep
         return ret
 
     # from_dict():
@@ -585,8 +604,10 @@ class Workspaces:
 
         dictionary = {
             "prepared": node.get_bool("prepared", default=False),
+            "built_incrementally": node.get_bool("built_incrementally", default=False),
             "path": node.get_str("path"),
-            "last_successful": node.get_str("last_successful", default=None),
+            "last_build": node.get_str("last_build", default=None),
+            "last_dep": node.get_str("last_dep", default=None),
             "running_files": running_files,
         }
         return Workspace.from_dict(self._toplevel_project, dictionary)
diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index d333a40..6ba3595 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -750,27 +750,27 @@ class Element(Plugin):
         workspace = self._get_workspace()
         context = self._get_context()
 
-        if self.__can_build_incrementally() and workspace.last_successful:
+        if self.__can_build_incrementally():
 
             # Try to perform an incremental build if the last successful
             # build is still in the artifact cache
             #
-            if self.__artifacts.contains(self, workspace.last_successful):
-                last_successful = Artifact(self, context, strong_key=workspace.last_successful)
-                # Get a dict of dependency strong keys
-                old_dep_keys = last_successful.get_metadata_dependencies()
-            else:
-                # Last successful build is no longer in the artifact cache,
-                # so let's reset it and perform a full build now.
-                workspace.prepared = False
-                workspace.last_successful = None
-
-                self.info("Resetting workspace state, last successful build is no longer in the cache")
-
-                # In case we are staging in the main process
-                if utils._is_main_process():
-                    context.get_workspaces().save_config()
-
+            last_artifact = Artifact(self, context, strong_key=workspace.last_build)
+            # Get a dict of dependency strong keys
+            old_dep_keys = last_artifact.get_metadata_dependencies()
+        elif workspace:
+            # Last successful build is no longer in the artifact cache,
+            # so let's reset it and perform a full build now.
+            workspace.prepared = False
+            workspace.last_build = None
+
+            self.info("Resetting workspace state, last successful build is no longer in the cache")
+
+            # In case we are staging in the main process
+            if utils._is_main_process():
+                context.get_workspaces().save_config()
+
+        # FIXME: for incremental builds, if the build deps have changed then we must fall back to a full build
         for dep in self.dependencies(scope):
             # If we are workspaced, and we therefore perform an
             # incremental build, we must ensure that we update the mtimes
@@ -1402,6 +1402,7 @@ class Element(Plugin):
         # perform incremental builds.
         if self.__can_build_incrementally():
             sandbox.mark_directory(directory)
+        # FIXME: incremental builds should merge the source into the last artifact before staging
 
         # Stage all sources that need to be copied
         sandbox_vroot = sandbox.get_virtual_directory()
@@ -1581,7 +1582,14 @@ class Element(Plugin):
             #
             key = self._get_cache_key()
             workspace = self._get_workspace()
-            workspace.last_successful = key
+            workspace.last_build = key
+            # FIXME: get last dep hash and save to the workspace config
+            workspace.last_dep = None
+            # FIXME: merge the cached source into the cached buildtree
+            # if the digest of this is equivalent to the digest of the cached
+            # buildtree then the workspace was built incrementally (that is,
+            # the sourcetree is a subset of the buildtree)
+            workspace.built_incrementally = False
             workspace.clear_running_files()
             self._get_context().get_workspaces().save_config()
 
@@ -2363,7 +2371,32 @@ class Element(Plugin):
     #    (bool): Whether this element can be built incrementally
     #
     def __can_build_incrementally(self):
-        return bool(self._get_workspace())
+        # FIXME:
+        # in order to build incrementally the element must satisfy the following:
+        # 1. it is workspaced
+        # 2. the workspace config provides the previous key, previous artifact and previous dependency hash
+        # 3. the previous artifact is cached
+        # 4. the workspace advertises that the artifact and key can merge (calculated after the previous build and saved in the config)
+        # 5. the dependency hash is unchanged
+
+        # 1
+        workspace = self._get_workspace()
+        if workspace:
+            assert len(self.__sources) == 1
+            # 2
+            if not (workspace.last_dep and workspace.last_build):
+                return False
+            # 4
+            if not workspace.built_incrementally:
+                return False
+            # 3
+            if not self.__artifacts.contains(self, workspace.last_build):
+                return False
+            # 5
+            # TODO
+            return True
+
+        return False
 
     # __configure_sandbox():
     #
@@ -2386,11 +2419,9 @@ class Element(Plugin):
         workspace = self._get_workspace()
         prepared = False
         if workspace and workspace.prepared:
-            # FIXME: ideally we don't have to check this, eventually we would
-            # like to get the saved old_ref and apply the new workspace on top
-            # to support incremental builds.
-            if [s._key for s in self.__sources] == [workspace.last_successful]:
-                prepared = True
+            # FIXME: ideally we would have already merged workspace.last_build and
+            # could consider this prepared; for now we mark it as unprepared
+            prepared = False
 
         if not prepared:
             self.prepare(sandbox)
diff --git a/src/buildstream/plugins/sources/workspace.py b/src/buildstream/plugins/sources/workspace.py
index 5225b1a..375299b 100644
--- a/src/buildstream/plugins/sources/workspace.py
+++ b/src/buildstream/plugins/sources/workspace.py
@@ -55,16 +55,16 @@ class WorkspaceSource(Source):
         self.__unique_key = None
         # the digest of the Directory following the import of the workspace
         self.__digest = None
-        # the cache key of the last successful workspace
-        self.__last_successful = None
+        # the cache key of the last workspace build
+        self.__last_build = None
 
     def track(self) -> SourceRef:  # pylint: disable=arguments-differ
         return None
 
     def configure(self, node: MappingNode) -> None:
-        node.validate_keys(["path", "last_successful", "kind"])
+        node.validate_keys(["path", "last_build", "kind"])
         self.path = node.get_str("path")
-        self.__last_successful = node.get_str("last_successful")
+        self.__last_build = node.get_str("last_build")
 
     def preflight(self) -> None:
         pass  # pragma: nocover
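
For reference, a sketch of the dict produced by Workspace.to_dict() after
this change (the values here are hypothetical, and last_build/last_dep only
appear once they have been set):

    {
        "built_incrementally": False,  # set once a successful merge check is recorded
        "prepared": True,
        "path": "/home/user/workspaces/hello",
        "running_files": {},
        "last_build": "0d1b6a...",  # hypothetical strong key of the last attempted build
    }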


[buildstream] 23/26: _sandboxreapi: append MTimes to Action output properties


commit 0283e6b78c59d26bd20fc72f7defd2ebd71bfe2f
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Dec 30 17:01:27 2019 +0000

    _sandboxreapi: append MTimes to Action output properties
---
 src/buildstream/sandbox/_sandboxreapi.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/buildstream/sandbox/_sandboxreapi.py b/src/buildstream/sandbox/_sandboxreapi.py
index 2430fd3..590e4a9 100644
--- a/src/buildstream/sandbox/_sandboxreapi.py
+++ b/src/buildstream/sandbox/_sandboxreapi.py
@@ -69,6 +69,7 @@ class SandboxREAPI(Sandbox):
         command_proto = self._create_command(command, cwd, env)
         command_digest = cascache.add_object(buffer=command_proto.SerializeToString())
         action = remote_execution_pb2.Action(command_digest=command_digest, input_root_digest=input_root_digest)
+        action.output_node_properties.append("MTime")
 
         action_result = self._execute_action(action, flags)  # pylint: disable=assignment-from-no-return
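
With "MTime" appended to output_node_properties, a conforming worker is
expected to attach a matching NodeProperty to each captured output. A sketch
of how the returned ActionResult could then be inspected (assuming the proto
update from earlier in this branch):

    # action_result as returned by self._execute_action(action, flags)
    for output_file in action_result.output_files:
        for prop in output_file.node_properties:
            if prop.name == "MTime" and prop.value:
                # prop.value carries an RFC 3339 timestamp string
                print(output_file.path, "modified at", prop.value)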
 


[buildstream] 18/26: Reference node properties specification in comments

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit cc20ce3a7b966dcb5fc5faaec46b227e721e1796
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Jan 8 13:35:51 2020 +0000

    Reference node properties specification in comments
---
 src/buildstream/_cas/cascache.py               | 2 +-
 src/buildstream/storage/_casbaseddirectory.py  | 2 ++
 src/buildstream/storage/_filebaseddirectory.py | 4 +++-
 3 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/_cas/cascache.py b/src/buildstream/_cas/cascache.py
index c608f12..bb2abc6 100644
--- a/src/buildstream/_cas/cascache.py
+++ b/src/buildstream/_cas/cascache.py
@@ -224,7 +224,7 @@ class CASCache:
             else:
                 utils.safe_copy(self.objpath(filenode.digest), fullpath)
                 if filenode.node_properties:
-                    # XXX: see https://github.com/bazelbuild/remote-apis/blob/master/build/bazel/remote/execution/v2/nodeproperties.md
+                    # see https://github.com/bazelbuild/remote-apis/blob/master/build/bazel/remote/execution/v2/nodeproperties.md
                     # for supported node property specifications
                     for prop in filenode.node_properties:
                         if prop.name == "MTime" and prop.value:
diff --git a/src/buildstream/storage/_casbaseddirectory.py b/src/buildstream/storage/_casbaseddirectory.py
index 6fdca1a..93facd6 100644
--- a/src/buildstream/storage/_casbaseddirectory.py
+++ b/src/buildstream/storage/_casbaseddirectory.py
@@ -163,6 +163,8 @@ class CasBasedDirectory(Directory):
         entry.digest = self.cas_cache.add_object(path=path, link_directly=can_link)
         entry.is_executable = os.access(path, os.X_OK)
         properties = properties or []
+        # see https://github.com/bazelbuild/remote-apis/blob/master/build/bazel/remote/execution/v2/nodeproperties.md
+        # for supported node property specifications
         entry.node_properties = []
         if "MTime" in properties:
             node_property = remote_execution_pb2.NodeProperty()
diff --git a/src/buildstream/storage/_filebaseddirectory.py b/src/buildstream/storage/_filebaseddirectory.py
index a38e2ac..7b745f7 100644
--- a/src/buildstream/storage/_filebaseddirectory.py
+++ b/src/buildstream/storage/_filebaseddirectory.py
@@ -307,7 +307,9 @@ class FileBasedDirectory(Directory):
                     if update_mtime or entry.node_properties:
                         utils.safe_copy(src_path, dest_path, result=result)
                         mtime = update_mtime
-                        # XXX mtime property will override specified mtime
+                        # mtime property will override specified mtime
+                        # see https://github.com/bazelbuild/remote-apis/blob/master/build/bazel/remote/execution/v2/nodeproperties.md
+                        # for supported node property specifications
                         if entry.node_properties:
                             for prop in entry.node_properties:
                                 if prop.name == "MTime" and prop.value:


[buildstream] 15/26: tests: improve RE workspace test


commit 1120ad82dd69040aa129cfb10f1c885e749ead09
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Tue Dec 24 12:00:38 2019 +0000

    tests: improve RE workspace test
---
 tests/remoteexecution/workspace.py | 175 ++++++++++++++++++++-----------------
 1 file changed, 96 insertions(+), 79 deletions(-)

diff --git a/tests/remoteexecution/workspace.py b/tests/remoteexecution/workspace.py
index b810bf1..3bf35a7 100644
--- a/tests/remoteexecution/workspace.py
+++ b/tests/remoteexecution/workspace.py
@@ -2,6 +2,7 @@
 # pylint: disable=redefined-outer-name
 
 import os
+import re
 import shutil
 import pytest
 
@@ -11,10 +12,15 @@ from buildstream.testing.integration import assert_contains
 pytestmark = pytest.mark.remoteexecution
 
 
+# subdirectories of the buildtree
+SRC = "src"
+DEPS = os.path.join(SRC, ".deps")
+AUTO = "autom4te.cache"
+DIRS = [os.sep + SRC, os.sep + DEPS, os.sep + AUTO]
+
 DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
-MKFILEAM = os.path.join("src", "Makefile.am")
-MKFILE = os.path.join("src", "Makefile")
-MAIN = os.path.join("src", "main.o")
+MAIN = os.path.join(SRC, "main.c")
+MAINO = os.path.join(SRC, "main.o")
 CFGMARK = "config-time"
 BLDMARK = "build-time"
 
@@ -29,9 +35,9 @@ def files():
         "depcomp",
         "configure.ac",
         "compile",
-        "src",
-        os.path.join("src", "main.c"),
-        MKFILEAM,
+        SRC,
+        MAIN,
+        os.path.join(SRC, "Makefile.am"),
         "Makefile.am",
     ]
     input_files = [os.sep + fname for fname in _input_files]
@@ -39,26 +45,26 @@ def files():
     _generated_files = [
         "Makefile",
         "Makefile.in",
-        "autom4te.cache",
-        os.path.join("autom4te.cache", "traces.1"),
-        os.path.join("autom4te.cache", "traces.0"),
-        os.path.join("autom4te.cache", "requests"),
-        os.path.join("autom4te.cache", "output.0"),
-        os.path.join("autom4te.cache", "output.1"),
+        AUTO,
+        os.path.join(AUTO, "traces.1"),
+        os.path.join(AUTO, "traces.0"),
+        os.path.join(AUTO, "requests"),
+        os.path.join(AUTO, "output.0"),
+        os.path.join(AUTO, "output.1"),
         "config.h",
         "config.h.in",
         "config.log",
         "config.status",
         "configure",
         "configure.lineno",
-        os.path.join("src", "hello"),
-        os.path.join("src", ".deps"),
-        os.path.join("src", ".deps", "main.Po"),
-        MKFILE,
-        MAIN,
+        os.path.join(SRC, "hello"),
+        DEPS,
+        os.path.join(DEPS, "main.Po"),
+        os.path.join(SRC, "Makefile"),
+        MAINO,
         CFGMARK,
         BLDMARK,
-        os.path.join("src", "Makefile.in"),
+        os.path.join(SRC, "Makefile.in"),
         "stamp-h1",
     ]
     generated_files = [os.sep + fname for fname in _generated_files]
@@ -79,12 +85,9 @@ def files():
 
 def _get_mtimes(root):
     assert os.path.exists(root)
-    for dirname, dirnames, filenames in os.walk(root):
-        dirnames.sort()
+    # timestamps on subdirs are not currently semantically meaningful
+    for dirname, _, filenames in os.walk(root):
         filenames.sort()
-        for subdirname in dirnames:
-            fname = os.path.join(dirname, subdirname)
-            yield fname[len(root) :], os.stat(fname).st_mtime
         for filename in filenames:
             fname = os.path.join(dirname, filename)
             yield fname[len(root) :], os.stat(fname).st_mtime
@@ -129,8 +132,6 @@ def check_buildtree(
     result.assert_success()
 
     buildtree = {}
-    inp_times = []
-    gen_times = []
     output = result.output.splitlines()
 
     for line in output:
@@ -141,28 +142,34 @@ def check_buildtree(
         mtime = int(mtime)
         buildtree[fname] = mtime
 
+        typ_inptime = None
+        typ_gentime = None
+
         if incremental:
+            # directory timestamps are not meaningful
+            if fname in DIRS:
+                continue
             if fname in input_files:
-                inp_times.append(mtime)
-            else:
-                gen_times.append(mtime)
+                if fname != os.sep + MAIN and not typ_inptime:
+                    typ_inptime = mtime
+            if fname in generated_files:
+                if fname != os.sep + MAINO and not typ_gentime:
+                    typ_gentime = mtime
 
     # all expected files should have been found
     for filename in input_files + generated_files:
         assert filename in buildtree
 
     if incremental:
-        # at least inputs should be older than generated files
-        assert not any([inp_time > gen_time for inp_time in inp_times for gen_time in gen_times])
-
-        makefile = os.sep + "Makefile"
-        makefile_am = os.sep + "Makefile.am"
-        mainc = os.sep + os.path.join("src", "main.c")
-        maino = os.sep + os.path.join("src", "hello")
-        testfiles = [makefile, makefile_am, mainc, maino]
-        if all([testfile in buildtree for testfile in testfiles]):
-            assert buildtree[makefile] < buildtree[makefile_am]
-            assert buildtree[mainc] < buildtree[maino]
+        # The source file was changed, so it should be more recent than other
+        # input files but older than the main object.
+        # The main object should be more recent than generated files.
+        assert buildtree[os.sep + MAIN] > typ_inptime
+        assert buildtree[os.sep + MAINO] > buildtree[os.sep + MAIN]
+        assert buildtree[os.sep + MAINO] > typ_gentime
+
+    for fname in DIRS:
+        del buildtree[fname]
 
     return buildtree
 
@@ -178,7 +185,7 @@ def get_timemark(cli, project, element_name, marker):
 
 @pytest.mark.datafiles(DATA_DIR)
 @pytest.mark.parametrize(
-    "modification", [pytest.param("none"), pytest.param("content"), pytest.param("time"),],
+    "modification", [pytest.param("content"), pytest.param("time"),],
 )
 @pytest.mark.parametrize(
     "buildtype",
@@ -190,10 +197,7 @@ def get_timemark(cli, project, element_name, marker):
     ],
 )
 def test_workspace_build(cli, tmpdir, datafiles, modification, buildtype):
-    incremental = False
-    if buildtype == "incremental":
-        incremental = True
-
+    incremental = buildtype == "incremental"
     project = str(datafiles)
     checkout = os.path.join(cli.directory, "checkout")
     workspace = os.path.join(cli.directory, "workspace")
@@ -229,14 +233,17 @@ def test_workspace_build(cli, tmpdir, datafiles, modification, buildtype):
     # build the element and cache the buildtree
     result = cli.run(project=project, args=build)
     result.assert_success()
+    assert cli.get_element_state(project, element_name) == "cached"
+    build_key = cli.get_element_key(project, element_name)
 
     # check that the local workspace is unchanged
     assert_contains(workspace, input_files, strict=True)
     assert ws_times == get_mtimes(workspace)
 
     # check modified workspace dir was cached and save the time
-    # build was run
-    build_mtimes = check_buildtree(cli, project, element_name, input_files, generated_files, incremental=incremental)
+    # build was run. Incremental build conditions do not apply since the workspace
+    # was initially opened using magic timestamps.
+    build_times = check_buildtree(cli, project, element_name, input_files, generated_files, incremental=False)
     build_timemark = get_timemark(cli, project, element_name, (os.sep + BLDMARK))
 
     # check that the artifacts are available
@@ -248,45 +255,55 @@ def test_workspace_build(cli, tmpdir, datafiles, modification, buildtype):
     # rebuild the element
     result = cli.run(project=project, args=build)
     result.assert_success()
-    # this should all be cached
-    # so the buildmark time should be the same
-    rebuild_mtimes = check_buildtree(cli, project, element_name, input_files, generated_files, incremental=incremental)
+    assert cli.get_element_state(project, element_name) == "cached"
+    rebuild_key = cli.get_element_key(project, element_name)
+    assert rebuild_key == build_key
+    rebuild_times = check_buildtree(cli, project, element_name, input_files, generated_files, incremental=False)
     rebuild_timemark = get_timemark(cli, project, element_name, (os.sep + BLDMARK))
 
+    # buildmark time should be the same
     assert build_timemark == rebuild_timemark
-    assert build_mtimes == rebuild_mtimes
+    assert all([rebuild_times[fname] == build_times[fname] for fname in rebuild_times]), "{}\n{}".format(
+        rebuild_times, build_times
+    )
 
     # modify the open workspace and rebuild
-    if modification != "none":
-        assert os.path.exists(newfile_path)
-
-        if modification == "time":
-            # touch a file in the workspace and save the mtime
-            os.utime(newfile_path)
-
-        elif modification == "content":
-            # change a source file
-            with open(newfile_path, "w") as fdata:
-                fdata.write("anotherstring")
-
-        # refresh input times
-        ws_times = get_mtimes(workspace)
+    main_path = os.path.join(workspace, MAIN)
+    assert os.path.exists(main_path)
+
+    if modification == "time":
+        # touch a file in the workspace and save the mtime
+        os.utime(main_path)
+        touched_time = os.stat(main_path).st_mtime
+
+    elif modification == "content":
+        # change a source file (there's a race here but it's not serious)
+        with open(main_path, "r") as fdata:
+            data = fdata.readlines()
+        with open(main_path, "w") as fdata:
+            for line in data:
+                fdata.write(re.sub(r"Hello", "Goodbye", line))
+        touched_time = os.stat(main_path).st_mtime
+
+    # refresh input times
+    ws_times = get_mtimes(workspace)
 
-        # rebuild the element
-        result = cli.run(project=project, args=build)
-        result.assert_success()
+    # rebuild the element
+    result = cli.run(project=project, args=build)
+    result.assert_success()
 
-        rebuild_mtimes = check_buildtree(
-            cli, project, element_name, input_files, generated_files, incremental=incremental
-        )
-        rebuild_timemark = get_timemark(cli, project, element_name, (os.sep + BLDMARK))
-        assert build_timemark != rebuild_timemark
+    rebuild_times = check_buildtree(cli, project, element_name, input_files, generated_files, incremental=incremental)
+    rebuild_timemark = get_timemark(cli, project, element_name, (os.sep + BLDMARK))
+    assert rebuild_timemark > build_timemark
 
-        # check the times of the changed files
-        if incremental:
-            touched_time = os.stat(newfile_path).st_mtime
-            assert rebuild_mtimes[newfile] == touched_time
+    # check the times of the changed files
+    if incremental:
+        assert rebuild_times[os.sep + MAIN] == touched_time
+        del rebuild_times[os.sep + MAIN]
+    assert all([rebuild_times[fname] == build_times[fname] for fname in rebuild_times]), "{}\n{}".format(
+        rebuild_times, build_times
+    )
 
-        # Check workspace is unchanged
-        assert_contains(workspace, input_files, strict=True)
-        assert ws_times == get_mtimes(workspace)
+    # Check workspace is unchanged
+    assert_contains(workspace, input_files, strict=True)
+    assert ws_times == get_mtimes(workspace)
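
Note that the test snapshots the workspace with get_mtimes(), which is not
shown in this hunk; presumably it just materializes the _get_mtimes()
generator, along these lines:

    def get_mtimes(root):
        # collect a {relative path: mtime} mapping for every file under root
        return dict(_get_mtimes(root))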


[buildstream] 07/26: Add utils for file timestamp support


commit f735652bb7a3612d0b63b53bb14b62ee6bec4b0e
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Jan 8 16:30:47 2020 +0000

    Add utils for file timestamp support
---
 requirements/cov-requirements.txt    | 14 +++----
 requirements/dev-requirements.txt    | 18 ++++-----
 requirements/requirements.in         |  1 +
 requirements/requirements.txt        |  7 ++--
 src/buildstream/utils.py             | 77 ++++++++++++++++++++++++++++++++++++
 tests/internals/utils_move_atomic.py | 23 ++++++++++-
 6 files changed, 120 insertions(+), 20 deletions(-)

diff --git a/requirements/cov-requirements.txt b/requirements/cov-requirements.txt
index c700aba..abf4936 100644
--- a/requirements/cov-requirements.txt
+++ b/requirements/cov-requirements.txt
@@ -3,13 +3,13 @@ pytest-cov==2.8.1
 Cython==0.29.14
 ## The following requirements were added by pip freeze:
 attrs==19.3.0
-importlib-metadata==1.1.0
-more-itertools==8.0.0
-packaging==19.2
+importlib-metadata==1.3.0
+more-itertools==8.0.2
+packaging==20.0
 pluggy==0.13.1
-py==1.8.0
-pyparsing==2.4.5
-pytest==5.3.1
+py==1.8.1
+pyparsing==2.4.6
+pytest==5.3.2
 six==1.13.0
-wcwidth==0.1.7
+wcwidth==0.1.8
 zipp==0.6.0
diff --git a/requirements/dev-requirements.txt b/requirements/dev-requirements.txt
index 15e9b30..f188280 100644
--- a/requirements/dev-requirements.txt
+++ b/requirements/dev-requirements.txt
@@ -1,29 +1,29 @@
 pexpect==4.7.0
 pylint==2.4.4
-pytest==5.3.1
+pytest==5.3.2
 pytest-datafiles==2.0
 pytest-env==0.6.2
-pytest-xdist==1.30.0
-pytest-timeout==1.3.3
+pytest-xdist==1.31.0
+pytest-timeout==1.3.4
 pyftpdlib==1.5.5
 ## The following requirements were added by pip freeze:
 apipkg==1.5
 astroid==2.3.3
 attrs==19.3.0
 execnet==1.7.1
-importlib-metadata==1.1.0
+importlib-metadata==1.3.0
 isort==4.3.21
 lazy-object-proxy==1.4.3
 mccabe==0.6.1
-more-itertools==8.0.0
-packaging==19.2
+more-itertools==8.0.2
+packaging==20.0
 pluggy==0.13.1
 ptyprocess==0.6.0
-py==1.8.0
-pyparsing==2.4.5
+py==1.8.1
+pyparsing==2.4.6
 pytest-forked==1.1.3
 six==1.13.0
 typed-ast==1.4.0
-wcwidth==0.1.7
+wcwidth==0.1.8
 wrapt==1.11.2
 zipp==0.6.0
diff --git a/requirements/requirements.in b/requirements/requirements.in
index 50bb523..a595f69 100644
--- a/requirements/requirements.in
+++ b/requirements/requirements.in
@@ -9,3 +9,4 @@ ruamel.yaml.clib >= 0.1.2
 setuptools
 pyroaring
 ujson
+python-dateutil>= 2.7.0
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index 9620908..1bc75f2 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -1,14 +1,15 @@
 Click==7.0
-grpcio==1.25.0
+grpcio==1.26.0
 Jinja2==2.10.3
 pluginbase==1.0.0
-protobuf==3.11.0
+protobuf==3.11.2
 psutil==5.6.7
 ruamel.yaml==0.16.5
+ruamel.yaml.clib==0.2.0
 setuptools==39.0.1
 pyroaring==0.2.9
 ujson==1.35
+python-dateutil==2.8.1
 ## The following requirements were added by pip freeze:
 MarkupSafe==1.1.1
-ruamel.yaml.clib==0.2.0
 six==1.13.0
diff --git a/src/buildstream/utils.py b/src/buildstream/utils.py
index 545816e..c2a43d9 100644
--- a/src/buildstream/utils.py
+++ b/src/buildstream/utils.py
@@ -33,10 +33,12 @@ from stat import S_ISDIR
 import subprocess
 import tempfile
 import time
+import datetime
 import itertools
 from contextlib import contextmanager
 from pathlib import Path
 from typing import Callable, IO, Iterable, Iterator, Optional, Tuple, Union
+from dateutil import parser as dateutil_parser
 
 import psutil
 
@@ -132,6 +134,81 @@ class FileListResult:
         return ret
 
 
+def _make_timestamp(timepoint: float) -> str:
+    """Obtain the ISO 8601 timestamp represented by the time given in seconds.
+
+    Args:
+        timepoint (float): the time since the epoch in seconds
+
+    Returns:
+        (str): the timestamp in the format specified by https://www.ietf.org/rfc/rfc3339.txt
+               with a UTC timezone code 'Z'.
+
+    """
+    assert isinstance(timepoint, float), "Time to render as timestamp must be a float: {}".format(str(timepoint))
+    try:
+        return datetime.datetime.utcfromtimestamp(timepoint).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+    except (OverflowError, TypeError):
+        raise UtilError("Failed to make UTC timestamp from {}".format(timepoint))
+
+
+def _get_file_mtimestamp(fullpath: str) -> str:
+    """Obtain the ISO 8601 timestamp represented by the mtime of the
+    file at the given path."""
+    assert isinstance(fullpath, str), "Path to file must be a string: {}".format(str(fullpath))
+    try:
+        mtime = os.path.getmtime(fullpath)
+    except OSError:
+        raise UtilError("Failed to get mtime of file at {}".format(fullpath))
+    return _make_timestamp(mtime)
+
+
+def _parse_timestamp(timestamp: str) -> float:
+    """Parse an ISO 8601 timestamp as specified in
+    https://www.ietf.org/rfc/rfc3339.txt. Only timestamps with the UTC code
+    'Z' or an offset are valid. For example: '2019-12-12T10:23:01.54Z' or
+    '2019-12-12T10:23:01.54+00:00'.
+
+    Args:
+        timestamp (str): the timestamp
+
+    Returns:
+        (float): The time in seconds since epoch represented by the
+            timestamp.
+
+    Raises:
+        UtilError: if extraction of seconds fails
+    """
+    assert isinstance(timestamp, str), "Timestamp to parse must be a string: {}".format(str(timestamp))
+    try:
+        errmsg = "Failed to parse given timestamp: " + timestamp
+        parsed_time = dateutil_parser.isoparse(timestamp)
+        if parsed_time.tzinfo:
+            return parsed_time.timestamp()
+        raise UtilError(errmsg)
+    except (ValueError, OverflowError, TypeError):
+        raise UtilError(errmsg)
+
+
+def _set_file_mtime(fullpath: str, seconds: Union[int, float]) -> None:
+    """Set the access and modification times of the file at the given path
+    to the given time. The time of the file will be set with nanosecond
+    resolution if supported.
+
+    Args:
+        fullpath (str): the string representing the path to the file
+        seconds (int, float): the time in seconds since the UNIX epoch
+    """
+    assert isinstance(fullpath, str), "Path to file must be a string: {}".format(str(fullpath))
+    assert isinstance(seconds, (int, float)), "Mtime to set must be a float or integer: {}".format(str(seconds))
+    set_mtime = seconds * 10 ** 9
+    try:
+        os.utime(fullpath, times=None, ns=(int(set_mtime), int(set_mtime)))
+    except OSError:
+        errmsg = "Failed to set the times of the file at {} to {}".format(fullpath, str(seconds))
+        raise UtilError(errmsg)
+
+
 def list_relative_paths(directory: str) -> Iterator[str]:
     """A generator for walking directory relative paths
 
diff --git a/tests/internals/utils_move_atomic.py b/tests/internals/utils_move_atomic.py
index cda0208..dd417cb 100644
--- a/tests/internals/utils_move_atomic.py
+++ b/tests/internals/utils_move_atomic.py
@@ -3,7 +3,13 @@
 
 import pytest
 
-from buildstream.utils import move_atomic, DirectoryExistsError
+from buildstream.utils import (
+    move_atomic,
+    DirectoryExistsError,
+    _get_file_mtimestamp,
+    _set_file_mtime,
+    _parse_timestamp,
+)
 
 
 @pytest.fixture
@@ -89,3 +95,18 @@ def test_move_to_existing_non_empty_dir(src, tmp_path):
 
     with pytest.raises(DirectoryExistsError):
         move_atomic(src, dst)
+
+
+def test_move_to_empty_dir_set_mtime(src, tmp_path):
+    dst = tmp_path.joinpath("dst")
+    move_atomic(src, dst)
+    assert dst.joinpath("test").exists()
+    _dst = str(dst)
+    # set the mtime via stamp
+    timestamp1 = "2020-01-08T11:05:50.832123Z"
+    _set_file_mtime(_dst, _parse_timestamp(timestamp1))
+    assert timestamp1 == _get_file_mtimestamp(_dst)
+    # reset the mtime using an offset stamp
+    timestamp2 = "2010-02-12T12:05:50.832123+01:00"
+    _set_file_mtime(_dst, _parse_timestamp(timestamp2))
+    assert _get_file_mtimestamp(_dst) == "2010-02-12T11:05:50.832123Z"
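
Taken together, the new helpers round-trip a file's mtime through an RFC 3339
string. A minimal sketch (the path is hypothetical and assumed to exist):

    from buildstream.utils import _get_file_mtimestamp, _parse_timestamp, _set_file_mtime

    path = "/tmp/example-file"
    stamp = _get_file_mtimestamp(path)  # e.g. "2020-01-08T16:30:47.123456Z"
    seconds = _parse_timestamp(stamp)   # float seconds since the epoch
    _set_file_mtime(path, seconds)      # re-applies the mtime, with ns resolution where supported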


[buildstream] 05/26: Support properties in import_files()


commit 7acfddd3e27eda65696dcc81d81a578948ddb2fa
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Tue Dec 10 14:11:22 2019 +0000

    Support properties in import_files()
---
 src/buildstream/storage/_casbaseddirectory.py  | 2 +-
 src/buildstream/storage/_filebaseddirectory.py | 9 ++++++++-
 src/buildstream/storage/directory.py           | 7 +++++--
 3 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/src/buildstream/storage/_casbaseddirectory.py b/src/buildstream/storage/_casbaseddirectory.py
index 4698404..bfa3f82 100644
--- a/src/buildstream/storage/_casbaseddirectory.py
+++ b/src/buildstream/storage/_casbaseddirectory.py
@@ -356,7 +356,7 @@ class CasBasedDirectory(Directory):
             # content into this CasBasedDirectory using CAS-to-CAS import
             # to write the report, handle possible conflicts (if the target
             # directory is not empty) and apply the optional filter.
-            digest = self.cas_cache.import_directory(external_pathspec)
+            digest = self.cas_cache.import_directory(external_pathspec, properties=properties)
             external_pathspec = CasBasedDirectory(self.cas_cache, digest=digest)
 
         assert isinstance(external_pathspec, CasBasedDirectory)
diff --git a/src/buildstream/storage/_filebaseddirectory.py b/src/buildstream/storage/_filebaseddirectory.py
index 222b479..95d113e 100644
--- a/src/buildstream/storage/_filebaseddirectory.py
+++ b/src/buildstream/storage/_filebaseddirectory.py
@@ -79,7 +79,14 @@ class FileBasedDirectory(Directory):
         return current_dir
 
     def import_files(
-        self, external_pathspec, *, filter_callback=None, report_written=True, update_mtime=False, can_link=False
+        self,
+        external_pathspec,
+        *,
+        filter_callback=None,
+        report_written=True,
+        update_mtime=False,
+        can_link=False,
+        properties=None
     ):
         """ See superclass Directory for arguments """
 
diff --git a/src/buildstream/storage/directory.py b/src/buildstream/storage/directory.py
index 89d20c4..4cec772 100644
--- a/src/buildstream/storage/directory.py
+++ b/src/buildstream/storage/directory.py
@@ -32,7 +32,7 @@ See also: :ref:`sandboxing`.
 """
 
 
-from typing import Callable, Optional, Union
+from typing import Callable, Optional, Union, List
 
 from .._exceptions import BstError, ErrorDomain
 from ..types import FastEnum
@@ -82,7 +82,8 @@ class Directory:
         filter_callback: Optional[Callable[[str], bool]] = None,
         report_written: bool = True,
         update_mtime: bool = False,
-        can_link: bool = False
+        can_link: bool = False,
+        properties: Optional[List[str]] = None
     ) -> FileListResult:
         """Imports some or all files from external_path into this directory.
 
@@ -103,6 +104,8 @@ class Directory:
             original files change. Setting this doesn't guarantee hard
             links will be made. can_link will never be used if
             update_mtime is set.
+          properties: Optional list of strings representing file properties
+            to capture when importing.
 
         Yields:
           A report of files imported and overwritten.
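
A later commit in this series exercises the new argument when staging
workspace sources; the call shape is simply (a sketch, with a hypothetical
Directory instance and path):

    # Import a local tree into a Directory, capturing file mtimes as node properties
    result = directory.import_files("/path/to/workspace", properties=["MTime"])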


[buildstream] 16/26: tests: avoid testing utimes along with umask


commit c3c0b049d5175857343a1309e9223708f35d3d8d
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Dec 30 08:47:19 2019 +0000

    tests: avoid testing utimes along with umask
    
    Avoid checking utimes when testing umask. These tests are known to
    be flaky and overly specific, in that they implicitly test more than
    just the umask.
---
 src/buildstream/testing/_sourcetests/source_determinism.py | 9 ++++-----
 tests/integration/source-determinism.py                    | 9 ++++-----
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/src/buildstream/testing/_sourcetests/source_determinism.py b/src/buildstream/testing/_sourcetests/source_determinism.py
index ed00c71..c44f0fa 100644
--- a/src/buildstream/testing/_sourcetests/source_determinism.py
+++ b/src/buildstream/testing/_sourcetests/source_determinism.py
@@ -50,10 +50,6 @@ def create_test_directory(*path, mode=0o644):
 @pytest.mark.integration
 @pytest.mark.datafiles(DATA_DIR)
 @pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
-@pytest.mark.skipif(
-    HAVE_SANDBOX == "buildbox-run" and CASD_SEPARATE_USER,
-    reason="Flaky due to timestamps: https://gitlab.com/BuildStream/buildstream/issues/1218",
-)
 def test_deterministic_source_umask(cli, tmpdir, datafiles, kind):
     project = str(datafiles)
     element_name = "list.bst"
@@ -92,6 +88,7 @@ def test_deterministic_source_umask(cli, tmpdir, datafiles, kind):
         old_umask = os.umask(umask)
 
         try:
+            test_values = []
             result = cli.run(project=project, args=["build", element_name])
             result.assert_success()
 
@@ -99,7 +96,9 @@ def test_deterministic_source_umask(cli, tmpdir, datafiles, kind):
             result.assert_success()
 
             with open(os.path.join(checkoutdir, "ls-l"), "r") as f:
-                return f.read()
+                for line in f.readlines():
+                    test_values.append(line.split()[0] + " " + line.split()[-1])
+                return test_values
         finally:
             os.umask(old_umask)
             cli.remove_artifact_from_cache(project, element_name)
diff --git a/tests/integration/source-determinism.py b/tests/integration/source-determinism.py
index 3555881..a69e55a 100644
--- a/tests/integration/source-determinism.py
+++ b/tests/integration/source-determinism.py
@@ -29,10 +29,6 @@ def create_test_directory(*path, mode=0o644):
 @pytest.mark.integration
 @pytest.mark.datafiles(DATA_DIR)
 @pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
-@pytest.mark.skipif(
-    HAVE_SANDBOX == "buildbox-run" and CASD_SEPARATE_USER,
-    reason="Flaky due to timestamps: https://gitlab.com/BuildStream/buildstream/issues/1218",
-)
 def test_deterministic_source_local(cli, tmpdir, datafiles):
     """Only user rights should be considered for local source.
     """
@@ -62,6 +58,7 @@ def test_deterministic_source_local(cli, tmpdir, datafiles):
         create_test_directory(sourcedir, "dir-c", mode=0o2755 & mask)
         create_test_directory(sourcedir, "dir-d", mode=0o1755 & mask)
         try:
+            test_values = []
             result = cli.run(project=project, args=["build", element_name])
             result.assert_success()
 
@@ -69,7 +66,9 @@ def test_deterministic_source_local(cli, tmpdir, datafiles):
             result.assert_success()
 
             with open(os.path.join(checkoutdir, "ls-l"), "r") as f:
-                return f.read()
+                for line in f.readlines():
+                    test_values.append(line.split()[0] + " " + line.split()[-1])
+                return test_values
         finally:
             cli.remove_artifact_from_cache(project, element_name)
 


[buildstream] 14/26: tests: remove xfail for non-incremental-time RE workspace


commit 955526a33fafba2cc9516cf4b905f2a5678a5ac6
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Dec 18 12:19:30 2019 +0000

    tests: remove xfail for non-incremental-time RE workspace
---
 tests/remoteexecution/workspace.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tests/remoteexecution/workspace.py b/tests/remoteexecution/workspace.py
index 83480b4..b810bf1 100644
--- a/tests/remoteexecution/workspace.py
+++ b/tests/remoteexecution/workspace.py
@@ -178,12 +178,7 @@ def get_timemark(cli, project, element_name, marker):
 
 @pytest.mark.datafiles(DATA_DIR)
 @pytest.mark.parametrize(
-    "modification",
-    [
-        pytest.param("none"),
-        pytest.param("content"),
-        pytest.param("time", marks=pytest.mark.xfail(reason="mtimes are set to a magic value and not stored in CAS")),
-    ],
+    "modification", [pytest.param("none"), pytest.param("content"), pytest.param("time"),],
 )
 @pytest.mark.parametrize(
     "buildtype",


[buildstream] 19/26: tests: remove xfails for incremental RE workspaces


commit 24eb3a239e121b261d00e743e9f6bef6614efe01
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Thu Nov 21 10:14:27 2019 +0000

    tests: remove xfails for incremental RE workspaces
---
 tests/remoteexecution/workspace.py | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/tests/remoteexecution/workspace.py b/tests/remoteexecution/workspace.py
index 3bf35a7..58ffb10 100644
--- a/tests/remoteexecution/workspace.py
+++ b/tests/remoteexecution/workspace.py
@@ -188,13 +188,7 @@ def get_timemark(cli, project, element_name, marker):
     "modification", [pytest.param("content"), pytest.param("time"),],
 )
 @pytest.mark.parametrize(
-    "buildtype",
-    [
-        pytest.param("non-incremental"),
-        pytest.param(
-            "incremental", marks=pytest.mark.xfail(reason="incremental workspace builds are not yet supported")
-        ),
-    ],
+    "buildtype", [pytest.param("non-incremental"), pytest.param("incremental"),],
 )
 def test_workspace_build(cli, tmpdir, datafiles, modification, buildtype):
     incremental = buildtype == "incremental"


[buildstream] 20/26: Replace workspace.prepared callback with attribute marking


commit 4d0d43dee0672dc01b10a0f35a0bdab1d4c8adbe
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Fri Nov 15 13:15:53 2019 +0000

    Replace workspace.prepared callback with attribute marking
    
    Supports workspace builds via RE
---
 src/buildstream/buildelement.py    |  1 +
 src/buildstream/element.py         | 17 +++++++++++++++--
 src/buildstream/sandbox/sandbox.py |  3 +++
 3 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/buildelement.py b/src/buildstream/buildelement.py
index f04d3b0..ae2a3e2 100644
--- a/src/buildstream/buildelement.py
+++ b/src/buildstream/buildelement.py
@@ -266,6 +266,7 @@ class BuildElement(Element):
             with sandbox.batch(SandboxFlags.ROOT_READ_ONLY, label="Running configure-commands"):
                 for cmd in commands:
                     self.__run_command(sandbox, cmd)
+        sandbox.prepared = True
 
     def generate_script(self):
         script = ""
diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index 20cc809..c2018b3 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -389,6 +389,7 @@ class Element(Plugin):
 
         *Since: 1.2*
         """
+        sandbox.prepared = True
 
     def assemble(self, sandbox: "Sandbox") -> str:
         """Assemble the output artifact
@@ -2378,12 +2379,24 @@ class Element(Plugin):
     # Internal method for calling public abstract prepare() method.
     #
     def __prepare(self, sandbox):
-        # FIXME:
         # We need to ensure that the prepare() method is only called
         # once in workspaces, because the changes will persist across
         # incremental builds - not desirable, for example, in the case
         # of autotools' `./configure`.
-        self.prepare(sandbox)
+        workspace = self._get_workspace()
+        prepared = False
+        if workspace and workspace.prepared:
+            # FIXME: ideally we don't have to check this, eventually we would
+            # like to get the saved old_ref and apply the new workspace on top
+            # to support incremental builds.
+            if [s._key for s in self.__sources] == [workspace.last_successful]:
+                prepared = False
+
+        if not prepared:
+            self.prepare(sandbox)
+
+        if workspace and sandbox.prepared:
+            workspace.prepared = True
 
     # __preflight():
     #
diff --git a/src/buildstream/sandbox/sandbox.py b/src/buildstream/sandbox/sandbox.py
index e91e890..26e551d 100644
--- a/src/buildstream/sandbox/sandbox.py
+++ b/src/buildstream/sandbox/sandbox.py
@@ -168,6 +168,9 @@ class Sandbox:
         # Pending command batch
         self.__batch = None
 
+        # whether the sandbox has been prepared
+        self.prepared = False
+
     def get_directory(self) -> str:
         """Fetches the sandbox root directory
 

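A condensed sketch of the attribute-marking flow introduced above, using hypothetical stand-in types rather than the real Element/Workspace/Sandbox classes (which carry far more state):

    class Sandbox:
        def __init__(self):
            # Flipped to True by prepare()/the configure-commands batch
            self.prepared = False

    class Workspace:
        def __init__(self):
            # Persisted between builds so prepare() is not re-run needlessly
            self.prepared = False

    def run_prepare(element_prepare, sandbox, workspace):
        # The real __prepare() additionally compares source keys against
        # workspace.last_successful before trusting the saved flag
        if not (workspace and workspace.prepared):
            element_prepare(sandbox)
        if workspace and sandbox.prepared:
            workspace.prepared = True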

[buildstream] 24/26: element: fix bug causing workspaces to always reprepare

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 3c35a5a64c2e7ba835f754d7d66aed9e498ad6c0
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Jan 13 09:05:35 2020 +0000

    element: fix bug causing workspaces to always reprepare
---
 src/buildstream/element.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index c2018b3..d333a40 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -2390,7 +2390,7 @@ class Element(Plugin):
             # like to get the saved old_ref and apply the new workspace on top
             # to support incremental builds.
             if [s._key for s in self.__sources] == [workspace.last_successful]:
-                prepared = False
+                prepared = True
 
         if not prepared:
             self.prepare(sandbox)

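The inverted assignment made the whole source-key check a no-op: `prepared` started out False, so assigning False again meant prepare() ran on every build. A hypothetical helper showing the corrected decision:

    def needs_prepare(sources, workspace):
        # Trust the saved flag only if the element's source keys still match
        # the key recorded at the last successful build; the old code
        # assigned False here, so this branch never had any effect.
        if workspace and workspace.prepared:
            if [s._key for s in sources] == [workspace.last_successful]:
                return False
        return True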

[buildstream] 12/26: workspace: import mtimes when staging

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit ce6b6c5c934071b36ba1b33a5704199fbf5d598c
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Tue Dec 24 14:32:51 2019 +0000

    workspace: import mtimes when staging
---
 src/buildstream/plugins/sources/workspace.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/buildstream/plugins/sources/workspace.py b/src/buildstream/plugins/sources/workspace.py
index 3d4c93b..5225b1a 100644
--- a/src/buildstream/plugins/sources/workspace.py
+++ b/src/buildstream/plugins/sources/workspace.py
@@ -98,7 +98,7 @@ class WorkspaceSource(Source):
     def stage(self, directory: Directory) -> None:
         assert isinstance(directory, Directory)
         with self.timed_activity("Staging local files"):
-            result = directory.import_files(self.path)
+            result = directory.import_files(self.path, properties=["MTime"])
 
             if result.overwritten or result.ignored:
                 raise SourceError(

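A hedged sketch of what this one-line change requests from the storage layer; the helper below is illustrative, not BuildStream code:

    from buildstream.storage.directory import Directory

    def stage_workspace(directory: Directory, path: str) -> None:
        # Passing properties=["MTime"] asks import_files() to capture each
        # file's modification time as a node property instead of dropping it
        result = directory.import_files(path, properties=["MTime"])
        assert not (result.overwritten or result.ignored)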

[buildstream] 13/26: tests: test mtimes in storage

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit d88fc7d303bb63256c8e80254e13a30885936b8d
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Dec 11 16:35:10 2019 +0000

    tests: test mtimes in storage
---
 tests/internals/storage_vdir_import.py | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/tests/internals/storage_vdir_import.py b/tests/internals/storage_vdir_import.py
index 6b70f92..fe30127 100644
--- a/tests/internals/storage_vdir_import.py
+++ b/tests/internals/storage_vdir_import.py
@@ -23,6 +23,7 @@ from buildstream.storage._casbaseddirectory import CasBasedDirectory
 from buildstream.storage._filebaseddirectory import FileBasedDirectory
 from buildstream._cas import CASCache
 from buildstream.storage.directory import VirtualDirectoryError
+from buildstream.utils import _set_file_mtime, _parse_timestamp
 
 
 # These are comparative tests that check that FileBasedDirectory and
@@ -48,6 +49,8 @@ root_filesets = [
 empty_hash_ref = sha256().hexdigest()
 RANDOM_SEED = 69105
 NUM_RANDOM_TESTS = 4
+TIMESTAMP = "2019-12-16T08:49:04.012Z"
+MTIME = 1576486144.0120000
 
 
 def generate_import_roots(rootno, directory):
@@ -63,8 +66,11 @@ def generate_import_root(rootdir, filelist):
         if typesymbol == "F":
             (dirnames, filename) = os.path.split(path)
             os.makedirs(os.path.join(rootdir, dirnames), exist_ok=True)
-            with open(os.path.join(rootdir, dirnames, filename), "wt") as f:
+            fullpath = os.path.join(rootdir, dirnames, filename)
+            with open(fullpath, "wt") as f:
                 f.write(content)
+            # set the file mtime to an arbitrary fixed timestamp
+            _set_file_mtime(fullpath, _parse_timestamp(TIMESTAMP))
         elif typesymbol == "D":
             os.makedirs(os.path.join(rootdir, path), exist_ok=True)
         elif typesymbol == "S":
@@ -98,6 +104,7 @@ def generate_random_root(rootno, directory):
         elif thing == "file":
             with open(target, "wt") as f:
                 f.write("This is node {}\n".format(i))
+            _set_file_mtime(target, _parse_timestamp(TIMESTAMP))
         elif thing == "link":
             symlink_type = random.choice(["absolute", "relative", "broken"])
             if symlink_type == "broken" or not things:
@@ -124,7 +131,7 @@ def file_contents_are(path, contents):
 
 def create_new_casdir(root_number, cas_cache, tmpdir):
     d = CasBasedDirectory(cas_cache)
-    d.import_files(os.path.join(tmpdir, "content", "root{}".format(root_number)))
+    d.import_files(os.path.join(tmpdir, "content", "root{}".format(root_number)), properties=["MTime"])
     digest = d._get_digest()
     assert digest.hash != empty_hash_ref
     return d
@@ -192,7 +199,7 @@ def _import_test(tmpdir, original, overlay, generator_function, verify_contents=
         assert duplicate_cas._get_digest().hash == d._get_digest().hash
 
         d2 = create_new_casdir(overlay, cas_cache, tmpdir)
-        d.import_files(d2)
+        d.import_files(d2, properties=["MTime"])
         export_dir = os.path.join(tmpdir, "output-{}-{}".format(original, overlay))
         roundtrip_dir = os.path.join(tmpdir, "roundtrip-{}-{}".format(original, overlay))
         d2.export_files(roundtrip_dir)
@@ -211,6 +218,10 @@ def _import_test(tmpdir, original, overlay, generator_function, verify_contents=
                             path
                         )
                         assert file_contents_are(realpath, content)
+                        roundtrip = os.path.join(roundtrip_dir, path)
+                        assert os.path.getmtime(roundtrip) == MTIME
+                        assert os.path.getmtime(realpath) == MTIME
+
                 elif typename == "S":
                     if os.path.isdir(realpath) and directory_not_empty(realpath):
                         # The symlink should not have overwritten the directory in this case.
@@ -227,7 +238,7 @@ def _import_test(tmpdir, original, overlay, generator_function, verify_contents=
 
         # Now do the same thing with filebaseddirectories and check the contents match
 
-        duplicate_cas.import_files(roundtrip_dir)
+        duplicate_cas.import_files(roundtrip_dir, properties=["MTime"])
 
         assert duplicate_cas._get_digest().hash == d._get_digest().hash
     finally:

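TIMESTAMP and MTIME above encode the same instant; a standalone check using only the standard library (not BuildStream's helpers):

    from datetime import datetime, timezone

    TIMESTAMP = "2019-12-16T08:49:04.012Z"
    MTIME = 1576486144.012

    dt = datetime.strptime(TIMESTAMP, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=timezone.utc)
    # 2019-12-16T08:49:04.012Z falls 1576486144.012 seconds after the Unix epoch
    assert abs(dt.timestamp() - MTIME) < 1e-6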

[buildstream] 08/26: cascache: parse timestamp and update mtimes in checkout

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 0d1b6d057a32236540fc2d006bdfd60199a8ce1e
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Jan 8 16:30:59 2020 +0000

    cascache: parse timestamp and update mtimes in checkout
    
    If files are checked out from a CasBasedDirectory which holds
    node_properties in its index, they are explicitly copied instead of
    being hardlinked, and their mtimes are updated to the stored values.
---
 src/buildstream/_cas/cascache.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/buildstream/_cas/cascache.py b/src/buildstream/_cas/cascache.py
index 519de6e..c608f12 100644
--- a/src/buildstream/_cas/cascache.py
+++ b/src/buildstream/_cas/cascache.py
@@ -217,10 +217,18 @@ class CASCache:
         for filenode in directory.files:
             # regular file, create hardlink
             fullpath = os.path.join(dest, filenode.name)
-            if can_link:
+            # generally, if the node holds properties we fall back
+            # to copying instead of hardlinking
+            if can_link and not filenode.node_properties:
                 utils.safe_link(self.objpath(filenode.digest), fullpath)
             else:
                 utils.safe_copy(self.objpath(filenode.digest), fullpath)
+                if filenode.node_properties:
+                    # XXX: see https://github.com/bazelbuild/remote-apis/blob/master/build/bazel/remote/execution/v2/nodeproperties.md
+                    # for supported node property specifications
+                    for prop in filenode.node_properties:
+                        if prop.name == "MTime" and prop.value:
+                            utils._set_file_mtime(fullpath, utils._parse_timestamp(prop.value))
 
             if filenode.is_executable:
                 os.chmod(

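At the OS level the update amounts to a parse-then-utime; a sketch that approximates utils._parse_timestamp/_set_file_mtime rather than quoting them, assuming the millisecond-precision UTC format used elsewhere in this series:

    import os
    from datetime import datetime, timezone

    def set_mtime_from_rfc3339(path: str, stamp: str) -> None:
        # Parse an RFC 3339 UTC timestamp as stored in the node property,
        # then apply it as both atime and mtime
        dt = datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=timezone.utc)
        seconds = dt.timestamp()
        os.utime(path, times=(seconds, seconds))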

[buildstream] 02/26: local_cas: Update proto

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit f3fe105dc3a4d1f7908d13a0f1d5038282f281cd
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Wed Dec 11 14:28:08 2019 +0000

    local_cas: Update proto
---
 .../_protos/build/buildgrid/local_cas.proto        |  20 +-
 .../_protos/build/buildgrid/local_cas_pb2.py       | 249 ++++++++++++---------
 .../_protos/build/buildgrid/local_cas_pb2_grpc.py  |   6 +-
 3 files changed, 158 insertions(+), 117 deletions(-)

diff --git a/src/buildstream/_protos/build/buildgrid/local_cas.proto b/src/buildstream/_protos/build/buildgrid/local_cas.proto
index f2955f97..722ac70 100644
--- a/src/buildstream/_protos/build/buildgrid/local_cas.proto
+++ b/src/buildstream/_protos/build/buildgrid/local_cas.proto
@@ -60,10 +60,10 @@ service LocalContentAddressableStorage {
   // in the local cache. Optionally, this will also fetch all blobs referenced
   // by the `Directory` objects, equivalent to `FetchMissingBlobs`.
   //
-  // If part of the tree is missing from the CAS, the server will return the
-  // portion present and omit the rest.
+  // If no remote CAS is available, this will check presence of the entire
+  // directory tree (and optionally also file blobs) in the local cache.
   //
-  // * `NOT_FOUND`: The requested tree root is not present in the CAS.
+  // * `NOT_FOUND`: The requested tree is not present in the CAS, or is incomplete.
   rpc FetchTree(FetchTreeRequest) returns (FetchTreeResponse) {}
 
   // Upload the entire directory tree from the local cache to a remote CAS.
@@ -139,7 +139,7 @@ message FetchMissingBlobsRequest {
 // A response message for
 // [LocalContentAddressableStorage.FetchMissingBlobs][build.buildgrid.v2.LocalContentAddressableStorage.FetchMissingBlobs].
 message FetchMissingBlobsResponse {
-  // A response corresponding to a single blob that the client tried to upload.
+  // A response corresponding to a single blob that the client tried to download.
   message Response {
     // The digest to which this response corresponds.
     build.bazel.remote.execution.v2.Digest digest = 1;
@@ -281,6 +281,9 @@ message CaptureTreeRequest {
   // This is a hint whether the blobs shall be uploaded to the remote CAS
   // without first storing them in the local cache.
   bool bypass_local_cache = 3;
+
+  // The properties of path(s) in the local filesystem to capture.
+  repeated string node_properties = 4;
 }
 
 // A response message for
@@ -320,6 +323,9 @@ message CaptureFilesRequest {
   // This is a hint whether the blobs shall be uploaded to the remote CAS
   // without first storing them in the local cache.
   bool bypass_local_cache = 3;
+
+  // The properties of path(s) in the local filesystem to capture.
+  repeated string node_properties = 4;
 }
 
 // A response message for
@@ -335,6 +341,12 @@ message CaptureFilesResponse {
 
     // The result of attempting to capture and upload the file.
     google.rpc.Status status = 3;
+
+    // True if the captured file was executable, false otherwise.
+    bool is_executable = 4;
+
+    // The node properties of the captured file.
+    repeated build.bazel.remote.execution.v2.NodeProperty node_properties = 5;
   }
 
   // The responses to the requests.
diff --git a/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py b/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py
index 06df138..6be3662 100644
--- a/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py
+++ b/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py
@@ -23,7 +23,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
   package='build.buildgrid',
   syntax='proto3',
   serialized_options=None,
-  serialized_pb=_b('\n\x1f\x62uild/buildgrid/local_cas.proto\x12\x0f\x62uild.buildgrid\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/rpc/status.proto\"p\n\x18\x46\x65tchMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xcc\x01\n\x19\x46\x65tchMissingBlobsResponse\x12\x46\n\tresponses\x18\x01 \x03(\x0b\x32\x33.build.buil [...]
+  serialized_pb=_b('\n\x1f\x62uild/buildgrid/local_cas.proto\x12\x0f\x62uild.buildgrid\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/rpc/status.proto\"p\n\x18\x46\x65tchMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xcc\x01\n\x19\x46\x65tchMissingBlobsResponse\x12\x46\n\tresponses\x18\x01 \x03(\x0b\x32\x33.build.buil [...]
   ,
   dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
 
@@ -477,6 +477,13 @@ _CAPTURETREEREQUEST = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.buildgrid.CaptureTreeRequest.node_properties', index=3,
+      number=4, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -490,7 +497,7 @@ _CAPTURETREEREQUEST = _descriptor.Descriptor(
   oneofs=[
   ],
   serialized_start=1243,
-  serialized_end=1328,
+  serialized_end=1353,
 )
 
 
@@ -534,8 +541,8 @@ _CAPTURETREERESPONSE_RESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1420,
-  serialized_end=1542,
+  serialized_start=1445,
+  serialized_end=1567,
 )
 
 _CAPTURETREERESPONSE = _descriptor.Descriptor(
@@ -564,8 +571,8 @@ _CAPTURETREERESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1331,
-  serialized_end=1542,
+  serialized_start=1356,
+  serialized_end=1567,
 )
 
 
@@ -597,6 +604,13 @@ _CAPTUREFILESREQUEST = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.buildgrid.CaptureFilesRequest.node_properties', index=3,
+      number=4, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -609,8 +623,8 @@ _CAPTUREFILESREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1544,
-  serialized_end=1630,
+  serialized_start=1569,
+  serialized_end=1680,
 )
 
 
@@ -642,6 +656,20 @@ _CAPTUREFILESRESPONSE_RESPONSE = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='is_executable', full_name='build.buildgrid.CaptureFilesResponse.Response.is_executable', index=3,
+      number=4, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='node_properties', full_name='build.buildgrid.CaptureFilesResponse.Response.node_properties', index=4,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -654,8 +682,8 @@ _CAPTUREFILESRESPONSE_RESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1724,
-  serialized_end=1841,
+  serialized_start=1775,
+  serialized_end=1987,
 )
 
 _CAPTUREFILESRESPONSE = _descriptor.Descriptor(
@@ -684,8 +712,8 @@ _CAPTUREFILESRESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1633,
-  serialized_end=1841,
+  serialized_start=1683,
+  serialized_end=1987,
 )
 
 
@@ -743,8 +771,8 @@ _GETINSTANCENAMEFORREMOTEREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1844,
-  serialized_end=1975,
+  serialized_start=1990,
+  serialized_end=2121,
 )
 
 
@@ -774,8 +802,8 @@ _GETINSTANCENAMEFORREMOTERESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1977,
-  serialized_end=2034,
+  serialized_start=2123,
+  serialized_end=2180,
 )
 
 
@@ -798,8 +826,8 @@ _GETLOCALDISKUSAGEREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2036,
-  serialized_end=2062,
+  serialized_start=2182,
+  serialized_end=2208,
 )
 
 
@@ -836,8 +864,8 @@ _GETLOCALDISKUSAGERESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2064,
-  serialized_end=2132,
+  serialized_start=2210,
+  serialized_end=2278,
 )
 
 _FETCHMISSINGBLOBSREQUEST.fields_by_name['blob_digests'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
@@ -859,6 +887,7 @@ _CAPTURETREERESPONSE_RESPONSE.containing_type = _CAPTURETREERESPONSE
 _CAPTURETREERESPONSE.fields_by_name['responses'].message_type = _CAPTURETREERESPONSE_RESPONSE
 _CAPTUREFILESRESPONSE_RESPONSE.fields_by_name['digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
 _CAPTUREFILESRESPONSE_RESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_CAPTUREFILESRESPONSE_RESPONSE.fields_by_name['node_properties'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._NODEPROPERTY
 _CAPTUREFILESRESPONSE_RESPONSE.containing_type = _CAPTUREFILESRESPONSE
 _CAPTUREFILESRESPONSE.fields_by_name['responses'].message_type = _CAPTUREFILESRESPONSE_RESPONSE
 DESCRIPTOR.message_types_by_name['FetchMissingBlobsRequest'] = _FETCHMISSINGBLOBSREQUEST
@@ -881,162 +910,162 @@ DESCRIPTOR.message_types_by_name['GetLocalDiskUsageRequest'] = _GETLOCALDISKUSAG
 DESCRIPTOR.message_types_by_name['GetLocalDiskUsageResponse'] = _GETLOCALDISKUSAGERESPONSE
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
-FetchMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('FetchMissingBlobsRequest', (_message.Message,), dict(
-  DESCRIPTOR = _FETCHMISSINGBLOBSREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+FetchMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('FetchMissingBlobsRequest', (_message.Message,), {
+  'DESCRIPTOR' : _FETCHMISSINGBLOBSREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.FetchMissingBlobsRequest)
-  ))
+  })
 _sym_db.RegisterMessage(FetchMissingBlobsRequest)
 
-FetchMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('FetchMissingBlobsResponse', (_message.Message,), dict(
+FetchMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('FetchMissingBlobsResponse', (_message.Message,), {
 
-  Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
-    DESCRIPTOR = _FETCHMISSINGBLOBSRESPONSE_RESPONSE,
-    __module__ = 'build.buildgrid.local_cas_pb2'
+  'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
+    'DESCRIPTOR' : _FETCHMISSINGBLOBSRESPONSE_RESPONSE,
+    '__module__' : 'build.buildgrid.local_cas_pb2'
     # @@protoc_insertion_point(class_scope:build.buildgrid.FetchMissingBlobsResponse.Response)
-    ))
+    })
   ,
-  DESCRIPTOR = _FETCHMISSINGBLOBSRESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+  'DESCRIPTOR' : _FETCHMISSINGBLOBSRESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.FetchMissingBlobsResponse)
-  ))
+  })
 _sym_db.RegisterMessage(FetchMissingBlobsResponse)
 _sym_db.RegisterMessage(FetchMissingBlobsResponse.Response)
 
-UploadMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('UploadMissingBlobsRequest', (_message.Message,), dict(
-  DESCRIPTOR = _UPLOADMISSINGBLOBSREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+UploadMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('UploadMissingBlobsRequest', (_message.Message,), {
+  'DESCRIPTOR' : _UPLOADMISSINGBLOBSREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.UploadMissingBlobsRequest)
-  ))
+  })
 _sym_db.RegisterMessage(UploadMissingBlobsRequest)
 
-UploadMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('UploadMissingBlobsResponse', (_message.Message,), dict(
+UploadMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('UploadMissingBlobsResponse', (_message.Message,), {
 
-  Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
-    DESCRIPTOR = _UPLOADMISSINGBLOBSRESPONSE_RESPONSE,
-    __module__ = 'build.buildgrid.local_cas_pb2'
+  'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
+    'DESCRIPTOR' : _UPLOADMISSINGBLOBSRESPONSE_RESPONSE,
+    '__module__' : 'build.buildgrid.local_cas_pb2'
     # @@protoc_insertion_point(class_scope:build.buildgrid.UploadMissingBlobsResponse.Response)
-    ))
+    })
   ,
-  DESCRIPTOR = _UPLOADMISSINGBLOBSRESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+  'DESCRIPTOR' : _UPLOADMISSINGBLOBSRESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.UploadMissingBlobsResponse)
-  ))
+  })
 _sym_db.RegisterMessage(UploadMissingBlobsResponse)
 _sym_db.RegisterMessage(UploadMissingBlobsResponse.Response)
 
-FetchTreeRequest = _reflection.GeneratedProtocolMessageType('FetchTreeRequest', (_message.Message,), dict(
-  DESCRIPTOR = _FETCHTREEREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+FetchTreeRequest = _reflection.GeneratedProtocolMessageType('FetchTreeRequest', (_message.Message,), {
+  'DESCRIPTOR' : _FETCHTREEREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.FetchTreeRequest)
-  ))
+  })
 _sym_db.RegisterMessage(FetchTreeRequest)
 
-FetchTreeResponse = _reflection.GeneratedProtocolMessageType('FetchTreeResponse', (_message.Message,), dict(
-  DESCRIPTOR = _FETCHTREERESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+FetchTreeResponse = _reflection.GeneratedProtocolMessageType('FetchTreeResponse', (_message.Message,), {
+  'DESCRIPTOR' : _FETCHTREERESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.FetchTreeResponse)
-  ))
+  })
 _sym_db.RegisterMessage(FetchTreeResponse)
 
-UploadTreeRequest = _reflection.GeneratedProtocolMessageType('UploadTreeRequest', (_message.Message,), dict(
-  DESCRIPTOR = _UPLOADTREEREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+UploadTreeRequest = _reflection.GeneratedProtocolMessageType('UploadTreeRequest', (_message.Message,), {
+  'DESCRIPTOR' : _UPLOADTREEREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.UploadTreeRequest)
-  ))
+  })
 _sym_db.RegisterMessage(UploadTreeRequest)
 
-UploadTreeResponse = _reflection.GeneratedProtocolMessageType('UploadTreeResponse', (_message.Message,), dict(
-  DESCRIPTOR = _UPLOADTREERESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+UploadTreeResponse = _reflection.GeneratedProtocolMessageType('UploadTreeResponse', (_message.Message,), {
+  'DESCRIPTOR' : _UPLOADTREERESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.UploadTreeResponse)
-  ))
+  })
 _sym_db.RegisterMessage(UploadTreeResponse)
 
-StageTreeRequest = _reflection.GeneratedProtocolMessageType('StageTreeRequest', (_message.Message,), dict(
-  DESCRIPTOR = _STAGETREEREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+StageTreeRequest = _reflection.GeneratedProtocolMessageType('StageTreeRequest', (_message.Message,), {
+  'DESCRIPTOR' : _STAGETREEREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.StageTreeRequest)
-  ))
+  })
 _sym_db.RegisterMessage(StageTreeRequest)
 
-StageTreeResponse = _reflection.GeneratedProtocolMessageType('StageTreeResponse', (_message.Message,), dict(
-  DESCRIPTOR = _STAGETREERESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+StageTreeResponse = _reflection.GeneratedProtocolMessageType('StageTreeResponse', (_message.Message,), {
+  'DESCRIPTOR' : _STAGETREERESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.StageTreeResponse)
-  ))
+  })
 _sym_db.RegisterMessage(StageTreeResponse)
 
-CaptureTreeRequest = _reflection.GeneratedProtocolMessageType('CaptureTreeRequest', (_message.Message,), dict(
-  DESCRIPTOR = _CAPTURETREEREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+CaptureTreeRequest = _reflection.GeneratedProtocolMessageType('CaptureTreeRequest', (_message.Message,), {
+  'DESCRIPTOR' : _CAPTURETREEREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.CaptureTreeRequest)
-  ))
+  })
 _sym_db.RegisterMessage(CaptureTreeRequest)
 
-CaptureTreeResponse = _reflection.GeneratedProtocolMessageType('CaptureTreeResponse', (_message.Message,), dict(
+CaptureTreeResponse = _reflection.GeneratedProtocolMessageType('CaptureTreeResponse', (_message.Message,), {
 
-  Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
-    DESCRIPTOR = _CAPTURETREERESPONSE_RESPONSE,
-    __module__ = 'build.buildgrid.local_cas_pb2'
+  'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
+    'DESCRIPTOR' : _CAPTURETREERESPONSE_RESPONSE,
+    '__module__' : 'build.buildgrid.local_cas_pb2'
     # @@protoc_insertion_point(class_scope:build.buildgrid.CaptureTreeResponse.Response)
-    ))
+    })
   ,
-  DESCRIPTOR = _CAPTURETREERESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+  'DESCRIPTOR' : _CAPTURETREERESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.CaptureTreeResponse)
-  ))
+  })
 _sym_db.RegisterMessage(CaptureTreeResponse)
 _sym_db.RegisterMessage(CaptureTreeResponse.Response)
 
-CaptureFilesRequest = _reflection.GeneratedProtocolMessageType('CaptureFilesRequest', (_message.Message,), dict(
-  DESCRIPTOR = _CAPTUREFILESREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+CaptureFilesRequest = _reflection.GeneratedProtocolMessageType('CaptureFilesRequest', (_message.Message,), {
+  'DESCRIPTOR' : _CAPTUREFILESREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.CaptureFilesRequest)
-  ))
+  })
 _sym_db.RegisterMessage(CaptureFilesRequest)
 
-CaptureFilesResponse = _reflection.GeneratedProtocolMessageType('CaptureFilesResponse', (_message.Message,), dict(
+CaptureFilesResponse = _reflection.GeneratedProtocolMessageType('CaptureFilesResponse', (_message.Message,), {
 
-  Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
-    DESCRIPTOR = _CAPTUREFILESRESPONSE_RESPONSE,
-    __module__ = 'build.buildgrid.local_cas_pb2'
+  'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
+    'DESCRIPTOR' : _CAPTUREFILESRESPONSE_RESPONSE,
+    '__module__' : 'build.buildgrid.local_cas_pb2'
     # @@protoc_insertion_point(class_scope:build.buildgrid.CaptureFilesResponse.Response)
-    ))
+    })
   ,
-  DESCRIPTOR = _CAPTUREFILESRESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+  'DESCRIPTOR' : _CAPTUREFILESRESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.CaptureFilesResponse)
-  ))
+  })
 _sym_db.RegisterMessage(CaptureFilesResponse)
 _sym_db.RegisterMessage(CaptureFilesResponse.Response)
 
-GetInstanceNameForRemoteRequest = _reflection.GeneratedProtocolMessageType('GetInstanceNameForRemoteRequest', (_message.Message,), dict(
-  DESCRIPTOR = _GETINSTANCENAMEFORREMOTEREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+GetInstanceNameForRemoteRequest = _reflection.GeneratedProtocolMessageType('GetInstanceNameForRemoteRequest', (_message.Message,), {
+  'DESCRIPTOR' : _GETINSTANCENAMEFORREMOTEREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.GetInstanceNameForRemoteRequest)
-  ))
+  })
 _sym_db.RegisterMessage(GetInstanceNameForRemoteRequest)
 
-GetInstanceNameForRemoteResponse = _reflection.GeneratedProtocolMessageType('GetInstanceNameForRemoteResponse', (_message.Message,), dict(
-  DESCRIPTOR = _GETINSTANCENAMEFORREMOTERESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+GetInstanceNameForRemoteResponse = _reflection.GeneratedProtocolMessageType('GetInstanceNameForRemoteResponse', (_message.Message,), {
+  'DESCRIPTOR' : _GETINSTANCENAMEFORREMOTERESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.GetInstanceNameForRemoteResponse)
-  ))
+  })
 _sym_db.RegisterMessage(GetInstanceNameForRemoteResponse)
 
-GetLocalDiskUsageRequest = _reflection.GeneratedProtocolMessageType('GetLocalDiskUsageRequest', (_message.Message,), dict(
-  DESCRIPTOR = _GETLOCALDISKUSAGEREQUEST,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+GetLocalDiskUsageRequest = _reflection.GeneratedProtocolMessageType('GetLocalDiskUsageRequest', (_message.Message,), {
+  'DESCRIPTOR' : _GETLOCALDISKUSAGEREQUEST,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.GetLocalDiskUsageRequest)
-  ))
+  })
 _sym_db.RegisterMessage(GetLocalDiskUsageRequest)
 
-GetLocalDiskUsageResponse = _reflection.GeneratedProtocolMessageType('GetLocalDiskUsageResponse', (_message.Message,), dict(
-  DESCRIPTOR = _GETLOCALDISKUSAGERESPONSE,
-  __module__ = 'build.buildgrid.local_cas_pb2'
+GetLocalDiskUsageResponse = _reflection.GeneratedProtocolMessageType('GetLocalDiskUsageResponse', (_message.Message,), {
+  'DESCRIPTOR' : _GETLOCALDISKUSAGERESPONSE,
+  '__module__' : 'build.buildgrid.local_cas_pb2'
   # @@protoc_insertion_point(class_scope:build.buildgrid.GetLocalDiskUsageResponse)
-  ))
+  })
 _sym_db.RegisterMessage(GetLocalDiskUsageResponse)
 
 
@@ -1047,8 +1076,8 @@ _LOCALCONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor(
   file=DESCRIPTOR,
   index=0,
   serialized_options=None,
-  serialized_start=2135,
-  serialized_end=3084,
+  serialized_start=2281,
+  serialized_end=3230,
   methods=[
   _descriptor.MethodDescriptor(
     name='FetchMissingBlobs',
diff --git a/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py b/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py
index 8dfaec7..68af482 100644
--- a/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py
+++ b/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py
@@ -113,10 +113,10 @@ class LocalContentAddressableStorageServicer(object):
     in the local cache. Optionally, this will also fetch all blobs referenced
     by the `Directory` objects, equivalent to `FetchMissingBlobs`.
 
-    If part of the tree is missing from the CAS, the server will return the
-    portion present and omit the rest.
+    If no remote CAS is available, this will check presence of the entire
+    directory tree (and optionally also file blobs) in the local cache.
 
-    * `NOT_FOUND`: The requested tree root is not present in the CAS.
+    * `NOT_FOUND`: The requested tree is not present in the CAS, or is incomplete.
     """
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')

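With the regenerated stubs, asking buildbox-casd for node properties is a matter of filling the new repeated field; a minimal sketch against the generated module (the path value is made up, and the stub call appears only as a comment):

    from buildstream._protos.build.buildgrid import local_cas_pb2

    request = local_cas_pb2.CaptureTreeRequest()
    request.path.append("/work/src")          # hypothetical directory to capture
    request.node_properties.append("MTime")   # field added by this proto update
    # The populated request would then go to the LocalCAS stub, e.g.:
    #     response = local_cas_stub.CaptureTree(request)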

[buildstream] 17/26: DEBUG: use new buildbox image

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 529e9b89191b447ed7cd5f306ed47084a9e83178
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Dec 16 16:12:27 2019 +0000

    DEBUG: use new buildbox image
---
 .gitlab-ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6ab79e1..8dcb518 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -24,7 +24,7 @@ variables:
   # Our own variables
   # Version of the docker images we should use for all the images.
   # This is taken from buildstream/buildstream-docker-images
-  DOCKER_IMAGE_VERSION: master-105004115
+  DOCKER_IMAGE_VERSION: traveltissues-testing-106842226
   PYTEST_ADDOPTS: "--color=yes"
   INTEGRATION_CACHE: "${CI_PROJECT_DIR}/cache/integration-cache"
   PYTEST_ARGS: "--color=yes --integration -n 2"


[buildstream] 04/26: cascache: Add properties to CaptureTree requests

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 4f7a3105658e5842aca28a9048a7a39a97eb14f0
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Mon Dec 2 11:59:40 2019 +0000

    cascache: Add properties to CaptureTree requests
---
 src/buildstream/_cas/cascache.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/_cas/cascache.py b/src/buildstream/_cas/cascache.py
index 9c0b46d..519de6e 100644
--- a/src/buildstream/_cas/cascache.py
+++ b/src/buildstream/_cas/cascache.py
@@ -26,6 +26,7 @@ import ctypes
 import multiprocessing
 import signal
 import time
+from typing import Optional, List
 
 import grpc
 
@@ -34,7 +35,7 @@ from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
 from .._protos.build.buildgrid import local_cas_pb2
 
 from .. import _signals, utils
-from ..types import FastEnum
+from ..types import FastEnum, SourceRef
 from .._exceptions import CASCacheError
 
 from .casdprocessmanager import CASDProcessManager
@@ -339,15 +340,21 @@ class CASCache:
     #
     # Args:
     #     path (str): Path to directory to import
+    #     properties (Optional[List[str]]): List of properties to request
     #
     # Returns:
     #     (Digest): The digest of the imported directory
     #
-    def import_directory(self, path):
+    def import_directory(self, path: str, properties: Optional[List[str]] = None) -> SourceRef:
         local_cas = self.get_local_cas()
 
         request = local_cas_pb2.CaptureTreeRequest()
         request.path.append(path)
+
+        if properties:
+            for _property in properties:
+                request.node_properties.append(_property)
+
         response = local_cas.CaptureTree(request)
 
         if len(response.responses) != 1:

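From the caller's side the new parameter looks like this; the CASCache construction below is hypothetical and simplified:

    from buildstream._cas.cascache import CASCache

    cas = CASCache("/path/to/cachedir")  # hypothetical constructor arguments
    # Capture a directory while recording each file's MTime as a node
    # property; the returned value is the Digest of the imported tree
    digest = cas.import_directory("/work/src", properties=["MTime"])
    print(digest.hash, digest.size_bytes)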

[buildstream] 09/26: _filebaseddirectory: support mtimes in `_import_files_from_cas`

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit bbab470fac99f518737e9a131befe29a5030ea01
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Tue Dec 10 14:11:55 2019 +0000

    _filebaseddirectory: support mtimes in `_import_files_from_cas`
    
    _filebaseddirectory: don't update mtimes/hardlink if mtimes are provided
---
 src/buildstream/storage/_filebaseddirectory.py | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/storage/_filebaseddirectory.py b/src/buildstream/storage/_filebaseddirectory.py
index d01b5ce..4c6c92f 100644
--- a/src/buildstream/storage/_filebaseddirectory.py
+++ b/src/buildstream/storage/_filebaseddirectory.py
@@ -123,7 +123,9 @@ class FileBasedDirectory(Directory):
                     report_written=report_written,
                 )
 
-        if update_mtime:
+        # do not update mtimes if they were already set via node properties
+        properties = properties or []
+        if update_mtime and "MTime" not in properties:
             cur_time = time.time()
 
             for f in import_result.files_written:
@@ -296,7 +298,21 @@ class FileBasedDirectory(Directory):
 
                 if entry.type == _FileType.REGULAR_FILE:
                     src_path = source_directory.cas_cache.objpath(entry.digest)
-                    actionfunc(src_path, dest_path, result=result)
+
+                    # fall back to copying if we require mtime support on this file
+                    if entry.node_properties:
+                        utils.safe_copy(src_path, dest_path, result=result)
+                        mtime = None
+                        for prop in entry.node_properties:
+                            if prop.name == "MTime" and prop.value:
+                                mtime = prop.value
+                            else:
+                                raise ImplError("{} is not a supported node property.".format(prop.name))
+                        if mtime:
+                            utils._set_file_mtime(dest_path, mtime)
+                    else:
+                        utils.safe_link(src_path, dest_path, result=result)
+
                     if entry.is_executable:
                         os.chmod(
                             dest_path,
@@ -308,6 +324,7 @@ class FileBasedDirectory(Directory):
                             | stat.S_IROTH
                             | stat.S_IXOTH,
                         )
+
                 else:
                     assert entry.type == _FileType.SYMLINK
                     os.symlink(entry.target, dest_path)

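The copy-instead-of-hardlink rule exists because hardlinked names share one inode: retiming the staged file would silently retime the cached CAS object too. A standalone demonstration:

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as d:
        src = os.path.join(d, "cas-object")    # stands in for a CAS blob
        dst = os.path.join(d, "staged-file")   # stands in for a checkout target
        with open(src, "w") as f:
            f.write("content")
        os.link(src, dst)                      # hardlink: both names share an inode
        os.utime(dst, times=(0, 0))            # retime the staged file...
        assert os.path.getmtime(src) == 0      # ...and the "CAS object" changed too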

[buildstream] 11/26: element: pass arbitrary timestamp to import_files

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch traveltissues/mr4
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit af17fe9523a86c59789f17dad4f1cdb98080a70a
Author: Darius Makovsky <tr...@protonmail.com>
AuthorDate: Tue Dec 24 14:02:28 2019 +0000

    element: pass arbitrary timestamp to import_files
---
 src/buildstream/element.py | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index f634537..20cc809 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -75,6 +75,7 @@ Class Reference
 import os
 import re
 import stat
+import time
 import copy
 from collections import OrderedDict
 import contextlib
@@ -90,7 +91,7 @@ from . import _yaml
 from ._variables import Variables
 from ._versions import BST_CORE_ARTIFACT_VERSION
 from ._exceptions import BstError, LoadError, LoadErrorReason, ImplError, ErrorDomain, SourceCacheError
-from .utils import FileListResult
+from .utils import FileListResult, BST_ARBITRARY_TIMESTAMP
 from . import utils
 from . import _cachekey
 from . import _site
@@ -705,7 +706,7 @@ class Element(Plugin):
 
             if update_mtimes:
                 copy_result = vstagedir.import_files(
-                    files_vdir, filter_callback=copy_filter, report_written=True, update_mtime=True
+                    files_vdir, filter_callback=copy_filter, report_written=True, update_mtime=time.time()
                 )
                 result = result.combine(copy_result)
 
@@ -1417,7 +1418,6 @@ class Element(Plugin):
     def _stage_sources_at(self, vdirectory, usebuildtree=False):
 
         context = self._get_context()
-        set_deterministic_mtimes = True
 
         # It's advantageous to have this temporary directory on
         # the same file system as the rest of our cache.
@@ -1456,8 +1456,6 @@ class Element(Plugin):
                         for source in self.__sources[last_required_previous_ix:]:
                             source_dir = sourcecache.export(source)
                             import_dir.import_files(source_dir)
-                            if source.BST_STAGE_VIRTUAL_DIRECTORY:
-                                set_deterministic_mtimes = False
 
                     except SourceCacheError as e:
                         raise ElementError("Error trying to export source for {}: {}".format(self.name, e))
@@ -1467,12 +1465,9 @@ class Element(Plugin):
                             reason="import-source-files-fail",
                         )
 
+            # Ensure deterministic mtime of sources at build time
             with utils._deterministic_umask():
-                vdirectory.import_files(import_dir)
-
-        # Ensure deterministic mtime of sources at build time
-        if set_deterministic_mtimes:
-            vdirectory.set_deterministic_mtime()
+                vdirectory.import_files(import_dir, update_mtime=BST_ARBITRARY_TIMESTAMP)
         # Ensure deterministic owners of sources at build time
         vdirectory.set_deterministic_user()