Posted to commits@buildstream.apache.org by ju...@apache.org on 2022/07/06 14:24:05 UTC

[buildstream] 01/03: _protos: Update protos from remote-apis

This is an automated email from the ASF dual-hosted git repository.

juergbi pushed a commit to branch juerg/reapi-2.2
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 4968565577c140c6cab608cbf57d0ee10de04606
Author: Jürg Billeter <j...@bitron.ch>
AuthorDate: Wed Jul 6 14:53:57 2022 +0200

    _protos: Update protos from remote-apis
---
 src/buildstream/_cas/casserver.py                  |   2 +-
 .../build/bazel/remote/asset/v1/remote_asset.proto |   4 +-
 .../bazel/remote/asset/v1/remote_asset_pb2.py      |   4 +-
 .../bazel/remote/asset/v1/remote_asset_pb2_grpc.py |   2 +-
 .../remote/execution/v2/remote_execution.proto     | 334 +++++++++++++++---
 .../remote/execution/v2/remote_execution_pb2.py    | 250 ++++++-------
 .../execution/v2/remote_execution_pb2_grpc.py      | 387 +++++++++++++++------
 7 files changed, 712 insertions(+), 271 deletions(-)

diff --git a/src/buildstream/_cas/casserver.py b/src/buildstream/_cas/casserver.py
index 9fded1a7f..184ff438f 100644
--- a/src/buildstream/_cas/casserver.py
+++ b/src/buildstream/_cas/casserver.py
@@ -198,7 +198,7 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
         response = remote_execution_pb2.ServerCapabilities()
 
         cache_capabilities = response.cache_capabilities
-        cache_capabilities.digest_function.append(remote_execution_pb2.DigestFunction.SHA256)
+        cache_capabilities.digest_functions.append(remote_execution_pb2.DigestFunction.SHA256)
         cache_capabilities.action_cache_update_capabilities.update_enabled = False
         cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
         cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.SymlinkAbsolutePathStrategy.ALLOWED
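
With this change, client code that inspects cache capabilities must use the
plural field name. A minimal sketch of querying the renamed field over gRPC
(the endpoint address is illustrative):

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2,
        remote_execution_pb2_grpc,
    )

    channel = grpc.insecure_channel("localhost:50051")  # illustrative address
    stub = remote_execution_pb2_grpc.CapabilitiesStub(channel)
    caps = stub.GetCapabilities(
        remote_execution_pb2.GetCapabilitiesRequest(instance_name="")
    )

    # Note the plural field name after this update: digest_functions.
    sha256 = remote_execution_pb2.DigestFunction.SHA256
    if sha256 in caps.cache_capabilities.digest_functions:
        print("server supports SHA-256 digests")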
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
index 60be76411..4d9be8175 100644
--- a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
@@ -23,7 +23,7 @@ import "google/protobuf/timestamp.proto";
 import "google/rpc/status.proto";
 
 option csharp_namespace = "Build.Bazel.Remote.Asset.v1";
-option go_package = "remoteasset";
+option go_package = "github.com/bazelbuild/remote-apis/build/bazel/remote/asset/v1;remoteasset";
 option java_multiple_files = true;
 option java_outer_classname = "RemoteAssetProto";
 option java_package = "build.bazel.remote.asset.v1";
@@ -92,7 +92,7 @@ service Fetch {
   //
   // Servers *SHOULD* ensure that referenced files are present in the CAS at the
   // time of the response, and (if supported) that they will remain available
-  // for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
+  // for a reasonable period of time. The lifetimes of the referenced blobs *SHOULD*
   // be increased if necessary and applicable.
   // In the event that a client receives a reference to content that is no
   // longer present, it *MAY* re-issue the request with
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
index 145f00165..0ca03ede1 100644
--- a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
@@ -19,7 +19,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__
 from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
 
 
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.build/bazel/remote/asset/v1/remote_asset.proto\x12\x1b\x62uild.bazel.remote.asset.v1\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"(\n\tQualifier\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xdc\x01\n\x10\x46\x65tchBlobRequest\x12\x15\n\rins [...]
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.build/bazel/remote/asset/v1/remote_asset.proto\x12\x1b\x62uild.bazel.remote.asset.v1\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"(\n\tQualifier\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xdc\x01\n\x10\x46\x65tchBlobRequest\x12\x15\n\rins [...]
 
 
 
@@ -100,7 +100,7 @@ _PUSH = DESCRIPTOR.services_by_name['Push']
 if _descriptor._USE_C_DESCRIPTORS == False:
 
   DESCRIPTOR._options = None
-  DESCRIPTOR._serialized_options = b'\n\033build.bazel.remote.asset.v1B\020RemoteAssetProtoP\001Z\013remoteasset\242\002\002RA\252\002\033Build.Bazel.Remote.Asset.v1'
+  DESCRIPTOR._serialized_options = b'\n\033build.bazel.remote.asset.v1B\020RemoteAssetProtoP\001ZIgithub.com/bazelbuild/remote-apis/build/bazel/remote/asset/v1;remoteasset\242\002\002RA\252\002\033Build.Bazel.Remote.Asset.v1'
   _FETCH.methods_by_name['FetchBlob']._options = None
   _FETCH.methods_by_name['FetchBlob']._serialized_options = b'\202\323\344\223\002,\"\'/v1/{instance_name=**}/assets:fetchBlob:\001*'
   _FETCH.methods_by_name['FetchDirectory']._options = None
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py
index 203e3d908..2677149fb 100644
--- a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py
@@ -54,7 +54,7 @@ class FetchServicer(object):
 
         Servers *SHOULD* ensure that referenced files are present in the CAS at the
         time of the response, and (if supported) that they will remain available
-        for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
+        for a reasonable period of time. The lifetimes of the referenced blobs *SHOULD*
         be increased if necessary and applicable.
         In the event that a client receives a reference to content that is no
         longer present, it *MAY* re-issue the request with
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
index b69105d72..437ead7c7 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
@@ -19,13 +19,14 @@ package build.bazel.remote.execution.v2;
 import "build/bazel/semver/semver.proto";
 import "google/api/annotations.proto";
 import "google/longrunning/operations.proto";
+import "google/protobuf/any.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/timestamp.proto";
 import "google/protobuf/wrappers.proto";
 import "google/rpc/status.proto";
 
 option csharp_namespace = "Build.Bazel.Remote.Execution.V2";
-option go_package = "remoteexecution";
+option go_package = "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2;remoteexecution";
 option java_multiple_files = true;
 option java_outer_classname = "RemoteExecutionProto";
 option java_package = "build.bazel.remote.execution.v2";
@@ -104,6 +105,11 @@ service Execution {
   // where, for each requested blob not present in the CAS, there is a
   // `Violation` with a `type` of `MISSING` and a `subject` of
   // `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+  //
+  // The server does not need to guarantee that a call to this method leads to
+  // at most one execution of the action. The server MAY execute the action
+  // multiple times, potentially in parallel. These redundant executions MAY
+  // continue to run, even if the operation is completed.
   rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) {
     option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" };
   }
@@ -142,7 +148,7 @@ service ActionCache {
   // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
   // are available at the time of returning the
   // [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be
-  // for some period of time afterwards. The TTLs of the referenced blobs SHOULD be increased
+  // for some period of time afterwards. The lifetimes of the referenced blobs SHOULD be increased
   // if necessary and applicable.
   //
   // Errors:
@@ -161,6 +167,9 @@ service ActionCache {
   // [Command][build.bazel.remote.execution.v2.Command], into the
   // `ContentAddressableStorage`.
   //
+  // Server implementations MAY modify the
+  // `UpdateActionResultRequest.action_result` and return an equivalent value.
+  //
   // Errors:
   //
   // * `INVALID_ARGUMENT`: One or more arguments are invalid.
@@ -190,47 +199,108 @@ service ActionCache {
 //
 // For small file uploads the client should group them together and call
 // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+//
 // For large uploads, the client must use the
-// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
-// `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
-// where `instance_name` is as described in the next paragraph, `uuid` is a
-// version 4 UUID generated by the client, and `hash` and `size` are the
-// [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
-// `uuid` is used only to avoid collisions when multiple clients try to upload
-// the same file (or the same client tries to upload the file multiple times at
-// once on different threads), so the client MAY reuse the `uuid` for uploading
-// different blobs. The `resource_name` may optionally have a trailing filename
-// (or other metadata) for a client to use if it is storing URLs, as in
-// `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
-// after the `size` is ignored.
+// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
+//
+// For uncompressed data, the `WriteRequest.resource_name` is of the following form:
+// `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+//
+// Where:
+// * `instance_name` is an identifier, possibly containing multiple path
+//   segments, used to distinguish between the various instances on the server,
+//   in a manner defined by the server. If it is the empty path, the leading
+//   slash is omitted, so that the `resource_name` becomes
+//   `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
+//   To simplify parsing, a path segment cannot equal any of the following
+//   keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
+//   `capabilities` or `compressed-blobs`.
+// * `uuid` is a version 4 UUID generated by the client, used to avoid
+//   collisions between concurrent uploads of the same data. Clients MAY
+//   reuse the same `uuid` for uploading different blobs.
+// * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
+//   of the data being uploaded.
+// * `optional_metadata` is implementation specific data, which clients MAY omit.
+//   Servers MAY ignore this metadata.
 //
-// A single server MAY support multiple instances of the execution system, each
-// with their own workers, storage, cache, etc. The exact relationship between
-// instances is up to the server. If the server does, then the `instance_name`
-// is an identifier, possibly containing multiple path segments, used to
-// distinguish between the various instances on the server, in a manner defined
-// by the server. For servers which do not support multiple instances, then the
-// `instance_name` is the empty path and the leading slash is omitted, so that
-// the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
-// To simplify parsing, a path segment cannot equal any of the following
-// keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
-// `capabilities`.
+// Data can alternatively be uploaded in compressed form, with the following
+// `WriteRequest.resource_name` form:
+// `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+//
+// Where:
+// * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+// * `compressor` is a lowercase string form of a `Compressor.Value` enum
+//   other than `identity`, which is supported by the server and advertised in
+//   [CacheCapabilities.supported_compressors][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressors].
+// * `uncompressed_hash` and `uncompressed_size` refer to the
+//   [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+//   uploaded, once uncompressed. Servers MUST verify that these match
+//   the uploaded data once uncompressed, and MUST return an
+//   `INVALID_ARGUMENT` error in the case of mismatch.
+//
+// Note that when writing compressed blobs, the `WriteRequest.write_offset` in
+// the initial request in a stream refers to the offset in the uncompressed form
+// of the blob. In subsequent requests, `WriteRequest.write_offset` MUST be the
+// sum of the first request's `WriteRequest.write_offset` and the total size of
+// all the compressed data bundles in the previous requests.
+// Note that this mixes an uncompressed offset with a compressed byte length,
+// which is nonsensical, but it is done to fit the semantics of the existing
+// ByteStream protocol.
+//
+// Uploads of the same data MAY occur concurrently in any form, compressed or
+// uncompressed.
+//
+// Clients SHOULD NOT use gRPC-level compression for ByteStream API `Write`
+// calls of compressed blobs, since this would compress already-compressed data.
 //
 // When attempting an upload, if another client has already completed the upload
 // (which may occur in the middle of a single upload if another client uploads
-// the same blob concurrently), the request will terminate immediately with
-// a response whose `committed_size` is the full size of the uploaded file
-// (regardless of how much data was transmitted by the client). If the client
-// completes the upload but the
+// the same blob concurrently), the request will terminate immediately without
+// error, and with a response whose `committed_size` is the value `-1` if this
+// is a compressed upload, or with the full size of the uploaded file if this is
+// an uncompressed upload (regardless of how much data was transmitted by the
+// client). If the client completes the upload but the
 // [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
 // `INVALID_ARGUMENT` error will be returned. In either case, the client should
 // not attempt to retry the upload.
 //
-// For downloading blobs, the client must use the
-// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
-// a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
-// `instance_name` is the instance name (see above), and `hash` and `size` are
-// the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+// Small downloads can be grouped and requested in a batch via
+// [BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+//
+// For large downloads, the client must use the
+// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
+//
+// For uncompressed data, the `ReadRequest.resource_name` is of the following form:
+// `{instance_name}/blobs/{hash}/{size}`
+// Where `instance_name`, `hash` and `size` are defined as for uploads.
+//
+// Data can alternatively be downloaded in compressed form, with the following
+// `ReadRequest.resource_name` form:
+// `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+//
+// Where:
+// * `instance_name` and `compressor` are defined as for uploads.
+// * `uncompressed_hash` and `uncompressed_size` refer to the
+//   [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+//   downloaded, once uncompressed. Clients MUST verify that these match
+//   the downloaded data once uncompressed, and take appropriate steps in
+//   the case of failure such as retrying a limited number of times or
+//   surfacing an error to the user.
+//
+// When downloading compressed blobs:
+// * `ReadRequest.read_offset` refers to the offset in the uncompressed form
+//   of the blob.
+// * Servers MUST return `INVALID_ARGUMENT` if `ReadRequest.read_limit` is
+//   non-zero.
+// * Servers MAY use any compression level they choose, including different
+//   levels for different blobs (e.g. choosing a level designed for maximum
+//   speed for data known to be incompressible).
+// * Clients SHOULD NOT use gRPC-level compression, since this would compress
+//   already-compressed data.
+//
+// Servers MUST be able to provide data for all recently advertised blobs in
+// each of the compression formats that the server supports, as well as in
+// uncompressed form.
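
To make the resource-name grammar above concrete, a small illustrative helper;
SHA-256 digests and the lowercase compressor name "zstd" are assumptions of
the sketch, and a client must only use compressors the server advertises:

    import hashlib
    import uuid

    def upload_resource_name(instance_name, data, compressor=None):
        # `hash` and `size` always describe the *uncompressed* data,
        # even for compressed-blobs uploads.
        blob_hash = hashlib.sha256(data).hexdigest()  # assumes SHA-256
        segments = [instance_name] if instance_name else []
        segments += ["uploads", str(uuid.uuid4())]
        if compressor is None:
            segments += ["blobs", blob_hash, str(len(data))]
        else:
            # e.g. "zstd". For subsequent WriteRequests, write_offset is the
            # first request's offset plus the compressed bytes sent so far.
            segments += ["compressed-blobs", compressor, blob_hash, str(len(data))]
        return "/".join(segments)

    def download_resource_name(instance_name, blob_hash, size, compressor=None):
        segments = [instance_name] if instance_name else []
        if compressor is None:
            segments += ["blobs", blob_hash, str(size)]
        else:
            segments += ["compressed-blobs", compressor, blob_hash, str(size)]
        return "/".join(segments)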
 //
 // The lifetime of entries in the CAS is implementation specific, but it SHOULD
 // be long enough to allow for newly-added and recently looked-up entries to be
@@ -251,7 +321,7 @@ service ContentAddressableStorage {
   // Clients can use this API before uploading blobs to determine which ones are
   // already present in the CAS and do not need to be uploaded again.
   //
-  // Servers SHOULD increase the TTLs of the referenced blobs if necessary and
+  // Servers SHOULD increase the lifetimes of the referenced blobs if necessary and
   // applicable.
   //
   // There are no method-specific errors.
@@ -406,6 +476,14 @@ message Action {
   // timeout that is longer than the server's maximum timeout, the server MUST
   // reject the request.
   //
+  // The timeout is only intended to cover the "execution" of the specified
+  // action and not time in queue nor any overheads before or after execution
+  // such as marshalling inputs/outputs. The server SHOULD avoid including time
+  // the client doesn't have control over, and MAY extend or reduce the
+  // timeout to account for delays or speedups that occur during execution
+  // itself (e.g., lazily loading data from the Content Addressable Storage,
+  // live migration of virtual machines, emulation overhead).
+  //
   // The timeout is a part of the
   // [Action][build.bazel.remote.execution.v2.Action] message, and
   // therefore two `Actions` with different timeouts are different, even if they
@@ -422,6 +500,24 @@ message Action {
   bool do_not_cache = 7;
 
   reserved 8; // Used for field moved to [Command][build.bazel.remote.execution.v2.Command].
+
+  // An optional additional salt value used to place this `Action` into a
+  // separate cache namespace from other instances having the same field
+  // contents. This salt typically comes from operational configuration
+  // specific to sources such as repo and service configuration,
+  // and allows disowning an entire set of ActionResults that might have been
+  // poisoned by buggy software or tool failures.
+  bytes salt = 9;
+
+  // The optional platform requirements for the execution environment. The
+  // server MAY choose to execute the action on any worker satisfying the
+  // requirements, so the client SHOULD ensure that running the action on any
+  // such worker will have the same result.  A detailed lexicon for this can be
+  // found in the accompanying platform.md.
+  // New in version 2.2: clients SHOULD set these platform properties as well
+  // as those in the [Command][build.bazel.remote.execution.v2.Command]. Servers
+  // SHOULD prefer those set here.
+  Platform platform = 10;
 }
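
A hedged sketch of how a v2.2 client might populate the new `salt` and
`platform` fields (the digest values below are placeholders, not real hashes):

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    # Placeholder digests; a real client computes these from the serialized
    # Command and the input root Directory.
    command_digest = remote_execution_pb2.Digest(hash="a" * 64, size_bytes=142)
    input_root_digest = remote_execution_pb2.Digest(hash="b" * 64, size_bytes=89)

    action = remote_execution_pb2.Action(
        command_digest=command_digest,
        input_root_digest=input_root_digest,
        # Arbitrary example salt: changing it invalidates prior cache entries.
        salt=b"cache-epoch-2022-07",
    )

    # New in v2.2: platform properties set directly on the Action (servers
    # SHOULD prefer these over Command.platform). Names and values are
    # case-sensitive and feed into the action digest.
    prop = action.platform.properties.add()
    prop.name = "OSFamily"
    prop.value = "linux"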
 
 // A `Command` is the actual command executed by a worker running an
@@ -442,9 +538,21 @@ message Command {
     string value = 2;
   }
 
-  // The arguments to the command. The first argument must be the path to the
-  // executable, which must be either a relative path, in which case it is
-  // evaluated with respect to the input root, or an absolute path.
+  // The arguments to the command.
+  //
+  // The first argument specifies the command to run, which may be either an
+  // absolute path, a path relative to the working directory, or an unqualified
+  // path (without path separators) which will be resolved using the operating
+  // system's equivalent of the PATH environment variable. Path separators
+  // native to the operating system running on the worker SHOULD be used. If the
+  // `environment_variables` list contains an entry for the PATH environment
+  // variable, it SHOULD be respected. If not, the resolution process is
+  // implementation-defined.
+  //
+  // Changed in v2.3. v2.2 and older require that no PATH lookups are performed,
+  // and that relative paths are resolved relative to the input root. This
+  // behavior cannot, however, be relied upon, as most implementations already
+  // followed the rules described above.
   repeated string arguments = 1;
 
   // The environment variables to set when running the program. The worker may
@@ -518,10 +626,10 @@ message Command {
   // The type of the output (file or directory) is not specified, and will be
   // determined by the server after action execution. If the resulting path is
   // a file, it will be returned in an
-  // [OutputFile][build.bazel.remote.execution.v2.OutputFile]) typed field.
+  // [OutputFile][build.bazel.remote.execution.v2.OutputFile] typed field.
   // If the path is a directory, the entire directory structure will be returned
   // as a [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
-  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory])
+  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]
   // Other files or directories that may be created during command execution
   // are discarded.
   //
@@ -546,8 +654,11 @@ message Command {
   // The platform requirements for the execution environment. The server MAY
   // choose to execute the action on any worker satisfying the requirements, so
   // the client SHOULD ensure that running the action on any such worker will
-  // have the same result.
-  // A detailed lexicon for this can be found in the accompanying platform.md.
+  // have the same result.  A detailed lexicon for this can be found in the
+  // accompanying platform.md.
+  // DEPRECATED as of v2.2: platform properties are now specified directly in
+  // the action. See documentation note in the
+  // [Action][build.bazel.remote.execution.v2.Action] for migration.
   Platform platform = 5;
 
   // The working directory, relative to the input root, for the command to run
@@ -592,6 +703,11 @@ message Platform {
   // The server MAY use the `value` of one or more properties to determine how
   // it sets up the execution environment, such as by making specific system
   // files available to the worker.
+  //
+  // Both names and values are typically case-sensitive. Note that the platform
+  // is implicitly part of the action digest, so even tiny changes in the names
+  // or values (like changing case) may result in different action cache
+  // entries.
   message Property {
     // The property name.
     string name = 1;
@@ -769,8 +885,11 @@ message SymlinkNode {
   // The target path can be relative to the parent directory of the symlink or
   // it can be an absolute path starting with `/`. Support for absolute paths
   // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
-  // API. The canonical form forbids the substrings `/./` and `//` in the target
-  // path. `..` components are allowed anywhere in the target path.
+  // API. `..` components are allowed anywhere in the target path as logical
+  // canonicalization may lead to different behavior in the presence of
+  // directory symlinks (e.g. `foo/../bar` may not be the same as `bar`).
+  // To reduce potential cache misses, canonicalization is still recommended
+  // where this is possible without impacting correctness.
   string target = 2;
 
   // The node properties of the SymlinkNode.
@@ -844,15 +963,44 @@ message ExecutedActionMetadata {
   // When the worker completed executing the action command.
   google.protobuf.Timestamp execution_completed_timestamp = 8;
 
+  // New in v2.3: the amount of time the worker spent executing the action
+  // command, potentially computed using a worker-specific virtual clock.
+  //
+  // The virtual execution duration is only intended to cover the "execution" of
+  // the specified action and not time in queue nor any overheads before or
+  // after execution such as marshalling inputs/outputs. The server SHOULD avoid
+  // including time spent the client doesn't have control over, and MAY extend
+  // or reduce the execution duration to account for delays or speedups that
+  // occur during execution itself (e.g., lazily loading data from the Content
+  // Addressable Storage, live migration of virtual machines, emulation
+  // overhead).
+  //
+  // The method of timekeeping used to compute the virtual execution duration
+  // MUST be consistent with what is used to enforce the
+  // [Action][build.bazel.remote.execution.v2.Action]'s `timeout`. There is no
+  // relationship between the virtual execution duration and the values of
+  // `execution_start_timestamp` and `execution_completed_timestamp`.
+  google.protobuf.Duration virtual_execution_duration = 12;
+
   // When the worker started uploading action outputs.
   google.protobuf.Timestamp output_upload_start_timestamp = 9;
 
   // When the worker finished uploading action outputs.
   google.protobuf.Timestamp output_upload_completed_timestamp = 10;
+
+  // Details that are specific to the kind of worker used. For example,
+  // on POSIX-like systems this could contain a message with
+  // getrusage(2) statistics.
+  repeated google.protobuf.Any auxiliary_metadata = 11;
 }
 
 // An ActionResult represents the result of an
 // [Action][build.bazel.remote.execution.v2.Action] being run.
+//
+// It is advised that at least one field (for example
+// `ActionResult.execution_metadata.Worker`) have a non-default value, to
+// ensure that the serialized value is non-empty, which can then be used
+// as a basic data sanity check.
 message ActionResult {
   reserved 1; // Reserved for use as the resource name.
 
@@ -997,6 +1145,7 @@ message ActionResult {
   // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
   // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
   // would cause the response to exceed message size limits.
+  // Clients SHOULD NOT populate this field when uploading to the cache.
   bytes stdout_raw = 5;
 
   // The digest for a blob containing the standard output of the action, which
@@ -1009,6 +1158,7 @@ message ActionResult {
   // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
   // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
   // would cause the response to exceed message size limits.
+  // Clients SHOULD NOT populate this field when uploading to the cache.
   bytes stderr_raw = 7;
 
   // The digest for a blob containing the standard error of the action, which
@@ -1043,6 +1193,7 @@ message OutputFile {
   // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
   // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
   // would cause the response to exceed message size limits.
+  // Clients SHOULD NOT populate this field when uploading to the cache.
   bytes contents = 5;
 
   // The supported node properties of the OutputFile, if requested by the Action.
@@ -1061,6 +1212,9 @@ message Tree {
   // recursively, all its children. In order to reconstruct the directory tree,
   // the client must take the digests of each of the child directories and then
   // build up a tree starting from the `root`.
+  // Servers SHOULD ensure that these are ordered consistently such that two
+  // actions producing equivalent output directories on the same server
+  // implementation also produce Tree messages with matching digests.
   repeated Directory children = 2;
 }
 
@@ -1096,8 +1250,7 @@ message OutputSymlink {
   // The target path can be relative to the parent directory of the symlink or
   // it can be an absolute path starting with `/`. Support for absolute paths
   // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
-  // API. The canonical form forbids the substrings `/./` and `//` in the target
-  // path. `..` components are allowed anywhere in the target path.
+  // API. `..` components are allowed anywhere in the target path.
   string target = 2;
 
   // The supported node properties of the OutputSymlink, if requested by the
@@ -1227,6 +1380,17 @@ message ExecuteResponse {
 }
 
 // The current stage of action execution.
+//
+// Even though these stages are numbered according to the order in which
+// they generally occur, there is no requirement that the remote
+// execution system report events in this order. For example, an
+// operation MAY transition from the EXECUTING stage back to QUEUED
+// in case the hardware on which the operation executes fails.
+//
+// If and only if the remote execution system reports that an operation
+// has reached the COMPLETED stage, it MUST set the [done
+// field][google.longrunning.Operation.done] of the
+// [Operation][google.longrunning.Operation] and terminate the stream.
 message ExecutionStage {
   enum Value {
     // Invalid value.
@@ -1301,7 +1465,8 @@ message GetActionResultRequest {
   bool inline_stderr = 4;
 
   // A hint to the server to inline the contents of the listed output files.
-  // Each path needs to exactly match one path in `output_files` in the
+  // Each path needs to exactly match one file path in either `output_paths` or
+  // `output_files` (DEPRECATED since v2.1) in the
   // [Command][build.bazel.remote.execution.v2.Command] message.
   repeated string inline_output_files = 5;
 }
@@ -1361,6 +1526,12 @@ message BatchUpdateBlobsRequest {
 
     // The raw binary data.
     bytes data = 2;
+
+    // The format of `data`. Must be `IDENTITY`/unspecified, or one of the
+    // compressors advertised by the
+    // [CacheCapabilities.supported_batch_update_compressors][build.bazel.remote.execution.v2.CacheCapabilities.supported_batch_update_compressors]
+    // field.
+    Compressor.Value compressor = 3;
   }
 
   // The instance of the execution system to operate against. A server may
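
A sketch of a compressed batch upload using the new `compressor` field; the
third-party `zstandard` package is an assumption of the example, and `ZSTD` is
only valid when the server advertises it in `supported_batch_update_compressors`:

    import hashlib
    import zstandard  # assumption: third-party package, not part of these protos

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    payload = b"example blob contents"

    request = remote_execution_pb2.BatchUpdateBlobsRequest(instance_name="")
    entry = request.requests.add()
    # The digest always describes the uncompressed payload (assumes SHA-256).
    entry.digest.hash = hashlib.sha256(payload).hexdigest()
    entry.digest.size_bytes = len(payload)
    entry.data = zstandard.ZstdCompressor().compress(payload)
    entry.compressor = remote_execution_pb2.Compressor.ZSTD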
@@ -1402,6 +1573,10 @@ message BatchReadBlobsRequest {
 
   // The individual blob digests.
   repeated Digest digests = 2;
+
+  // A list of acceptable encodings for the returned inlined data, in no
+  // particular order. `IDENTITY` is always allowed even if not specified here.
+  repeated Compressor.Value acceptable_compressors = 3;
 }
 
 // A response message for
@@ -1415,6 +1590,10 @@ message BatchReadBlobsResponse {
     // The raw binary data.
     bytes data = 2;
 
+    // The format the data is encoded in. MUST be `IDENTITY`/unspecified,
+    // or one of the acceptable compressors specified in the `BatchReadBlobsRequest`.
+    Compressor.Value compressor = 4;
+
     // The result of attempting to download that blob.
     google.rpc.Status status = 3;
   }
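
The read side, again assuming the `zstandard` package and SHA-256 digests:

    import hashlib
    import zstandard  # assumption, as in the upload sketch

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    def batch_read_request(instance_name, digests):
        return remote_execution_pb2.BatchReadBlobsRequest(
            instance_name=instance_name,
            digests=digests,
            # IDENTITY is always acceptable and need not be listed.
            acceptable_compressors=[remote_execution_pb2.Compressor.ZSTD],
        )

    def decode_blob(response_entry):
        data = response_entry.data
        if response_entry.compressor == remote_execution_pb2.Compressor.ZSTD:
            data = zstandard.ZstdDecompressor().decompress(data)
        # Verify the digest of the decoded data before trusting it.
        if hashlib.sha256(data).hexdigest() != response_entry.digest.hash:
            raise ValueError("digest mismatch for downloaded blob")
        return data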
@@ -1520,6 +1699,11 @@ message DigestFunction {
 
     // The SHA-512 digest function.
     SHA512 = 6;
+
+    // Murmur3 128-bit digest function, x64 variant. Note that this is not a
+    // cryptographic hash function and its collision properties are not strongly guaranteed.
+    // See https://github.com/aappleby/smhasher/wiki/MurmurHash3 .
+    MURMUR3 = 7;
   }
 }
 
@@ -1529,12 +1713,17 @@ message ActionCacheUpdateCapabilities {
 }
 
 // Allowed values for priority in
-// [ResultsCachePolicy][google.devtools.remoteexecution.v2.ResultsCachePolicy]
+// [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy] and
+// [ExecutionPolicy][build.bazel.remote.execution.v2.ExecutionPolicy].
 // Used for querying both cache and execution valid priority ranges.
 message PriorityCapabilities {
   // Supported range of priorities, including boundaries.
   message PriorityRange {
+    // The minimum numeric value for this priority range, which represents the
+    // most urgent task or longest retained item.
     int32 min_priority = 1;
+    // The maximum numeric value for this priority range, which represents the
+    // least urgent task or shortest retained item.
     int32 max_priority = 2;
   }
   repeated PriorityRange priorities = 1;
@@ -1558,11 +1747,31 @@ message SymlinkAbsolutePathStrategy {
   }
 }
 
+// Compression formats which may be supported.
+message Compressor {
+  enum Value {
+    // No compression. Servers and clients MUST always support this, and do
+    // not need to advertise it.
+    IDENTITY = 0;
+
+    // Zstandard compression.
+    ZSTD = 1;
+
+    // RFC 1951 Deflate. This format is identical to what is used by ZIP
+    // files. Headers such as the one generated by gzip are not
+    // included.
+    //
+    // It is advised to use algorithms such as Zstandard instead, as
+    // those are faster and/or provide a better compression ratio.
+    DEFLATE = 2;
+  }
+}
+
 // Capabilities of the remote cache system.
 message CacheCapabilities {
   // All the digest functions supported by the remote cache.
   // Remote cache may support multiple digest functions simultaneously.
-  repeated DigestFunction.Value digest_function = 1;
+  repeated DigestFunction.Value digest_functions = 1;
 
   // Capabilities for updating the action cache.
   ActionCacheUpdateCapabilities action_cache_update_capabilities = 2;
@@ -1578,6 +1787,19 @@ message CacheCapabilities {
 
   // Whether absolute symlink targets are supported.
   SymlinkAbsolutePathStrategy.Value symlink_absolute_path_strategy = 5;
+
+  // Compressors supported by the "compressed-blobs" bytestream resources.
+  // Servers MUST support identity/no-compression, even if it is not listed
+  // here.
+  //
+  // Note that this does not imply which compressors, if any, are supported by
+  // the server at the gRPC level.
+  repeated Compressor.Value supported_compressors = 6;
+
+  // Compressors supported for inlined data in
+  // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
+  // requests.
+  repeated Compressor.Value supported_batch_update_compressors = 7;
 }
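
A small sketch of consulting these capabilities before choosing the
`compressed-blobs` resource form (the helper name is illustrative):

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    def pick_bytestream_compressor(caps):
        # `caps` is a ServerCapabilities message, e.g. from GetCapabilities().
        zstd = remote_execution_pb2.Compressor.ZSTD
        if zstd in caps.cache_capabilities.supported_compressors:
            return "zstd"  # lowercase form used in resource names
        return None  # fall back to the plain blobs/{hash}/{size} form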
 
 // Capabilities of the remote execution system.
@@ -1632,4 +1854,18 @@ message RequestMetadata {
   // An identifier to tie multiple tool invocations together. For example,
   // runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
   string correlated_invocations_id = 4;
+
+  // A brief description of the kind of action, for example, CppCompile or GoLink.
+  // There is no standard agreed set of values for this, and they are expected to vary between different client tools.
+  string action_mnemonic = 5;
+
+  // An identifier for the target which produced this action.
+  // No guarantees are made around how many actions may relate to a single target.
+  string target_id = 6;
+
+  // An identifier for the configuration in which the target was built,
+  // e.g. for differentiating building host tools or different target platforms.
+  // There is no expectation that this value will have any particular structure,
+  // or equality across invocations, though some client tools may offer these guarantees.
+  string configuration_id = 7;
 }
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
index 800485616..1c9ea49b8 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
@@ -15,13 +15,14 @@ _sym_db = _symbol_database.Default()
 from buildstream._protos.build.bazel.semver import semver_pb2 as build_dot_bazel_dot_semver_dot_semver__pb2
 from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
 from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
 from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
 from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
 
 
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xdb\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01 [...]
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xa6\x02\n\x06\x41\x63tion\x12?\n\ [...]
 
 
 
@@ -72,6 +73,7 @@ _ACTIONCACHEUPDATECAPABILITIES = DESCRIPTOR.message_types_by_name['ActionCacheUp
 _PRIORITYCAPABILITIES = DESCRIPTOR.message_types_by_name['PriorityCapabilities']
 _PRIORITYCAPABILITIES_PRIORITYRANGE = _PRIORITYCAPABILITIES.nested_types_by_name['PriorityRange']
 _SYMLINKABSOLUTEPATHSTRATEGY = DESCRIPTOR.message_types_by_name['SymlinkAbsolutePathStrategy']
+_COMPRESSOR = DESCRIPTOR.message_types_by_name['Compressor']
 _CACHECAPABILITIES = DESCRIPTOR.message_types_by_name['CacheCapabilities']
 _EXECUTIONCAPABILITIES = DESCRIPTOR.message_types_by_name['ExecutionCapabilities']
 _TOOLDETAILS = DESCRIPTOR.message_types_by_name['ToolDetails']
@@ -79,6 +81,7 @@ _REQUESTMETADATA = DESCRIPTOR.message_types_by_name['RequestMetadata']
 _EXECUTIONSTAGE_VALUE = _EXECUTIONSTAGE.enum_types_by_name['Value']
 _DIGESTFUNCTION_VALUE = _DIGESTFUNCTION.enum_types_by_name['Value']
 _SYMLINKABSOLUTEPATHSTRATEGY_VALUE = _SYMLINKABSOLUTEPATHSTRATEGY.enum_types_by_name['Value']
+_COMPRESSOR_VALUE = _COMPRESSOR.enum_types_by_name['Value']
 Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), {
   'DESCRIPTOR' : _ACTION,
   '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
@@ -415,6 +418,13 @@ SymlinkAbsolutePathStrategy = _reflection.GeneratedProtocolMessageType('SymlinkA
   })
 _sym_db.RegisterMessage(SymlinkAbsolutePathStrategy)
 
+Compressor = _reflection.GeneratedProtocolMessageType('Compressor', (_message.Message,), {
+  'DESCRIPTOR' : _COMPRESSOR,
+  '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
+  # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Compressor)
+  })
+_sym_db.RegisterMessage(Compressor)
+
 CacheCapabilities = _reflection.GeneratedProtocolMessageType('CacheCapabilities', (_message.Message,), {
   'DESCRIPTOR' : _CACHECAPABILITIES,
   '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
@@ -450,7 +460,7 @@ _CAPABILITIES = DESCRIPTOR.services_by_name['Capabilities']
 if _descriptor._USE_C_DESCRIPTORS == False:
 
   DESCRIPTOR._options = None
-  DESCRIPTOR._serialized_options = b'\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001Z\017remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'
+  DESCRIPTOR._serialized_options = b'\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001ZQgithub.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2;remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'
   _EXECUTERESPONSE_SERVERLOGSENTRY._options = None
   _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_options = b'8\001'
   _EXECUTION.methods_by_name['Execute']._options = None
@@ -471,120 +481,124 @@ if _descriptor._USE_C_DESCRIPTORS == False:
   _CONTENTADDRESSABLESTORAGE.methods_by_name['GetTree']._serialized_options = b'\202\323\344\223\002R\022P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree'
   _CAPABILITIES.methods_by_name['GetCapabilities']._options = None
   _CAPABILITIES.methods_by_name['GetCapabilities']._serialized_options = b'\202\323\344\223\002%\022#/v2/{instance_name=**}/capabilities'
-  _ACTION._serialized_start=314
-  _ACTION._serialized_end=533
-  _COMMAND._serialized_start=536
-  _COMMAND._serialized_end=901
-  _COMMAND_ENVIRONMENTVARIABLE._serialized_start=851
-  _COMMAND_ENVIRONMENTVARIABLE._serialized_end=901
-  _PLATFORM._serialized_start=903
-  _PLATFORM._serialized_end=1026
-  _PLATFORM_PROPERTY._serialized_start=987
-  _PLATFORM_PROPERTY._serialized_end=1026
-  _DIRECTORY._serialized_start=1029
-  _DIRECTORY._serialized_end=1311
-  _NODEPROPERTY._serialized_start=1313
-  _NODEPROPERTY._serialized_end=1356
-  _NODEPROPERTIES._serialized_start=1359
-  _NODEPROPERTIES._serialized_end=1534
-  _FILENODE._serialized_start=1537
-  _FILENODE._serialized_end=1727
-  _DIRECTORYNODE._serialized_start=1729
-  _DIRECTORYNODE._serialized_end=1815
-  _SYMLINKNODE._serialized_start=1817
-  _SYMLINKNODE._serialized_end=1940
-  _DIGEST._serialized_start=1942
-  _DIGEST._serialized_end=1984
-  _EXECUTEDACTIONMETADATA._serialized_start=1987
-  _EXECUTEDACTIONMETADATA._serialized_end=2607
-  _ACTIONRESULT._serialized_start=2610
-  _ACTIONRESULT._serialized_end=3281
-  _OUTPUTFILE._serialized_start=3284
-  _OUTPUTFILE._serialized_end=3494
-  _TREE._serialized_start=3496
-  _TREE._serialized_end=3622
-  _OUTPUTDIRECTORY._serialized_start=3624
-  _OUTPUTDIRECTORY._serialized_end=3723
-  _OUTPUTSYMLINK._serialized_start=3725
-  _OUTPUTSYMLINK._serialized_end=3850
-  _EXECUTIONPOLICY._serialized_start=3852
-  _EXECUTIONPOLICY._serialized_end=3887
-  _RESULTSCACHEPOLICY._serialized_start=3889
-  _RESULTSCACHEPOLICY._serialized_end=3927
-  _EXECUTEREQUEST._serialized_start=3930
-  _EXECUTEREQUEST._serialized_end=4237
-  _LOGFILE._serialized_start=4239
-  _LOGFILE._serialized_end=4329
-  _EXECUTERESPONSE._serialized_start=4332
-  _EXECUTERESPONSE._serialized_end=4668
-  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_start=4577
-  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_end=4668
-  _EXECUTIONSTAGE._serialized_start=4670
-  _EXECUTIONSTAGE._serialized_end=4767
-  _EXECUTIONSTAGE_VALUE._serialized_start=4688
-  _EXECUTIONSTAGE_VALUE._serialized_end=4767
-  _EXECUTEOPERATIONMETADATA._serialized_start=4770
-  _EXECUTEOPERATIONMETADATA._serialized_end=4986
-  _WAITEXECUTIONREQUEST._serialized_start=4988
-  _WAITEXECUTIONREQUEST._serialized_end=5024
-  _GETACTIONRESULTREQUEST._serialized_start=5027
-  _GETACTIONRESULTREQUEST._serialized_end=5213
-  _UPDATEACTIONRESULTREQUEST._serialized_start=5216
-  _UPDATEACTIONRESULTREQUEST._serialized_end=5483
-  _FINDMISSINGBLOBSREQUEST._serialized_start=5485
-  _FINDMISSINGBLOBSREQUEST._serialized_end=5596
-  _FINDMISSINGBLOBSRESPONSE._serialized_start=5598
-  _FINDMISSINGBLOBSRESPONSE._serialized_end=5695
-  _BATCHUPDATEBLOBSREQUEST._serialized_start=5698
-  _BATCHUPDATEBLOBSREQUEST._serialized_end=5912
-  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_start=5832
-  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_end=5912
-  _BATCHUPDATEBLOBSRESPONSE._serialized_start=5915
-  _BATCHUPDATEBLOBSRESPONSE._serialized_end=6133
-  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_start=6030
-  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_end=6133
-  _BATCHREADBLOBSREQUEST._serialized_start=6135
-  _BATCHREADBLOBSREQUEST._serialized_end=6239
-  _BATCHREADBLOBSRESPONSE._serialized_start=6242
-  _BATCHREADBLOBSRESPONSE._serialized_end=6470
-  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_start=6353
-  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_end=6470
-  _GETTREEREQUEST._serialized_start=6473
-  _GETTREEREQUEST._serialized_end=6613
-  _GETTREERESPONSE._serialized_start=6615
-  _GETTREERESPONSE._serialized_end=6722
-  _GETCAPABILITIESREQUEST._serialized_start=6724
-  _GETCAPABILITIESREQUEST._serialized_end=6771
-  _SERVERCAPABILITIES._serialized_start=6774
-  _SERVERCAPABILITIES._serialized_end=7129
-  _DIGESTFUNCTION._serialized_start=7131
-  _DIGESTFUNCTION._serialized_end=7233
-  _DIGESTFUNCTION_VALUE._serialized_start=7149
-  _DIGESTFUNCTION_VALUE._serialized_end=7233
-  _ACTIONCACHEUPDATECAPABILITIES._serialized_start=7235
-  _ACTIONCACHEUPDATECAPABILITIES._serialized_end=7290
-  _PRIORITYCAPABILITIES._serialized_start=7293
-  _PRIORITYCAPABILITIES._serialized_end=7465
-  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_start=7406
-  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_end=7465
-  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_start=7467
-  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_end=7547
-  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_start=7498
-  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_end=7547
-  _CACHECAPABILITIES._serialized_start=7550
-  _CACHECAPABILITIES._serialized_end=7991
-  _EXECUTIONCAPABILITIES._serialized_start=7994
-  _EXECUTIONCAPABILITIES._serialized_end=8250
-  _TOOLDETAILS._serialized_start=8252
-  _TOOLDETAILS._serialized_end=8306
-  _REQUESTMETADATA._serialized_start=8309
-  _REQUESTMETADATA._serialized_end=8476
-  _EXECUTION._serialized_start=8479
-  _EXECUTION._serialized_end=8792
-  _ACTIONCACHE._serialized_start=8795
-  _ACTIONCACHE._serialized_end=9265
-  _CONTENTADDRESSABLESTORAGE._serialized_start=9268
-  _CONTENTADDRESSABLESTORAGE._serialized_end=10063
-  _CAPABILITIES._serialized_start=10066
-  _CAPABILITIES._serialized_end=10255
+  _ACTION._serialized_start=341
+  _ACTION._serialized_end=635
+  _COMMAND._serialized_start=638
+  _COMMAND._serialized_end=1003
+  _COMMAND_ENVIRONMENTVARIABLE._serialized_start=953
+  _COMMAND_ENVIRONMENTVARIABLE._serialized_end=1003
+  _PLATFORM._serialized_start=1005
+  _PLATFORM._serialized_end=1128
+  _PLATFORM_PROPERTY._serialized_start=1089
+  _PLATFORM_PROPERTY._serialized_end=1128
+  _DIRECTORY._serialized_start=1131
+  _DIRECTORY._serialized_end=1413
+  _NODEPROPERTY._serialized_start=1415
+  _NODEPROPERTY._serialized_end=1458
+  _NODEPROPERTIES._serialized_start=1461
+  _NODEPROPERTIES._serialized_end=1636
+  _FILENODE._serialized_start=1639
+  _FILENODE._serialized_end=1829
+  _DIRECTORYNODE._serialized_start=1831
+  _DIRECTORYNODE._serialized_end=1917
+  _SYMLINKNODE._serialized_start=1919
+  _SYMLINKNODE._serialized_end=2042
+  _DIGEST._serialized_start=2044
+  _DIGEST._serialized_end=2086
+  _EXECUTEDACTIONMETADATA._serialized_start=2089
+  _EXECUTEDACTIONMETADATA._serialized_end=2822
+  _ACTIONRESULT._serialized_start=2825
+  _ACTIONRESULT._serialized_end=3496
+  _OUTPUTFILE._serialized_start=3499
+  _OUTPUTFILE._serialized_end=3709
+  _TREE._serialized_start=3711
+  _TREE._serialized_end=3837
+  _OUTPUTDIRECTORY._serialized_start=3839
+  _OUTPUTDIRECTORY._serialized_end=3938
+  _OUTPUTSYMLINK._serialized_start=3940
+  _OUTPUTSYMLINK._serialized_end=4065
+  _EXECUTIONPOLICY._serialized_start=4067
+  _EXECUTIONPOLICY._serialized_end=4102
+  _RESULTSCACHEPOLICY._serialized_start=4104
+  _RESULTSCACHEPOLICY._serialized_end=4142
+  _EXECUTEREQUEST._serialized_start=4145
+  _EXECUTEREQUEST._serialized_end=4452
+  _LOGFILE._serialized_start=4454
+  _LOGFILE._serialized_end=4544
+  _EXECUTERESPONSE._serialized_start=4547
+  _EXECUTERESPONSE._serialized_end=4883
+  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_start=4792
+  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_end=4883
+  _EXECUTIONSTAGE._serialized_start=4885
+  _EXECUTIONSTAGE._serialized_end=4982
+  _EXECUTIONSTAGE_VALUE._serialized_start=4903
+  _EXECUTIONSTAGE_VALUE._serialized_end=4982
+  _EXECUTEOPERATIONMETADATA._serialized_start=4985
+  _EXECUTEOPERATIONMETADATA._serialized_end=5201
+  _WAITEXECUTIONREQUEST._serialized_start=5203
+  _WAITEXECUTIONREQUEST._serialized_end=5239
+  _GETACTIONRESULTREQUEST._serialized_start=5242
+  _GETACTIONRESULTREQUEST._serialized_end=5428
+  _UPDATEACTIONRESULTREQUEST._serialized_start=5431
+  _UPDATEACTIONRESULTREQUEST._serialized_end=5698
+  _FINDMISSINGBLOBSREQUEST._serialized_start=5700
+  _FINDMISSINGBLOBSREQUEST._serialized_end=5811
+  _FINDMISSINGBLOBSRESPONSE._serialized_start=5813
+  _FINDMISSINGBLOBSRESPONSE._serialized_end=5910
+  _BATCHUPDATEBLOBSREQUEST._serialized_start=5913
+  _BATCHUPDATEBLOBSREQUEST._serialized_end=6199
+  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_start=6048
+  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_end=6199
+  _BATCHUPDATEBLOBSRESPONSE._serialized_start=6202
+  _BATCHUPDATEBLOBSRESPONSE._serialized_end=6420
+  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_start=6317
+  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_end=6420
+  _BATCHREADBLOBSREQUEST._serialized_start=6423
+  _BATCHREADBLOBSREQUEST._serialized_end=6610
+  _BATCHREADBLOBSRESPONSE._serialized_start=6613
+  _BATCHREADBLOBSRESPONSE._serialized_end=6913
+  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_start=6725
+  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_end=6913
+  _GETTREEREQUEST._serialized_start=6916
+  _GETTREEREQUEST._serialized_end=7056
+  _GETTREERESPONSE._serialized_start=7058
+  _GETTREERESPONSE._serialized_end=7165
+  _GETCAPABILITIESREQUEST._serialized_start=7167
+  _GETCAPABILITIESREQUEST._serialized_end=7214
+  _SERVERCAPABILITIES._serialized_start=7217
+  _SERVERCAPABILITIES._serialized_end=7572
+  _DIGESTFUNCTION._serialized_start=7574
+  _DIGESTFUNCTION._serialized_end=7689
+  _DIGESTFUNCTION_VALUE._serialized_start=7592
+  _DIGESTFUNCTION_VALUE._serialized_end=7689
+  _ACTIONCACHEUPDATECAPABILITIES._serialized_start=7691
+  _ACTIONCACHEUPDATECAPABILITIES._serialized_end=7746
+  _PRIORITYCAPABILITIES._serialized_start=7749
+  _PRIORITYCAPABILITIES._serialized_end=7921
+  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_start=7862
+  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_end=7921
+  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_start=7923
+  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_end=8003
+  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_start=7954
+  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_end=8003
+  _COMPRESSOR._serialized_start=8005
+  _COMPRESSOR._serialized_end=8063
+  _COMPRESSOR_VALUE._serialized_start=8019
+  _COMPRESSOR_VALUE._serialized_end=8063
+  _CACHECAPABILITIES._serialized_start=8066
+  _CACHECAPABILITIES._serialized_end=8685
+  _EXECUTIONCAPABILITIES._serialized_start=8688
+  _EXECUTIONCAPABILITIES._serialized_end=8944
+  _TOOLDETAILS._serialized_start=8946
+  _TOOLDETAILS._serialized_end=9000
+  _REQUESTMETADATA._serialized_start=9003
+  _REQUESTMETADATA._serialized_end=9240
+  _EXECUTION._serialized_start=9243
+  _EXECUTION._serialized_end=9556
+  _ACTIONCACHE._serialized_start=9559
+  _ACTIONCACHE._serialized_end=10029
+  _CONTENTADDRESSABLESTORAGE._serialized_start=10032
+  _CONTENTADDRESSABLESTORAGE._serialized_end=10827
+  _CAPABILITIES._serialized_start=10830
+  _CAPABILITIES._serialized_end=11019
 # @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
index 050eb8d7d..5225958ae 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
@@ -110,6 +110,11 @@ class ExecutionServicer(object):
         where, for each requested blob not present in the CAS, there is a
         `Violation` with a `type` of `MISSING` and a `subject` of
         `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+
+        The server does not need to guarantee that a call to this method leads to
+        at most one execution of the action. The server MAY execute the action
+        multiple times, potentially in parallel. These redundant executions MAY
+        continue to run, even if the operation is completed.
         """
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
         context.set_details('Method not implemented!')
@@ -256,7 +261,7 @@ class ActionCacheServicer(object):
         [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
         are available at the time of returning the
         [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be
-        for some period of time afterwards. The TTLs of the referenced blobs SHOULD be increased
+        for some period of time afterwards. The lifetimes of the referenced blobs SHOULD be increased
         if necessary and applicable.
 
         Errors:
@@ -277,6 +282,9 @@ class ActionCacheServicer(object):
         [Command][build.bazel.remote.execution.v2.Command], into the
         `ContentAddressableStorage`.
 
+        Server implementations MAY modify the
+        `UpdateActionResultRequest.action_result` and return an equivalent value.
+
         Errors:
 
         * `INVALID_ARGUMENT`: One or more arguments are invalid.
@@ -381,47 +389,108 @@ class ContentAddressableStorageStub(object):
 
     For small file uploads the client should group them together and call
     [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+
     For large uploads, the client must use the
-    [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
-    `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
-    where `instance_name` is as described in the next paragraph, `uuid` is a
-    version 4 UUID generated by the client, and `hash` and `size` are the
-    [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
-    `uuid` is used only to avoid collisions when multiple clients try to upload
-    the same file (or the same client tries to upload the file multiple times at
-    once on different threads), so the client MAY reuse the `uuid` for uploading
-    different blobs. The `resource_name` may optionally have a trailing filename
-    (or other metadata) for a client to use if it is storing URLs, as in
-    `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
-    after the `size` is ignored.
-
-    A single server MAY support multiple instances of the execution system, each
-    with their own workers, storage, cache, etc. The exact relationship between
-    instances is up to the server. If the server does, then the `instance_name`
-    is an identifier, possibly containing multiple path segments, used to
-    distinguish between the various instances on the server, in a manner defined
-    by the server. For servers which do not support multiple instances, then the
-    `instance_name` is the empty path and the leading slash is omitted, so that
-    the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+    [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
+
+    For uncompressed data, the `WriteRequest.resource_name` is of the following form:
+    `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+
+    Where:
+    * `instance_name` is an identifier, possibly containing multiple path
+    segments, used to distinguish between the various instances on the server,
+    in a manner defined by the server. If it is the empty path, the leading
+    slash is omitted, so that the `resource_name` becomes
+    `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
     To simplify parsing, a path segment cannot equal any of the following
-    keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
-    `capabilities`.
+    keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
+    `capabilities` or `compressed-blobs`.
+    * `uuid` is a version 4 UUID generated by the client, used to avoid
+    collisions between concurrent uploads of the same data. Clients MAY
+    reuse the same `uuid` for uploading different blobs.
+    * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
+    of the data being uploaded.
+    * `optional_metadata` is implementation specific data, which clients MAY omit.
+    Servers MAY ignore this metadata.
+
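
To make the shape of these names concrete, here is a hypothetical helper for
the uncompressed form (`upload_resource_name` is illustrative and not part of
the generated code; `digest` is assumed to be a `remote_execution_pb2.Digest`):

    import uuid

    def upload_resource_name(instance_name, digest, optional_metadata=None):
        # Compose a ByteStream Write resource name such as
        # "main/uploads/<uuid>/blobs/<hash>/<size>"; the instance segment and
        # the trailing metadata are both optional.
        segments = [instance_name] if instance_name else []
        segments += ["uploads", str(uuid.uuid4()), "blobs",
                     digest.hash, str(digest.size_bytes)]
        if optional_metadata:
            segments.append(optional_metadata)  # servers MAY ignore this
        return "/".join(segments)
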
+    Data can alternatively be uploaded in compressed form, with the following
+    `WriteRequest.resource_name` form:
+    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+
+    Where:
+    * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+    * `compressor` is a lowercase string form of a `Compressor.Value` enum
+    other than `identity`, which is supported by the server and advertised in
+    [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
+    * `uncompressed_hash` and `uncompressed_size` refer to the
+    [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+    uploaded, once uncompressed. Servers MUST verify that these match
+    the uploaded data once uncompressed, and MUST return an
+    `INVALID_ARGUMENT` error in the case of mismatch.
+
+    Note that when writing compressed blobs, the `WriteRequest.write_offset` in
+    the initial request in a stream refers to the offset in the uncompressed form
+    of the blob. In subsequent requests, `WriteRequest.write_offset` MUST be the
+    sum of the first request's `WriteRequest.write_offset` and the total size of
+    all the compressed data bundles in the previous requests.
+    Note that this mixes an uncompressed offset with a compressed byte length,
+    which is nonsensical, but it is done to fit the semantics of the existing
+    ByteStream protocol.
+
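
A short generator sketch of this offset bookkeeping, assuming the blob has
already been split into a list of compressed chunks (the helper and its
arguments are hypothetical):

    from buildstream._protos.google.bytestream import bytestream_pb2

    def compressed_write_requests(resource_name, compressed_chunks):
        # The first request carries the offset into the *uncompressed* blob
        # (0 for a fresh upload); every subsequent request advances by the
        # number of *compressed* bytes already sent, as required above.
        offset = 0
        last = len(compressed_chunks) - 1
        for i, chunk in enumerate(compressed_chunks):
            yield bytestream_pb2.WriteRequest(
                resource_name=resource_name if i == 0 else "",
                write_offset=offset,
                data=chunk,
                finish_write=(i == last),
            )
            offset += len(chunk)
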
+    Uploads of the same data MAY occur concurrently in any form, compressed or
+    uncompressed.
+
+    Clients SHOULD NOT use gRPC-level compression for ByteStream API `Write`
+    calls of compressed blobs, since this would compress already-compressed data.
 
     When attempting an upload, if another client has already completed the upload
     (which may occur in the middle of a single upload if another client uploads
-    the same blob concurrently), the request will terminate immediately with
-    a response whose `committed_size` is the full size of the uploaded file
-    (regardless of how much data was transmitted by the client). If the client
-    completes the upload but the
+    the same blob concurrently), the request will terminate immediately without
+    error, and with a response whose `committed_size` is the value `-1` if this
+    is a compressed upload, or with the full size of the uploaded file if this is
+    an uncompressed upload (regardless of how much data was transmitted by the
+    client). If the client completes the upload but the
     [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
     `INVALID_ARGUMENT` error will be returned. In either case, the client should
     not attempt to retry the upload.
 
-    For downloading blobs, the client must use the
-    [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
-    a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
-    `instance_name` is the instance name (see above), and `hash` and `size` are
-    the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+    Small downloads can be grouped and requested in a batch via
+    [BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+
+    For large downloads, the client must use the
+    [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
+
+    For uncompressed data, the `ReadRequest.resource_name` is of the following form:
+    `{instance_name}/blobs/{hash}/{size}`
+    Where `instance_name`, `hash` and `size` are defined as for uploads.
+
+    Data can alternatively be downloaded in compressed form, with the following
+    `ReadRequest.resource_name` form:
+    `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+
+    Where:
+    * `instance_name` and `compressor` are defined as for uploads.
+    * `uncompressed_hash` and `uncompressed_size` refer to the
+    [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+    downloaded, once uncompressed. Clients MUST verify that these match
+    the downloaded data once uncompressed, and take appropriate steps in
+    the case of failure such as retrying a limited number of times or
+    surfacing an error to the user.
+
+    When downloading compressed blobs:
+    * `ReadRequest.read_offset` refers to the offset in the uncompressed form
+    of the blob.
+    * Servers MUST return `INVALID_ARGUMENT` if `ReadRequest.read_limit` is
+    non-zero.
+    * Servers MAY use any compression level they choose, including different
+    levels for different blobs (e.g. choosing a level designed for maximum
+    speed for data known to be incompressible).
+    * Clients SHOULD NOT use gRPC-level compression, since this would compress
+    already-compressed data.
+
+    Servers MUST be able to provide data for all recently advertised blobs in
+    each of the compression formats that the server supports, as well as in
+    uncompressed form.
 
     The lifetime of entries in the CAS is implementation specific, but it SHOULD
     be long enough to allow for newly-added and recently looked-up entries to be
@@ -484,47 +553,108 @@ class ContentAddressableStorageServicer(object):
 
     For small file uploads the client should group them together and call
     [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+
     For large uploads, the client must use the
-    [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
-    `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
-    where `instance_name` is as described in the next paragraph, `uuid` is a
-    version 4 UUID generated by the client, and `hash` and `size` are the
-    [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
-    `uuid` is used only to avoid collisions when multiple clients try to upload
-    the same file (or the same client tries to upload the file multiple times at
-    once on different threads), so the client MAY reuse the `uuid` for uploading
-    different blobs. The `resource_name` may optionally have a trailing filename
-    (or other metadata) for a client to use if it is storing URLs, as in
-    `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
-    after the `size` is ignored.
-
-    A single server MAY support multiple instances of the execution system, each
-    with their own workers, storage, cache, etc. The exact relationship between
-    instances is up to the server. If the server does, then the `instance_name`
-    is an identifier, possibly containing multiple path segments, used to
-    distinguish between the various instances on the server, in a manner defined
-    by the server. For servers which do not support multiple instances, then the
-    `instance_name` is the empty path and the leading slash is omitted, so that
-    the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+    [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
+
+    For uncompressed data, the `WriteRequest.resource_name` is of the following form:
+    `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+
+    Where:
+    * `instance_name` is an identifier, possibly containing multiple path
+    segments, used to distinguish between the various instances on the server,
+    in a manner defined by the server. If it is the empty path, the leading
+    slash is omitted, so that the `resource_name` becomes
+    `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
     To simplify parsing, a path segment cannot equal any of the following
-    keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
-    `capabilities`.
+    keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
+    `capabilities` or `compressed-blobs`.
+    * `uuid` is a version 4 UUID generated by the client, used to avoid
+    collisions between concurrent uploads of the same data. Clients MAY
+    reuse the same `uuid` for uploading different blobs.
+    * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
+    of the data being uploaded.
+    * `optional_metadata` is implementation specific data, which clients MAY omit.
+    Servers MAY ignore this metadata.
+
+    Data can alternatively be uploaded in compressed form, with the following
+    `WriteRequest.resource_name` form:
+    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+
+    Where:
+    * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+    * `compressor` is a lowercase string form of a `Compressor.Value` enum
+    other than `identity`, which is supported by the server and advertised in
+    [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
+    * `uncompressed_hash` and `uncompressed_size` refer to the
+    [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+    uploaded, once uncompressed. Servers MUST verify that these match
+    the uploaded data once uncompressed, and MUST return an
+    `INVALID_ARGUMENT` error in the case of mismatch.
+
+    Note that when writing compressed blobs, the `WriteRequest.write_offset` in
+    the initial request in a stream refers to the offset in the uncompressed form
+    of the blob. In subsequent requests, `WriteRequest.write_offset` MUST be the
+    sum of the first request's `WriteRequest.write_offset` and the total size of
+    all the compressed data bundles in the previous requests.
+    Note that this mixes an uncompressed offset with a compressed byte length,
+    which is nonsensical, but it is done to fit the semantics of the existing
+    ByteStream protocol.
+
+    Uploads of the same data MAY occur concurrently in any form, compressed or
+    uncompressed.
+
+    Clients SHOULD NOT use gRPC-level compression for ByteStream API `Write`
+    calls of compressed blobs, since this would compress already-compressed data.
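
On the server side, both name forms can be recognized with a single pattern;
the regular expression below is an illustrative sketch, not a normative
grammar (it relies on the keyword restriction above to locate the `uploads`
segment unambiguously):

    import re

    _WRITE_RESOURCE_RE = re.compile(
        r"^(?:(?P<instance>.+)/)?uploads/(?P<uuid>[^/]+)/"
        r"(?:blobs|compressed-blobs/(?P<compressor>[^/]+))/"
        r"(?P<hash>[0-9a-f]+)/(?P<size>\d+)(?:/.*)?$"
    )

    def parse_write_resource_name(name):
        # Returns instance/uuid/compressor/hash/size; compressor is None for
        # the uncompressed form, and trailing optional_metadata is ignored.
        match = _WRITE_RESOURCE_RE.match(name)
        if match is None:
            raise ValueError("malformed Write resource name: " + name)
        return match.groupdict()
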
 
     When attempting an upload, if another client has already completed the upload
     (which may occur in the middle of a single upload if another client uploads
-    the same blob concurrently), the request will terminate immediately with
-    a response whose `committed_size` is the full size of the uploaded file
-    (regardless of how much data was transmitted by the client). If the client
-    completes the upload but the
+    the same blob concurrently), the request will terminate immediately without
+    error, and with a response whose `committed_size` is the value `-1` if this
+    is a compressed upload, or with the full size of the uploaded file if this is
+    an uncompressed upload (regardless of how much data was transmitted by the
+    client). If the client completes the upload but the
     [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
     `INVALID_ARGUMENT` error will be returned. In either case, the client should
     not attempt to retry the upload.
 
-    For downloading blobs, the client must use the
-    [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
-    a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
-    `instance_name` is the instance name (see above), and `hash` and `size` are
-    the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+    Small downloads can be grouped and requested in a batch via
+    [BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+
+    For large downloads, the client must use the
+    [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
+
+    For uncompressed data, the `ReadRequest.resource_name` is of the following form:
+    `{instance_name}/blobs/{hash}/{size}`
+    Where `instance_name`, `hash` and `size` are defined as for uploads.
+
+    Data can alternatively be downloaded in compressed form, with the following
+    `ReadRequest.resource_name` form:
+    `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+
+    Where:
+    * `instance_name` and `compressor` are defined as for uploads.
+    * `uncompressed_hash` and `uncompressed_size` refer to the
+    [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+    downloaded, once uncompressed. Clients MUST verify that these match
+    the downloaded data once uncompressed, and take appropriate steps in
+    the case of failure such as retrying a limited number of times or
+    surfacing an error to the user.
+
+    When downloading compressed blobs:
+    * `ReadRequest.read_offset` refers to the offset in the uncompressed form
+    of the blob.
+    * Servers MUST return `INVALID_ARGUMENT` if `ReadRequest.read_limit` is
+    non-zero.
+    * Servers MAY use any compression level they choose, including different
+    levels for different blobs (e.g. choosing a level designed for maximum
+    speed for data known to be incompressible).
+    * Clients SHOULD NOT use gRPC-level compression, since this would compress
+    already-compressed data.
+
+    Servers MUST be able to provide data for all recently advertised blobs in
+    each of the compression formats that the server supports, as well as in
+    uncompressed form.
 
     The lifetime of entries in the CAS is implementation specific, but it SHOULD
     be long enough to allow for newly-added and recently looked-up entries to be
@@ -547,7 +677,7 @@ class ContentAddressableStorageServicer(object):
         Clients can use this API before uploading blobs to determine which ones are
         already present in the CAS and do not need to be uploaded again.
 
-        Servers SHOULD increase the TTLs of the referenced blobs if necessary and
+        Servers SHOULD increase the lifetimes of the referenced blobs if necessary and
         applicable.
 
         There are no method-specific errors.
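
A typical pre-upload round trip with the generated stubs might look like the
sketch below (the helper is hypothetical; the stub and message types are from
the generated module):

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2,
        remote_execution_pb2_grpc,
    )

    def missing_digests(channel, instance_name, digests):
        # Ask the CAS which of `digests` still need uploading; as a side
        # effect the server SHOULD refresh the lifetimes of those it has.
        cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)
        response = cas.FindMissingBlobs(
            remote_execution_pb2.FindMissingBlobsRequest(
                instance_name=instance_name,
                blob_digests=digests,
            )
        )
        return list(response.missing_blob_digests)
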
@@ -689,47 +819,108 @@ class ContentAddressableStorage(object):
 
     For small file uploads the client should group them together and call
     [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+
     For large uploads, the client must use the
-    [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
-    `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
-    where `instance_name` is as described in the next paragraph, `uuid` is a
-    version 4 UUID generated by the client, and `hash` and `size` are the
-    [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
-    `uuid` is used only to avoid collisions when multiple clients try to upload
-    the same file (or the same client tries to upload the file multiple times at
-    once on different threads), so the client MAY reuse the `uuid` for uploading
-    different blobs. The `resource_name` may optionally have a trailing filename
-    (or other metadata) for a client to use if it is storing URLs, as in
-    `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
-    after the `size` is ignored.
-
-    A single server MAY support multiple instances of the execution system, each
-    with their own workers, storage, cache, etc. The exact relationship between
-    instances is up to the server. If the server does, then the `instance_name`
-    is an identifier, possibly containing multiple path segments, used to
-    distinguish between the various instances on the server, in a manner defined
-    by the server. For servers which do not support multiple instances, then the
-    `instance_name` is the empty path and the leading slash is omitted, so that
-    the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+    [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
+
+    For uncompressed data, the `WriteRequest.resource_name` is of the following form:
+    `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+
+    Where:
+    * `instance_name` is an identifier, possibly containing multiple path
+    segments, used to distinguish between the various instances on the server,
+    in a manner defined by the server. If it is the empty path, the leading
+    slash is omitted, so that the `resource_name` becomes
+    `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
     To simplify parsing, a path segment cannot equal any of the following
-    keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
-    `capabilities`.
+    keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
+    `capabilities` or `compressed-blobs`.
+    * `uuid` is a version 4 UUID generated by the client, used to avoid
+    collisions between concurrent uploads of the same data. Clients MAY
+    reuse the same `uuid` for uploading different blobs.
+    * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
+    of the data being uploaded.
+    * `optional_metadata` is implementation specific data, which clients MAY omit.
+    Servers MAY ignore this metadata.
+
+    Data can alternatively be uploaded in compressed form, with the following
+    `WriteRequest.resource_name` form:
+    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+
+    Where:
+    * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+    * `compressor` is a lowercase string form of a `Compressor.Value` enum
+    other than `identity`, which is supported by the server and advertised in
+    [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
+    * `uncompressed_hash` and `uncompressed_size` refer to the
+    [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+    uploaded, once uncompressed. Servers MUST verify that these match
+    the uploaded data once uncompressed, and MUST return an
+    `INVALID_ARGUMENT` error in the case of mismatch.
+
+    Note that when writing compressed blobs, the `WriteRequest.write_offset` in
+    the initial request in a stream refers to the offset in the uncompressed form
+    of the blob. In subsequent requests, `WriteRequest.write_offset` MUST be the
+    sum of the first request's `WriteRequest.write_offset` and the total size of
+    all the compressed data bundles in the previous requests.
+    Note that this mixes an uncompressed offset with a compressed byte length,
+    which is nonsensical, but it is done to fit the semantics of the existing
+    ByteStream protocol.
+
+    Uploads of the same data MAY occur concurrently in any form, compressed or
+    uncompressed.
+
+    Clients SHOULD NOT use gRPC-level compression for ByteStream API `Write`
+    calls of compressed blobs, since this would compress already-compressed data.
 
     When attempting an upload, if another client has already completed the upload
     (which may occur in the middle of a single upload if another client uploads
-    the same blob concurrently), the request will terminate immediately with
-    a response whose `committed_size` is the full size of the uploaded file
-    (regardless of how much data was transmitted by the client). If the client
-    completes the upload but the
+    the same blob concurrently), the request will terminate immediately without
+    error, and with a response whose `committed_size` is the value `-1` if this
+    is a compressed upload, or with the full size of the uploaded file if this is
+    an uncompressed upload (regardless of how much data was transmitted by the
+    client). If the client completes the upload but the
     [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
     `INVALID_ARGUMENT` error will be returned. In either case, the client should
     not attempt to retry the upload.
 
-    For downloading blobs, the client must use the
-    [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
-    a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
-    `instance_name` is the instance name (see above), and `hash` and `size` are
-    the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+    Small downloads can be grouped and requested in a batch via
+    [BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+
+    For large downloads, the client must use the
+    [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
+
+    For uncompressed data, the `ReadRequest.resource_name` is of the following form:
+    `{instance_name}/blobs/{hash}/{size}`
+    Where `instance_name`, `hash` and `size` are defined as for uploads.
+
+    Data can alternatively be downloaded in compressed form, with the following
+    `ReadRequest.resource_name` form:
+    `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+
+    Where:
+    * `instance_name` and `compressor` are defined as for uploads.
+    * `uncompressed_hash` and `uncompressed_size` refer to the
+    [Digest][build.bazel.remote.execution.v2.Digest] of the data being
+    downloaded, once uncompressed. Clients MUST verify that these match
+    the downloaded data once uncompressed, and take appropriate steps in
+    the case of failure such as retrying a limited number of times or
+    surfacing an error to the user.
+
+    When downloading compressed blobs:
+    * `ReadRequest.read_offset` refers to the offset in the uncompressed form
+    of the blob.
+    * Servers MUST return `INVALID_ARGUMENT` if `ReadRequest.read_limit` is
+    non-zero.
+    * Servers MAY use any compression level they choose, including different
+    levels for different blobs (e.g. choosing a level designed for maximum
+    speed for data known to be incompressible).
+    * Clients SHOULD NOT use gRPC-level compression, since this would compress
+    already-compressed data.
+
+    Servers MUST be able to provide data for all recently advertised blobs in
+    each of the compression formats that the server supports, as well as in
+    uncompressed form.
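
Pulling the download rules together, here is a client-side sketch of a
compressed read with the mandatory verification step; the compressor (stdlib
zlib) and digest function (SHA-256) are assumptions for illustration only,
and a real client would pick a compressor advertised by the server:

    import hashlib
    import zlib

    from buildstream._protos.google.bytestream import (
        bytestream_pb2,
        bytestream_pb2_grpc,
    )

    def read_compressed_blob(channel, resource_name, uncompressed_hash, uncompressed_size):
        # Stream the compressed blob, decompress incrementally, then verify
        # the uncompressed digest as the spec requires.
        stub = bytestream_pb2_grpc.ByteStreamStub(channel)
        request = bytestream_pb2.ReadRequest(resource_name=resource_name, read_offset=0)
        decompressor = zlib.decompressobj()
        data = b"".join(decompressor.decompress(r.data) for r in stub.Read(request))
        data += decompressor.flush()
        if hashlib.sha256(data).hexdigest() != uncompressed_hash or len(data) != uncompressed_size:
            raise ValueError("blob failed verification; retry or surface an error")
        return data
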
 
     The lifetime of entries in the CAS is implementation specific, but it SHOULD
     be long enough to allow for newly-added and recently looked-up entries to be