| // Copyright 2018 The Fuchsia Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| library fuchsia.pkg; |
| |
| using fuchsia.fxfs; |
| using fuchsia.io; |
| using zx; |
| |
/// This manages the system package cache.
///
/// This is intended to be implemented by the package manager component and used by
/// package resolver components.
@discoverable
closed protocol PackageCache {
    /// Gets the package directory if it is present on the local system. If it is not, the
    /// `missing_blobs` iterator will provide all the blobs in the package that are missing from
    /// the system, and the ability to write those blobs to blobfs. If all the missing blobs are
    /// downloaded and written to by the client, the `dir` directory will be resolved. This method
    /// will return successfully when the package has been fully resolved, or return an error if
    /// the client closes `needed_blobs` or `dir` handle before the package has been resolved.
    ///
    /// This method does not guarantee the missing blobs have been persisted. In order to guarantee
    /// missing blobs are persisted, clients should call [`Sync`].
    ///
    /// Clients must not make concurrent `Get()` calls for the same `meta_far_blob`, even across
    /// different `PackageCache` connections, *unless* the `meta_far_blob` is in base or already
    /// active in the dynamic index. Violating this may result in `Get()` errors.
    ///
    /// + request `meta_far_blob` the blob info for the package's meta.far.
    /// + request `gc_protection` how the package should be protected from garbage collection
    ///   while it is being cached (see [`GcProtection`]).
    /// + request `needed_blobs` an iterator over all the blobs in the package that
    ///   are not present on the system.
    /// + request `dir` the channel on which the package directory will be served.
    /// * error a zx_status value indicating failure. One of the following:
    ///     * `ZX_ERR_UNAVAILABLE` if the client closed the `needed_blobs` handle before
    ///       all the missing blobs were downloaded to the system.
    strict Get(resource struct {
        meta_far_blob BlobInfo;
        gc_protection GcProtection;
        needed_blobs server_end:NeededBlobs;
        dir server_end:<fuchsia.io.Directory>;
    }) -> () error zx.Status;

    /// Gets the package directory for a subpackage.
    /// The connection to the superpackage's package directory must still be open when this is
    /// called.
    /// The returned package will be protected by open package tracking.
    ///
    /// + request `superpackage` the hash of the superpackage's meta.far.
    /// + request `subpackage` the relative package URL of the subpackage.
    /// + request `dir` the channel on which the package directory will be served.
    /// * error a GetSubpackageError value indicating failure.
    strict GetSubpackage(resource struct {
        superpackage BlobId;
        subpackage PackageUrl;
        dir server_end:<fuchsia.io.Directory>;
    }) -> () error GetSubpackageError;

    /// Retrieves a chunk iterator to the base package index.
    ///
    /// + request `iterator` a request for the `PackageIndexIterator` that will return sets of
    ///   `PackageIndexEntry` objects until all packages in the base index have been iterated.
    strict BasePackageIndex(resource struct {
        iterator server_end:PackageIndexIterator;
    });

    /// Retrieves a chunk iterator to the cache package index.
    ///
    /// + request `iterator` a request for the `PackageIndexIterator` that will return sets of
    ///   `PackageIndexEntry` objects until all packages in the cache index have been iterated.
    strict CachePackageIndex(resource struct {
        iterator server_end:PackageIndexIterator;
    });

    /// Synchronizes updates to the cached packages to the underlying persistent storage.
    ///
    /// * error a zx_status value indicating failure. One of the following:
    ///     * `ZX_ERR_INTERNAL` if the sync fails.
    strict Sync() -> () error zx.Status;
};
| |
/// How the package served by [`PackageCache.Get`] should be protected from
/// garbage collection (GC).
type GcProtection = strict enum {
    /// Package will be protected from GC as long as there is an open connection to the directory.
    OPEN_PACKAGE_TRACKING = 1;

    /// Package will be protected from GC as long as it is in the Retained index, which is
    /// controlled by the [`RetainedPackages`] protocol. Client is responsible for ensuring
    /// the package is in the Retained index.
    RETAINED = 2;
};
| |
/// Error type for [`PackageCache.GetSubpackage`].
type GetSubpackageError = strict enum {
    /// The superpackage was not open. [`PackageCache.GetSubpackage`] requires the connection
    /// to the superpackage's package directory to still be open when it is called.
    SUPERPACKAGE_CLOSED = 1;

    /// The requested subpackage does not exist.
    DOES_NOT_EXIST = 2;

    /// An unspecified error occurred.
    INTERNAL = 3;
};
| |
/// Error type for [`NeededBlobs.OpenMetaBlob`] and [`NeededBlobs.OpenBlob`].
type OpenBlobError = strict enum {
    /// There is insufficient storage space available to persist this blob.
    OUT_OF_SPACE = 1;

    /// This blob is already open for write by another cache operation.
    CONCURRENT_WRITE = 2;

    /// An unspecified error occurred during underlying I/O.
    UNSPECIFIED_IO = 3;

    /// An unspecified error occurred.
    INTERNAL = 4;
};
| |
/// Error type for [`NeededBlobs.BlobWritten`].
type BlobWrittenError = strict enum {
    /// Client called BlobWritten but the blob was not readable in blobfs.
    NOT_WRITTEN = 1;

    /// Client called BlobWritten for a blob it has not yet opened.
    UNOPENED_BLOB = 2;
};
| |
/// Represents the transaction for caching a particular package.
///
/// Server expects client to follow the normal operation sequence defined below.
/// Violating the protocol (e.g. calling wrong methods at the wrong time) will result
/// in the channel being closed by the package cache with a `ZX_ERR_BAD_STATE` epitaph
/// and aborting the package cache operation.
/// If a fatal error occurs at any step, server will close the channel, and client
/// should not proceed with the sequence.
/// Non-fatal errors could be retried, as long as the channel remains open.
///
/// Normal operation sequence:
/// 1. Clients should start by requesting to `OpenMetaBlob()`, and fetch and write
///    the metadata blob if needed, calling `BlobWritten()` when done to indicate the
///    write is complete.
/// 2. `GetMissingBlobs()` should be used to determine which blobs need to be
///    fetched and written.
/// 3. Each of the missing blobs needs to be written using `OpenBlob()` and
///    `BlobWritten()` should be called after each blob is written.
///
/// Clients are responsible for avoiding concurrent creation of the same blob if the underlying
/// blobstore does not support it.
/// This manifests as the following constraints (applied per `BlobId`):
/// 1. If the `BlobWriter` returned by calls to `Open[Meta]Blob` is the `file` variant, once Clients
///    call Resize on a file, they must close *all* file connections obtained from `Open[Meta]Blob`
///    before calling `Open[Meta]Blob` again.
/// 2. If the `BlobWriter` returned by calls to `Open[Meta]Blob` is the `writer` variant, Clients
///    must not write all the bytes to more than one of the `writer`s.
/// This applies per `BlobId` to all `BlobWriter`s returned by all calls to `Open[Meta]Blob` across
/// all `NeededBlobs` connections across all `PackageCache` connections.
/// Once c++blobfs support is removed and fxblob is changed to support duplicate concurrent creation
/// requests (https://fxbug.dev/335870456#comment9), this requirement can be dropped.
///
/// Once all needed blobs are written by the client, the package cache will
/// complete the pending [`PackageCache.Get`] request and close this channel
/// with a `ZX_OK` epitaph.
closed protocol NeededBlobs {
    /// Opens the package's metadata blob for writing. `GetMissingBlobs()`
    /// should not be called until writing the meta blob completes, or this
    /// request responds with an absent `writer` (meaning the meta blob does
    /// not need to be written).
    ///
    /// If the package was already cached, server will close the channel with a
    /// `ZX_OK` epitaph.
    ///
    /// - response `writer` is used to write the blob. If `writer` is absent,
    ///   the blob is already cached and so does not need to be written.
    /// * error an OpenBlobError indicating failure. Clients may retry this
    ///   request, though the server end may abort this cache operation on
    ///   errors it considers to be fatal.
    strict OpenMetaBlob() -> (resource struct {
        writer BlobWriter:optional;
    }) error OpenBlobError;

    /// Returns an iterator of blobs that are not present on the system that
    /// must be written using the `OpenBlob` request before the package will be
    /// fully cached.
    ///
    /// Client should call `OpenMetaBlob`, and write it if needed, before
    /// calling `GetMissingBlobs`.
    ///
    /// A client should make this request no more than once per `NeededBlobs`
    /// connection. Once all blobs yielded by this iterator are written, the
    /// package open request will complete.
    ///
    /// New items may be added to the obtained `BlobInfoIterator` as the client
    /// calls `OpenBlob`, so, to guarantee termination of the iterator, clients
    /// should call `OpenBlob` concurrently with reading the iterator.
    ///
    /// + request `iterator` a request for an iterator of [`BlobInfo`] of blobs
    ///   that the client should try to write.
    strict GetMissingBlobs(resource struct {
        iterator server_end:BlobInfoIterator;
    });

    /// Opens a blob for writing.
    ///
    /// + request `blob_id` the blob id describing this blob.
    /// - response `writer` is used to write the blob. If `writer` is absent,
    ///   the blob is already cached and so does not need to be written.
    /// * error an OpenBlobError indicating failure. Clients may retry this
    ///   request, though the server end may abort this cache operation on
    ///   errors it considers to be fatal.
    strict OpenBlob(struct {
        blob_id BlobId;
    }) -> (resource struct {
        writer BlobWriter:optional;
    }) error OpenBlobError;

    /// Indicates that a blob opened by `Open[Meta]Blob` has been successfully
    /// written.
    ///
    /// A client should call this once the blob has been fully written using
    /// the `writer` returned by `Open[Meta]Blob`.
    ///
    /// + request `blob_id` the blob id describing this blob.
    /// * error a BlobWrittenError indicating failure. Clients may retry the
    ///   `Open[Meta]Blob` request that prompted this call, though the server
    ///   end may abort this cache operation on errors it considers to be fatal.
    strict BlobWritten(struct {
        blob_id BlobId;
    }) -> () error BlobWrittenError;

    /// Aborts this caching operation for the package.
    ///
    /// Any open blobs and any missing blobs iterator will be closed. Any `dir`
    /// provided to the associated [`PackageCache.Get`] request will also be
    /// closed. Once this request is acknowledged, this channel will be closed.
    ///
    /// Note, dropping this NeededBlobs channel without writing all needed blobs
    /// will also abort the package cache operation. However, this API provides
    /// the ability to wait for the operation to be torn down.
    strict Abort() -> ();
};
| |
/// Used to write a blob to the underlying storage.
type BlobWriter = strict resource union {
    /// To write a blob with `file`:
    /// 1. If the blob is uncompressed, use `fuchsia.io/File.Resize` to set
    ///    the blob's uncompressed size.
    /// 2. Use `fuchsia.io/File.Write` to write the blob's contents from
    ///    start to finish (seeks are not supported).
    /// A corrupt blob is indicated by a `Write()` (usually the final write)
    /// failing with `ZX_ERR_IO_DATA_INTEGRITY`.
    1: file client_end:fuchsia.io.File;
    /// To write a blob with `writer`, follow the instructions on the
    /// `fuchsia.fxfs.BlobWriter` protocol.
    2: writer client_end:fuchsia.fxfs.BlobWriter;
};
| |
/// A chunked iterator of [`BlobInfo`], allowing transfer of more [`BlobInfo`]s
/// than can fit in a single FIDL message.
closed protocol BlobInfoIterator {
    /// Responds with the next chunk of [`BlobInfo`]s. When the iterator is
    /// exhausted, responds with an empty vector and closes the connection.
    ///
    /// - response `blobs` the next chunk of [`BlobInfo`]s.
    strict Next() -> (struct {
        blobs vector<BlobInfo>:MAX;
    });
};
| |
/// A chunk iterator for the package index. This is required because it is possible for the
/// package index to be too large to send over in a single request (over 64KiB).
closed protocol PackageIndexIterator {
    /// Returns the next chunk of package index entries. When the iterator is exhausted,
    /// this returns an empty vector.
    ///
    /// - response `entries` the next chunk of entries in the package index.
    strict Next() -> (struct {
        entries vector<PackageIndexEntry>:MAX;
    });
};
| |
/// Manages the set of retained packages.
///
/// Retained packages will not be removed from the package cache, even if they
/// aren't fully present. There is only a single set active at once, and the
/// provided APIs for configuring the set atomically replace that set. On boot,
/// the retained package set is always initialized to the empty set.
/// Documentation on [garbage collection](
/// https://fuchsia.dev/fuchsia-src/concepts/packages/garbage_collection) contains
/// details on various types of package indexes (static, retained, etc) and
/// describes when a package will be garbage collected or retained.
@discoverable
closed protocol RetainedPackages {
    /// Atomically clear the retained package set, releasing any previously
    /// retained packages.
    strict Clear() -> ();

    /// Atomically replace the retained package set with the [package hashes](
    /// https://fuchsia.dev/fuchsia-src/concepts/packages/package_url#package-hash)
    /// provided by the given iterator.
    /// Duplicate IDs provided will be merged and processed as a single one.
    ///
    /// + request `iterator` an iterator of package blob IDs that should be
    ///   retained.
    strict Replace(resource struct {
        iterator client_end:BlobIdIterator;
    }) -> ();
};
| |
/// A chunked iterator of blob IDs, allowing transfer of more blob IDs than can
/// fit in a single FIDL message.
closed protocol BlobIdIterator {
    /// Responds with the next chunk of blob IDs. When the iterator is
    /// exhausted, responds with an empty vector and closes the connection.
    ///
    /// - response `blobs` the next chunk of blob IDs.
    strict Next() -> (struct {
        blobs vector<BlobId>:MAX;
    });
};