[release] Snap to da8af5bb55
Change-Id: I2bacf1059afb0cb268f66b66737cb0753e6d819a
diff --git a/build/bazel/scripts/bazel_action.py b/build/bazel/scripts/bazel_action.py
index 815db3d..d30d573 100644
--- a/build/bazel/scripts/bazel_action.py
+++ b/build/bazel/scripts/bazel_action.py
@@ -1130,7 +1130,10 @@
+ os.path.abspath(args.bazel_build_events_log_json),
]
- cmd += configured_args + args.bazel_targets
+ # Always use --verbose_failures to get relevant information when
+ # Bazel commands fail. This is necessary to make the log output of
+ # CQ/CI bots usable.
+ cmd += configured_args + args.bazel_targets + ["--verbose_failures"]
ret = subprocess.run(cmd)
if ret.returncode != 0:
diff --git a/docs/glossary/_glossary.yaml b/docs/glossary/_glossary.yaml
index e4fa02f..712af60 100644
--- a/docs/glossary/_glossary.yaml
+++ b/docs/glossary/_glossary.yaml
@@ -530,7 +530,7 @@
- term: "Configuration Capability"
short_description: "A
- A <a href=\"/docs/glossary#capability\">capability</a> that provides a data
+ <a href=\"/docs/glossary#capability\">capability</a> that provides a data
value that a component can use for configuration. A component
<a href=\"/docs/glossary#use\">using</a> a configuration capability will be
able to read the value in it's structured configuration."
@@ -618,12 +618,12 @@
- term: "Directory capability"
short_description: "A
- A <a href=\"/docs/glossary#capability\">capability</a> that permits access to a filesystem
+ <a href=\"/docs/glossary#capability\">capability</a> that permits access to a filesystem
directory by adding it to the <a href=\"/docs/glossary#namespace\">namespace</a> of the
<a href=\"/docs/glossary#component-instance\">component instance</a>
that <a href=\"/docs/glossary#use\">uses</a> it."
full_description: "A
- A <a href=\"/docs/glossary#capability\">capability</a> that permits access to a filesystem
+ <a href=\"/docs/glossary#capability\">capability</a> that permits access to a filesystem
directory by adding it to the <a href=\"/docs/glossary#namespace\">namespace</a> of the
<a href=\"/docs/glossary#component-instance\">component instance</a>
that <a href=\"/docs/glossary#use\">uses</a> it. If multiple
diff --git a/sdk/lib/ld/test/modules/BUILD.gn b/sdk/lib/ld/test/modules/BUILD.gn
index fb6c6a19..c755405 100644
--- a/sdk/lib/ld/test/modules/BUILD.gn
+++ b/sdk/lib/ld/test/modules/BUILD.gn
@@ -102,6 +102,14 @@
}
}
+# On POSIX systems, a dlopen'd test module's DT_NEEDED dependencies will be
+# looked for in a search path. Embedding DT_RUNPATH of $ORIGIN tells it to
+# look first in the directory containing the original dlopen'd module.
+config("runpath-origin") {
+ visibility = [ ":*" ]
+ ldflags = [ "-Wl,--enable-new-dtags,-rpath,\$ORIGIN" ]
+}
+
template("_test_non_executable") {
shlib_target_name = target_name
@@ -175,7 +183,12 @@
template("test_loadable_module") {
_test_non_executable(target_name) {
target_type = "loadable_module"
+ configs = []
forward_variables_from(invoker, "*", [ "target_type" ])
+
+ # Make sure a system dlopen loading the module will look for its
+ # dependencies adjacent to it.
+ configs += [ ":runpath-origin" ]
}
}
@@ -200,6 +213,10 @@
visibility = [ ":*" ]
testonly = true
+ if (is_linux) {
+ configs -= [ "//build/config/linux:implicit-host-libs" ]
+ }
+
deps = []
data_deps = []
forward_variables_from(invoker,
@@ -316,6 +333,10 @@
# test_executable() is built, after variant selection. So it gets the
# right version of ld-startup installed where the test needs to find it.
data_deps = [ "..:ld-startup.test-data($shlib_toolchain)" ]
+ } else if (!is_fuchsia) {
+ # Make sure a system dlopen loading the module will look for its
+ # dependencies adjacent to it.
+ public_configs += [ ":runpath-origin" ]
}
}
}
diff --git a/sdk/lib/sys/cpp/outgoing_directory.cc b/sdk/lib/sys/cpp/outgoing_directory.cc
index 4f56f1a..10d468c 100644
--- a/sdk/lib/sys/cpp/outgoing_directory.cc
+++ b/sdk/lib/sys/cpp/outgoing_directory.cc
@@ -22,7 +22,7 @@
}
vfs::PseudoDir* GetOrCreateDirectory(vfs::PseudoDir* dir, std::string name) {
- vfs::internal::Node* node;
+ vfs::Node* node;
zx_status_t status = dir->Lookup(name, &node);
if (status != ZX_OK) {
return AddNewEmptyDirectory(dir, std::move(name));
@@ -101,7 +101,7 @@
zx_status_t OutgoingDirectory::RemoveNamedService(const std::string& service,
const std::string& instance) const {
- vfs::internal::Node* node;
+ vfs::Node* node;
zx_status_t status = svc_->Lookup(instance, &node);
if (status != ZX_OK) {
return ZX_OK;
diff --git a/sdk/lib/vfs/cpp/BUILD.gn b/sdk/lib/vfs/cpp/BUILD.gn
index 5755bf8..b04dae8 100644
--- a/sdk/lib/vfs/cpp/BUILD.gn
+++ b/sdk/lib/vfs/cpp/BUILD.gn
@@ -14,8 +14,8 @@
sources = [
"composed_service_dir.h",
- "internal/node.h",
"lazy_dir.h",
+ "node.h",
"pseudo_dir.h",
"pseudo_file.h",
"remote_dir.h",
diff --git a/sdk/lib/vfs/cpp/composed_service_dir.h b/sdk/lib/vfs/cpp/composed_service_dir.h
index 19ab0b4..d840fb4 100644
--- a/sdk/lib/vfs/cpp/composed_service_dir.h
+++ b/sdk/lib/vfs/cpp/composed_service_dir.h
@@ -6,7 +6,7 @@
#define LIB_VFS_CPP_COMPOSED_SERVICE_DIR_H_
#include <fuchsia/io/cpp/fidl.h>
-#include <lib/vfs/cpp/internal/node.h>
+#include <lib/vfs/cpp/node.h>
#include <lib/vfs/cpp/service.h>
#include <zircon/assert.h>
@@ -21,11 +21,11 @@
// This class is thread-safe.
//
// TODO(https://fxbug.dev/309685624): Remove when all callers have migrated.
-class ComposedServiceDir final : public internal::Node {
+class ComposedServiceDir final : public Node {
public:
ComposedServiceDir() : Node(MakeComposedServiceDir()) {}
- using internal::Node::Serve;
+ using Node::Serve;
// Sets the fallback directory for services. Services in this directory can be connected to, but
// will not be enumerated. This method may only be called once.
diff --git a/sdk/lib/vfs/cpp/lazy_dir.h b/sdk/lib/vfs/cpp/lazy_dir.h
index 2a1d214..58f01f3 100644
--- a/sdk/lib/vfs/cpp/lazy_dir.h
+++ b/sdk/lib/vfs/cpp/lazy_dir.h
@@ -5,7 +5,7 @@
#ifndef LIB_VFS_CPP_LAZY_DIR_H_
#define LIB_VFS_CPP_LAZY_DIR_H_
-#include <lib/vfs/cpp/internal/node.h>
+#include <lib/vfs/cpp/node.h>
#include <zircon/availability.h>
#include <zircon/compiler.h>
#include <zircon/types.h>
@@ -26,7 +26,7 @@
// requirements.
//
// TODO(https://fxbug.dev/309685624): Remove LazyDir once all out-of-tree users have been migrated.
-class LazyDir : public vfs::internal::Node {
+class LazyDir : public vfs::Node {
public:
LazyDir() : Node(MakeLazyDir(this)) {}
diff --git a/sdk/lib/vfs/cpp/internal/node.h b/sdk/lib/vfs/cpp/node.h
similarity index 80%
rename from sdk/lib/vfs/cpp/internal/node.h
rename to sdk/lib/vfs/cpp/node.h
index e623e20..0e3a24a 100644
--- a/sdk/lib/vfs/cpp/internal/node.h
+++ b/sdk/lib/vfs/cpp/node.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef LIB_VFS_CPP_INTERNAL_NODE_H_
-#define LIB_VFS_CPP_INTERNAL_NODE_H_
+#ifndef LIB_VFS_CPP_NODE_H_
+#define LIB_VFS_CPP_NODE_H_
#include <fuchsia/io/cpp/fidl.h>
#include <lib/async/default.h>
@@ -16,9 +16,6 @@
class ComposedServiceDir;
class LazyDir;
class PseudoDir;
-} // namespace vfs
-
-namespace vfs::internal {
// Represents an object in a file system that communicates via the `fuchsia.io.Node` protocol, and
// potentially supports the composed protocols `fuchsia.io.Directory` and `fuchsia.io.File`.
@@ -65,12 +62,16 @@
vfs_internal_node_t* const handle_;
};
-// TODO(https://fxbug.dev/311176363): Deprecate/remove the following type aliases which are used OOT
-// and move `Node` to the `vfs` namespace.
+namespace internal {
-using Directory = Node;
-using File = Node;
+// TODO(https://fxbug.dev/311176363): Remove the following type aliases when possible.
+using Node ZX_REMOVED_SINCE(1, 19, 20, "Use vfs::Node or a concrete type instead.") = vfs::Node;
+using Directory ZX_REMOVED_SINCE(1, 19, 20,
+ "Use vfs::Node or a concrete type instead.") = vfs::Node;
+using File ZX_REMOVED_SINCE(1, 19, 20, "Use vfs::Node or a concrete type instead.") = vfs::Node;
-} // namespace vfs::internal
+} // namespace internal
-#endif // LIB_VFS_CPP_INTERNAL_NODE_H_
+} // namespace vfs
+
+#endif // LIB_VFS_CPP_NODE_H_
diff --git a/sdk/lib/vfs/cpp/pseudo_dir.h b/sdk/lib/vfs/cpp/pseudo_dir.h
index 7920f08..f9ca0d6 100644
--- a/sdk/lib/vfs/cpp/pseudo_dir.h
+++ b/sdk/lib/vfs/cpp/pseudo_dir.h
@@ -6,7 +6,7 @@
#define LIB_VFS_CPP_PSEUDO_DIR_H_
#include <fuchsia/io/cpp/fidl.h>
-#include <lib/vfs/cpp/internal/node.h>
+#include <lib/vfs/cpp/node.h>
#include <zircon/assert.h>
#include <zircon/compiler.h>
@@ -22,9 +22,9 @@
// create, remove, or rename them.
//
// This class is thread-safe.
-class PseudoDir final : public internal::Node {
+class PseudoDir final : public Node {
public:
- PseudoDir() : internal::Node(CreateDirectory()) {}
+ PseudoDir() : Node(CreateDirectory()) {}
~PseudoDir() override {
// We must close all connections to the nodes this directory owns before destroying them, since
@@ -32,7 +32,7 @@
vfs_internal_node_shutdown(handle_);
}
- using internal::Node::Serve;
+ using Node::Serve;
// Adds a directory entry associating the given `name` with `vn`. The same node may be added
// multiple times with different names. Returns `ZX_ERR_ALREADY_EXISTS` if there is already a node
@@ -113,8 +113,7 @@
// nodes added to this directory. `Lookup()` returns a `vfs::Node*` which callers downcast to the
// concrete node type. The underlying `vfs_internal_node_t` type has no concept of the `vfs::Node`
// type, so we must store them here to allow safe downcasting.
- std::map<std::string, std::shared_ptr<internal::Node>, std::less<>> node_map_
- __TA_GUARDED(mutex_);
+ std::map<std::string, std::shared_ptr<Node>, std::less<>> node_map_ __TA_GUARDED(mutex_);
};
} // namespace vfs
diff --git a/sdk/lib/vfs/cpp/pseudo_file.h b/sdk/lib/vfs/cpp/pseudo_file.h
index c62760a..8f03fdf 100644
--- a/sdk/lib/vfs/cpp/pseudo_file.h
+++ b/sdk/lib/vfs/cpp/pseudo_file.h
@@ -6,7 +6,7 @@
#define LIB_VFS_CPP_PSEUDO_FILE_H_
#include <lib/fit/function.h>
-#include <lib/vfs/cpp/internal/node.h>
+#include <lib/vfs/cpp/node.h>
#include <vector>
@@ -30,7 +30,7 @@
// the file.
//
// This class is thread-safe.
-class PseudoFile final : public internal::Node {
+class PseudoFile final : public Node {
public:
// Handler called to read from the pseudo-file.
using ReadHandler = fit::function<zx_status_t(std::vector<uint8_t>* output, size_t max_bytes)>;
@@ -47,7 +47,7 @@
WriteHandler write_handler = nullptr)
: Node(MakePseudoFile(max_file_size, std::move(read_handler), std::move(write_handler))) {}
- using internal::Node::Serve;
+ using Node::Serve;
private:
struct PseudoFileState {
diff --git a/sdk/lib/vfs/cpp/remote_dir.h b/sdk/lib/vfs/cpp/remote_dir.h
index 5c2dc33..8ec3517 100644
--- a/sdk/lib/vfs/cpp/remote_dir.h
+++ b/sdk/lib/vfs/cpp/remote_dir.h
@@ -7,7 +7,7 @@
#include <fuchsia/io/cpp/fidl.h>
#include <lib/fidl/cpp/interface_handle.h>
-#include <lib/vfs/cpp/internal/node.h>
+#include <lib/vfs/cpp/node.h>
#include <lib/zx/channel.h>
namespace vfs {
@@ -22,18 +22,17 @@
// place.
//
// This class is thread-safe.
-class RemoteDir final : public internal::Node {
+class RemoteDir final : public Node {
public:
// Binds to a remotely hosted directory channel via `remote_dir`. The channel must be valid and
// must be compatible with the `fuchsia.io.Directory` protocol.
- explicit RemoteDir(zx::channel remote_dir)
- : internal::Node(CreateRemoteDir(std::move(remote_dir))) {}
+ explicit RemoteDir(zx::channel remote_dir) : Node(CreateRemoteDir(std::move(remote_dir))) {}
// Binds to a remotely hosted directory using the specified `dir`. The `dir` handle must be valid.
explicit RemoteDir(fidl::InterfaceHandle<fuchsia::io::Directory> dir)
: RemoteDir(dir.TakeChannel()) {}
- using internal::Node::Serve;
+ using Node::Serve;
private:
static vfs_internal_node_t* CreateRemoteDir(zx::channel dir) {
diff --git a/sdk/lib/vfs/cpp/service.h b/sdk/lib/vfs/cpp/service.h
index 6226de5..74def4f 100644
--- a/sdk/lib/vfs/cpp/service.h
+++ b/sdk/lib/vfs/cpp/service.h
@@ -8,18 +8,18 @@
#include <fuchsia/io/cpp/fidl.h>
#include <lib/async/default.h>
#include <lib/fit/function.h>
-#include <lib/vfs/cpp/internal/node.h>
+#include <lib/vfs/cpp/node.h>
namespace vfs {
// A node which binds a channel to a service implementation when opened.
//
// This class is thread-safe.
-class Service final : public internal::Node {
+class Service final : public Node {
public:
// Handler callback which binds `channel` to a service instance.
using Connector = fit::function<void(zx::channel channel, async_dispatcher_t* dispatcher)>;
- explicit Service(Connector connector) : internal::Node(MakeService(std::move(connector))) {}
+ explicit Service(Connector connector) : Node(MakeService(std::move(connector))) {}
template <typename Interface>
explicit Service(fidl::InterfaceRequestHandler<Interface> handler)
@@ -28,7 +28,7 @@
handler(fidl::InterfaceRequest<Interface>(std::move(channel)));
}) {}
- using internal::Node::Serve;
+ using Node::Serve;
private:
static vfs_internal_node_t* MakeService(Connector connector) {
diff --git a/sdk/lib/vfs/cpp/tests/lazy_dir_test.cc b/sdk/lib/vfs/cpp/tests/lazy_dir_test.cc
index eca8c8e..2698c47 100644
--- a/sdk/lib/vfs/cpp/tests/lazy_dir_test.cc
+++ b/sdk/lib/vfs/cpp/tests/lazy_dir_test.cc
@@ -37,7 +37,7 @@
struct Entry {
uint64_t id;
uint32_t mode_type;
- std::unique_ptr<vfs::internal::Node> node;
+ std::unique_ptr<vfs::Node> node;
};
using EntryMap = std::map<std::string_view, Entry, std::less<>>;
diff --git a/sdk/lib/vfs/cpp/tests/pseudo_dir_test.cc b/sdk/lib/vfs/cpp/tests/pseudo_dir_test.cc
index 73e624a..69ec485 100644
--- a/sdk/lib/vfs/cpp/tests/pseudo_dir_test.cc
+++ b/sdk/lib/vfs/cpp/tests/pseudo_dir_test.cc
@@ -62,19 +62,19 @@
ASSERT_EQ(root()->Lookup("does_not_exist", nullptr), ZX_ERR_NOT_FOUND);
// Both subdir_a and subdir_b should point to the same node.
- vfs::internal::Node* subdir_a;
+ vfs::Node* subdir_a;
ASSERT_EQ(root()->Lookup("subdir_a", &subdir_a), ZX_OK);
ASSERT_EQ(subdir_a, subdir());
- vfs::internal::Node* subdir_b;
+ vfs::Node* subdir_b;
ASSERT_EQ(root()->Lookup("subdir_b", &subdir_b), ZX_OK);
ASSERT_EQ(subdir_b, subdir());
- vfs::internal::Node* unique_file;
+ vfs::Node* unique_file;
ASSERT_EQ(subdir()->Lookup("unique_file", &unique_file), ZX_OK);
- vfs::internal::Node* unique_file_a;
+ vfs::Node* unique_file_a;
ASSERT_EQ(static_cast<vfs::PseudoDir*>(subdir_a)->Lookup("unique_file", &unique_file_a), ZX_OK);
- vfs::internal::Node* unique_file_b;
+ vfs::Node* unique_file_b;
ASSERT_EQ(static_cast<vfs::PseudoDir*>(subdir_b)->Lookup("unique_file", &unique_file_b), ZX_OK);
// The entry for `unique_file` should be the same node in both sub directories.
diff --git a/sdk/lib/vfs/cpp/tests/vmo_file_test.cc b/sdk/lib/vfs/cpp/tests/vmo_file_test.cc
index 502edc6..b91dc4f 100644
--- a/sdk/lib/vfs/cpp/tests/vmo_file_test.cc
+++ b/sdk/lib/vfs/cpp/tests/vmo_file_test.cc
@@ -44,13 +44,13 @@
ASSERT_EQ(zx::vmo::create(kFileContents.size(), 0, &vmo), ZX_OK);
ASSERT_EQ(vmo.write(kFileContents.data(), 0, kFileContents.size()), ZX_OK);
auto writable_file = std::make_unique<vfs::VmoFile>(std::move(vmo), kFileContents.size(),
- vfs::VmoFile::WriteMode::WRITABLE);
+ vfs::VmoFile::WriteMode::kWritable);
ASSERT_EQ(root_->AddEntry("writable_file", std::move(writable_file)), ZX_OK);
ASSERT_EQ(zx::vmo::create(kFileContents.size(), 0, &vmo), ZX_OK);
ASSERT_EQ(vmo.write(kFileContents.data(), 0, kFileContents.size()), ZX_OK);
auto read_only_file = std::make_unique<vfs::VmoFile>(std::move(vmo), kFileContents.size(),
- vfs::VmoFile::WriteMode::READ_ONLY);
+ vfs::VmoFile::WriteMode::kReadOnly);
ASSERT_EQ(root_->AddEntry("read_only_file", std::move(read_only_file)), ZX_OK);
zx::channel root_server;
diff --git a/sdk/lib/vfs/cpp/vfs_cpp.api b/sdk/lib/vfs/cpp/vfs_cpp.api
index 0622e69..31becd0 100644
--- a/sdk/lib/vfs/cpp/vfs_cpp.api
+++ b/sdk/lib/vfs/cpp/vfs_cpp.api
@@ -1,10 +1,10 @@
{
- "pkg/vfs_cpp/include/lib/vfs/cpp/composed_service_dir.h": "5684b0d2992146d9e3d1f945c5ffc951",
- "pkg/vfs_cpp/include/lib/vfs/cpp/internal/node.h": "846ce5b1deb58889b509217a5662aad9",
- "pkg/vfs_cpp/include/lib/vfs/cpp/lazy_dir.h": "d6cce592cbaf4f7407c8db859b259e58",
- "pkg/vfs_cpp/include/lib/vfs/cpp/pseudo_dir.h": "6e842bf5ad133bf62b60dd9391058540",
- "pkg/vfs_cpp/include/lib/vfs/cpp/pseudo_file.h": "cee72c8963a7fe8b6fca1ee3231efb60",
- "pkg/vfs_cpp/include/lib/vfs/cpp/remote_dir.h": "8eca7f79792fda792685da8fa6fa8f9b",
- "pkg/vfs_cpp/include/lib/vfs/cpp/service.h": "924d2ad1075bc30b0f87ad1bb36de9d9",
- "pkg/vfs_cpp/include/lib/vfs/cpp/vmo_file.h": "4b28c7ccc1ad741e1c08ee454b860b19"
+ "pkg/vfs_cpp/include/lib/vfs/cpp/composed_service_dir.h": "f0a3dfa46f1566dacdc7af40f2362a79",
+ "pkg/vfs_cpp/include/lib/vfs/cpp/lazy_dir.h": "26f8309af12fc38971f83d06e955e101",
+ "pkg/vfs_cpp/include/lib/vfs/cpp/node.h": "55049b522a54088dccdd37de95d4eb06",
+ "pkg/vfs_cpp/include/lib/vfs/cpp/pseudo_dir.h": "0870ebe4c4534993c4ebcce4331e1ad0",
+ "pkg/vfs_cpp/include/lib/vfs/cpp/pseudo_file.h": "0ff9f9de27271c95c811c3e473b0b1e2",
+ "pkg/vfs_cpp/include/lib/vfs/cpp/remote_dir.h": "67c1cdb2d1b6d38b8c45c3324ad955a4",
+ "pkg/vfs_cpp/include/lib/vfs/cpp/service.h": "2340785de0ea4f669857690346a0e13f",
+ "pkg/vfs_cpp/include/lib/vfs/cpp/vmo_file.h": "7ca23dbb8f4ca218c89100a558d275a4"
}
\ No newline at end of file
diff --git a/sdk/lib/vfs/cpp/vmo_file.h b/sdk/lib/vfs/cpp/vmo_file.h
index acad499..495bf70 100644
--- a/sdk/lib/vfs/cpp/vmo_file.h
+++ b/sdk/lib/vfs/cpp/vmo_file.h
@@ -5,7 +5,7 @@
#ifndef LIB_VFS_CPP_VMO_FILE_H_
#define LIB_VFS_CPP_VMO_FILE_H_
-#include <lib/vfs/cpp/internal/node.h>
+#include <lib/vfs/cpp/node.h>
#include <lib/zx/vmo.h>
#include <zircon/availability.h>
#include <zircon/status.h>
@@ -20,14 +20,18 @@
// written into.
//
// This class is thread-safe.
-class VmoFile final : public internal::Node {
+class VmoFile final : public Node {
public:
+ // TODO(https://fxbug.dev/311176363): Remove deprecated enum constants and type aliases below.
+
// Specifies the desired behavior of writes.
enum class WriteMode : vfs_internal_write_mode_t {
// The VmoFile is read only.
- READ_ONLY = VFS_INTERNAL_WRITE_MODE_READ_ONLY,
+ kReadOnly = VFS_INTERNAL_WRITE_MODE_READ_ONLY,
// The VmoFile will be writable.
- WRITABLE = VFS_INTERNAL_WRITE_MODE_WRITABLE,
+ kWritable = VFS_INTERNAL_WRITE_MODE_WRITABLE,
+ READ_ONLY ZX_REMOVED_SINCE(1, 19, 20, "Use kReadOnly instead.") = kReadOnly,
+ WRITABLE ZX_REMOVED_SINCE(1, 19, 20, "Use kWritable instead.") = kWritable,
};
// Specifies the default behavior when a client asks for the file's underlying VMO, but does not
@@ -35,12 +39,9 @@
//
// *NOTE*: This does not affect the behavior of requests that specify the required sharing mode.
// Requests for a specific sharing mode will be fulfilled as requested.
- //
- // TODO(https://fxbug.dev/311176363): Introduce new constants for these enumerations that conform
- // to the Fuchsia C++ style guide, and deprecate the old ones.
enum class DefaultSharingMode : vfs_internal_sharing_mode_t {
- // NOT_SUPPORTED will be returned, unless a sharing mode is specified in the request.
- NONE = VFS_INTERNAL_SHARING_MODE_NONE,
+ // Will return `ZX_ERR_NOT_SUPPORTED` if a sharing mode was not specified in the request.
+ kNone = VFS_INTERNAL_SHARING_MODE_NONE,
// The VMO handle is duplicated for each client.
//
@@ -51,7 +52,7 @@
// This mode is significantly more efficient than |CLONE_COW| and should be
// preferred when file spans the whole VMO or when the VMO's entire content
// is safe for clients to read.
- DUPLICATE = VFS_INTERNAL_SHARING_MODE_DUPLICATE,
+ kDuplicate = VFS_INTERNAL_SHARING_MODE_DUPLICATE,
// The VMO range spanned by the file is cloned on demand, using
// copy-on-write semantics to isolate modifications of clients which open
@@ -60,19 +61,22 @@
// This is appropriate when clients need to be restricted from accessing
// portions of the VMO outside of the range of the file and when file
// modifications by clients should not be visible to each other.
- CLONE_COW = VFS_INTERNAL_SHARING_MODE_COW,
+ kCloneCow = VFS_INTERNAL_SHARING_MODE_COW,
+
+ NONE ZX_REMOVED_SINCE(1, 19, 20, "Use kNone instead.") = kNone,
+ DUPLICATE ZX_REMOVED_SINCE(1, 19, 20, "Use kDuplicate instead.") = kDuplicate,
+ CLONE_COW ZX_REMOVED_SINCE(1, 19, 20, "Use kCloneCow instead.") = kCloneCow,
};
- // TODO(https://fxbug.dev/311176363): Deprecate and remove these type aliases.
- using WriteOption = WriteMode;
- using Sharing = DefaultSharingMode;
+ using WriteOption ZX_REMOVED_SINCE(1, 19, 20, "Use WriteMode instead.") = WriteMode;
+ using Sharing ZX_REMOVED_SINCE(1, 19, 20, "Use DefaultSharingMode instead.") = DefaultSharingMode;
// Creates a file node backed by a VMO.
- VmoFile(zx::vmo vmo, size_t length, WriteMode write_option = WriteMode::READ_ONLY,
- DefaultSharingMode vmo_sharing = DefaultSharingMode::DUPLICATE)
+ VmoFile(zx::vmo vmo, size_t length, WriteMode write_option = WriteMode::kReadOnly,
+ DefaultSharingMode vmo_sharing = DefaultSharingMode::kDuplicate)
: VmoFile(vmo.release(), length, write_option, vmo_sharing) {}
- using internal::Node::Serve;
+ using Node::Serve;
// Returns a borrowed handle to the VMO backing this file.
zx::unowned_vmo vmo() const { return vmo_->borrow(); }
@@ -80,7 +84,7 @@
private:
VmoFile(zx_handle_t vmo_handle, size_t length, WriteMode write_option,
DefaultSharingMode vmo_sharing)
- : internal::Node(CreateVmoFile(vmo_handle, length, write_option, vmo_sharing)),
+ : Node(CreateVmoFile(vmo_handle, length, write_option, vmo_sharing)),
vmo_(zx::unowned_vmo{vmo_handle}) {}
// The underlying node is responsible for closing `vmo_handle` when the node is destroyed.
diff --git a/sdk/lib/vfs/internal/libvfs.cc b/sdk/lib/vfs/internal/libvfs.cc
index a664e8e..fb602cc 100644
--- a/sdk/lib/vfs/internal/libvfs.cc
+++ b/sdk/lib/vfs/internal/libvfs.cc
@@ -255,9 +255,6 @@
if (!out_vnode) {
return ZX_ERR_INVALID_ARGS;
}
- // TODO(https://fxbug.dev/293936429): We might have to relax this check, as RemoteDir should
- // gracefully handle this case. The existing SDK VFS node constructors are infallible even when
- // `remote` is invalid.
if (remote == ZX_HANDLE_INVALID) {
return ZX_ERR_BAD_HANDLE;
}
@@ -292,9 +289,6 @@
if (!out_vnode) {
return ZX_ERR_INVALID_ARGS;
}
- // TODO(https://fxbug.dev/293936429): We might have to relax this check, as VmoFile should
- // gracefully handle this case. The existing SDK VFS node constructors are infallible even when
- // `vmo` is invalid.
if (vmo == ZX_HANDLE_INVALID) {
return ZX_ERR_BAD_HANDLE;
}
diff --git a/src/connectivity/network/tests/integration/dhcp/src/lib.rs b/src/connectivity/network/tests/integration/dhcp/src/lib.rs
index a3e27ef..d07201d 100644
--- a/src/connectivity/network/tests/integration/dhcp/src/lib.rs
+++ b/src/connectivity/network/tests/integration/dhcp/src/lib.rs
@@ -665,6 +665,10 @@
.await;
}
+// TODO(https://fxbug.dev/42077260): Enable this test for Netstack3. Note that
+// the test will need to be updated to make DAD more robust to CQ timing
+// variability. This could be done by configuring the number of IPV4
+// `dad_transmits` to some large value.
#[netstack_test]
async fn acquire_with_dhcpd_bound_device_dup_addr<
SERVER: Netstack,
diff --git a/src/connectivity/network/tests/integration/expects/netstack-dhcp-integration-test.json5 b/src/connectivity/network/tests/integration/expects/netstack-dhcp-integration-test.json5
index 9bde2d8..9ef119c1 100644
--- a/src/connectivity/network/tests/integration/expects/netstack-dhcp-integration-test.json5
+++ b/src/connectivity/network/tests/integration/expects/netstack-dhcp-integration-test.json5
@@ -10,6 +10,15 @@
],
},
{
+ type: "skip",
+ matchers: [
+ // This test relies on using ARP probes to perform duplicate
+ // address detection, making it prone to flaking in CQ, where
+ // timing is less stable. See https://fxbug.dev/332181505.
+ "acquire_with_dhcpd_bound_device_dup_addr_ns[23]_ns2_with_dhcp_in_stack",
+ ],
+ },
+ {
type: "expect_pass_with_err_logs",
matchers: [
"acquire_then_renew_with_dhcpd_bound_device_ns3_ns2_with_dhcp_in_stack",
diff --git a/src/connectivity/weave/adaptation/tests/fake_directory.h b/src/connectivity/weave/adaptation/tests/fake_directory.h
index 92d09da..24138a5 100644
--- a/src/connectivity/weave/adaptation/tests/fake_directory.h
+++ b/src/connectivity/weave/adaptation/tests/fake_directory.h
@@ -59,8 +59,8 @@
return nullptr;
}
return std::make_unique<vfs::VmoFile>(std::move(file_vmo.vmo()), file_vmo.size(),
- vfs::VmoFile::WriteOption::WRITABLE,
- vfs::VmoFile::Sharing::CLONE_COW);
+ vfs::VmoFile::WriteMode::kWritable,
+ vfs::VmoFile::DefaultSharingMode::kCloneCow);
}
// Pseudo-directory to serve the resources from.
diff --git a/src/developer/ffx/plugins/product/BUILD.gn b/src/developer/ffx/plugins/product/BUILD.gn
index d660e811..5662609 100644
--- a/src/developer/ffx/plugins/product/BUILD.gn
+++ b/src/developer/ffx/plugins/product/BUILD.gn
@@ -65,5 +65,8 @@
group("tests") {
testonly = true
- deps = [ ":ffx_product_tests($host_toolchain)" ]
+ deps = [
+ ":ffx_product_tests($host_toolchain)",
+ "common:tests",
+ ]
}
diff --git a/src/developer/ffx/plugins/product/common/BUILD.gn b/src/developer/ffx/plugins/product/common/BUILD.gn
index 4d7fd8e..48b8c07 100644
--- a/src/developer/ffx/plugins/product/common/BUILD.gn
+++ b/src/developer/ffx/plugins/product/common/BUILD.gn
@@ -18,10 +18,13 @@
"//third_party/rust_crates:schemars",
"//third_party/rust_crates:serde",
]
- test_deps = []
+ test_deps = [
+ "//src/lib/fuchsia",
+ "//third_party/rust_crates:serde_json",
+ ]
}
group("tests") {
testonly = true
- deps = [ ":ffx_product_common_test" ]
+ deps = [ ":lib_test" ]
}
diff --git a/src/developer/ffx/plugins/product/common/src/lib.rs b/src/developer/ffx/plugins/product/common/src/lib.rs
index 9f56930..18b025b 100644
--- a/src/developer/ffx/plugins/product/common/src/lib.rs
+++ b/src/developer/ffx/plugins/product/common/src/lib.rs
@@ -29,6 +29,9 @@
pub enum MachineOutput<T: JsonSchema + Serialize> {
CommandStatus(CommandStatus),
Notice { title: Option<String>, message: Option<String> },
+ // Since we are using a tag field to identify the enum variant,
+ // T cannot be simple type since there is no field name to associate
+ // the value with.
Data(T),
}
@@ -65,3 +68,84 @@
self.writer.borrow_mut().machine(&data).map_err(move |e| e.into())
}
}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use fho::{Format, TestBuffers};
+
+ #[derive(Debug, Serialize, JsonSchema)]
+ struct TestBundle {
+ pub base_url: String,
+ pub value: u32,
+ pub name: String,
+ }
+
+ impl TestBundle {
+ fn new() -> Self {
+ Self { base_url: "/some/url".into(), value: 42, name: "Sample_Bundle".into() }
+ }
+ }
+
+ #[fuchsia::test]
+ fn test_machine_schema() {
+ let outputs = vec![
+ MachineOutput::CommandStatus(CommandStatus::Ok { message: None }),
+ MachineOutput::CommandStatus(CommandStatus::Ok {
+ message: Some("Ok with message".into()),
+ }),
+ MachineOutput::CommandStatus(CommandStatus::UnexpectedError {
+ message: "an error".into(),
+ }),
+ MachineOutput::CommandStatus(CommandStatus::UserError { message: "an error".into() }),
+ MachineOutput::Notice { title: None, message: None },
+ MachineOutput::Notice { title: Some("The title".into()), message: None },
+ MachineOutput::Notice {
+ title: Some("The title".into()),
+ message: Some("a message".into()),
+ },
+ MachineOutput::Notice { title: None, message: Some("a message".into()) },
+ MachineOutput::Data(TestBundle::new()),
+ ];
+
+ for o in outputs {
+ let test_buffers = TestBuffers::default();
+ let mut writer = VerifiedMachineWriter::<MachineOutput<TestBundle>>::new_test(
+ Some(Format::JsonPretty),
+ &test_buffers,
+ );
+ writer.machine(&o).expect("write data");
+ let data_str = test_buffers.into_stdout_str();
+ let data = serde_json::from_str(&data_str).expect("json value");
+ match writer.verify_schema(&data) {
+ Ok(_) => (),
+ Err(e) => {
+ println!("Error verifying schema: {e}");
+ println!("{data:?}");
+ }
+ };
+ }
+ }
+ #[fuchsia::test]
+ fn test_machine_no_value_schema() {
+ let outputs = vec![MachineOutput::Data(())];
+
+ for o in outputs {
+ let test_buffers = TestBuffers::default();
+ let mut writer = VerifiedMachineWriter::<MachineOutput<()>>::new_test(
+ Some(Format::JsonPretty),
+ &test_buffers,
+ );
+ writer.machine(&o).expect("write data");
+ let data_str = test_buffers.into_stdout_str();
+ let data = serde_json::from_str(&data_str).expect("json value");
+ match writer.verify_schema(&data) {
+ Ok(_) => (),
+ Err(e) => {
+ println!("Error verifying schema: {e}");
+ println!("{data:?}");
+ }
+ };
+ }
+ }
+}
diff --git a/src/developer/ffx/plugins/product/download/src/lib.rs b/src/developer/ffx/plugins/product/download/src/lib.rs
index 459219a..32dbe1d 100644
--- a/src/developer/ffx/plugins/product/download/src/lib.rs
+++ b/src/developer/ffx/plugins/product/download/src/lib.rs
@@ -28,7 +28,8 @@
#[async_trait(?Send)]
impl FfxMain for PbDownloadTool {
- type Writer = VerifiedMachineWriter<MachineOutput<String>>;
+ type Writer = VerifiedMachineWriter<MachineOutput<()>>;
+
async fn main(self, writer: Self::Writer) -> fho::Result<()> {
let client = Client::initial()?;
diff --git a/src/devices/bin/driver_runtime/dispatcher.cc b/src/devices/bin/driver_runtime/dispatcher.cc
index d442b3d..540c3e0 100644
--- a/src/devices/bin/driver_runtime/dispatcher.cc
+++ b/src/devices/bin/driver_runtime/dispatcher.cc
@@ -1866,6 +1866,7 @@
}
zx_status_t Dispatcher::ThreadPool::SetRoleProfile() {
+#if __Fuchsia_API_level__ >= FUCHSIA_HEAD
zx::result client_end = component::Connect<fuchsia_scheduler::RoleManager>();
if (client_end.is_error()) {
return client_end.status_value();
@@ -1894,6 +1895,8 @@
return result.value().error_value();
}
return ZX_OK;
+#endif
+ return ZX_ERR_NOT_SUPPORTED;
}
zx_status_t Dispatcher::ThreadPool::AddThread() {
diff --git a/src/lib/assembly/domain_config/src/domain_config.rs b/src/lib/assembly/domain_config/src/domain_config.rs
index 6701772..06b7c19 100644
--- a/src/lib/assembly/domain_config/src/domain_config.rs
+++ b/src/lib/assembly/domain_config/src/domain_config.rs
@@ -6,6 +6,7 @@
use assembly_platform_configuration::{DomainConfig, FileOrContents};
use assembly_util::FileEntry;
use camino::{Utf8Path, Utf8PathBuf};
+use cml::RelativePath;
use fidl::persist;
use fuchsia_pkg::{PackageBuilder, PackageManifest, RelativeTo};
use std::io::Write;
@@ -42,9 +43,8 @@
// Find all the directory routes to expose.
let mut exposes = vec![];
for (directory, directory_config) in self.config.directories {
- let subdir =
- cml::RelativePath::new(&format!("meta/fuchsia.domain_config/{}", directory))
- .with_context(|| format!("Calculating relative path for {directory}"))?;
+ let subdir = RelativePath::new(&format!("meta/fuchsia.domain_config/{}", directory))
+ .with_context(|| format!("Calculating relative path for {directory}"))?;
let name = cml::Name::new(&directory)
.with_context(|| format!("Calculating name for {directory}"))?;
exposes.push(cml::Expose {
@@ -124,7 +124,6 @@
use fuchsia_pkg::PackageName;
use pretty_assertions::assert_eq;
use std::fs::File;
- use std::path::PathBuf;
use std::str::FromStr;
use tempfile::tempdir;
@@ -181,13 +180,13 @@
target: ExposeTarget::Parent,
target_name,
rights: _,
- subdir: Some(subdir),
+ subdir,
availability: _,
}) => {
assert_eq!(source_name, &cml::Name::new("pkg").unwrap());
assert!(source_dictionary.is_dot());
assert_eq!(target_name, &cml::Name::new("config-dir").unwrap());
- assert_eq!(subdir, &PathBuf::from("meta/fuchsia.domain_config/config-dir"));
+ assert_eq!(subdir, &RelativePath::new("meta/fuchsia.domain_config/config-dir").unwrap());
});
let contents = far_reader.read_file("meta/contents").unwrap();
let contents = std::str::from_utf8(&contents).unwrap();
@@ -340,13 +339,13 @@
target: ExposeTarget::Parent,
target_name,
rights: _,
- subdir: Some(subdir),
+ subdir,
availability: _,
}) => {
assert_eq!(source_name, &cml::Name::new("pkg").unwrap());
assert!(source_dictionary.is_dot());
assert_eq!(target_name, &cml::Name::new("config-dir").unwrap());
- assert_eq!(subdir, &PathBuf::from("meta/fuchsia.domain_config/config-dir"));
+ assert_eq!(subdir, &RelativePath::new("meta/fuchsia.domain_config/config-dir").unwrap());
});
let contents = far_reader.read_file("meta/contents").unwrap();
let contents = std::str::from_utf8(&contents).unwrap();
diff --git a/src/lib/fuchsia-component-test/realm_builder_server/src/main.rs b/src/lib/fuchsia-component-test/realm_builder_server/src/main.rs
index 6c2c63f..af5a6b8 100644
--- a/src/lib/fuchsia-component-test/realm_builder_server/src/main.rs
+++ b/src/lib/fuchsia-component-test/realm_builder_server/src/main.rs
@@ -5,6 +5,7 @@
use {
anyhow::Context,
cm_rust::{FidlIntoNative, NativeIntoFidl, OfferDeclCommon},
+ cm_types::RelativePath,
fidl::endpoints::{DiscoverableProtocolMarker, ProtocolMarker, Proxy, ServerEnd},
fidl_fuchsia_component as fcomponent, fidl_fuchsia_component_decl as fcdecl,
fidl_fuchsia_component_runner as fcrunner, fidl_fuchsia_component_test as ftest,
@@ -20,7 +21,6 @@
std::{
collections::HashMap,
ops::{Deref, DerefMut},
- path::PathBuf,
str::FromStr,
sync::{
atomic::{AtomicBool, Ordering},
@@ -1393,6 +1393,19 @@
})?)
}
+fn try_into_subdir(input: &Option<String>) -> Result<RelativePath, RealmBuilderError> {
+ input
+ .as_ref()
+ .map(|p| {
+ RelativePath::new(p).map_err(|_| {
+ RealmBuilderError::CapabilityInvalid(anyhow::format_err!(
+ "Field `subdir` is not a valid relative path."
+ ))
+ })
+ })
+ .unwrap_or_else(|| Ok(Default::default()))
+}
+
/// Attempts to produce a valid path from the "path" field from a capability
fn try_into_capability_path(input: &Option<String>) -> Result<cm_types::Path, RealmBuilderError> {
input
@@ -1517,7 +1530,7 @@
target,
target_name,
rights: directory.rights,
- subdir: directory.subdir.map(PathBuf::from),
+ subdir: try_into_subdir(&directory.subdir)?,
dependency_type,
availability,
})
@@ -1627,8 +1640,8 @@
// exposing_in field to ensure that we apply the subdir field in the parent, and not in
// a local child's manifest.
let subdir = match exposing_in {
- ExposingIn::Child => None,
- ExposingIn::Realm => directory.subdir.map(PathBuf::from),
+ ExposingIn::Child => Default::default(),
+ ExposingIn::Realm => try_into_subdir(&directory.subdir)?,
};
cm_rust::ExposeDecl::Directory(cm_rust::ExposeDirectoryDecl {
source,
@@ -1733,7 +1746,7 @@
// We only want to set the sub-directory field once, and if we're generating a use
// declaration then we've already generated an offer declaration in the parent and
// we'll set the sub-directory field there.
- subdir: None,
+ subdir: Default::default(),
dependency_type,
availability: check_and_unwrap_use_availability(directory.availability)?,
})
@@ -3411,7 +3424,7 @@
target_name: "config-data".parse().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: Some(fio::RW_STAR_DIR),
- subdir: Some(PathBuf::from("component")),
+ subdir: "component".parse().unwrap(),
availability: cm_rust::Availability::Required,
}),
cm_rust::OfferDecl::Storage(cm_rust::OfferStorageDecl {
@@ -3552,7 +3565,7 @@
source_dictionary: Default::default(),
target_path: "/config-data".parse().unwrap(),
rights: fio::RW_STAR_DIR,
- subdir: None,
+ subdir: Default::default(),
dependency_type: cm_rust::DependencyType::Strong,
availability: cm_rust::Availability::Optional,
}),
@@ -3609,7 +3622,7 @@
target_name: "config-data".parse().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: Some(fio::RW_STAR_DIR),
- subdir: Some(PathBuf::from("component")),
+ subdir: "component".parse().unwrap(),
availability: cm_rust::Availability::Optional,
}),
cm_rust::OfferDecl::Directory(cm_rust::OfferDirectoryDecl {
@@ -3620,7 +3633,7 @@
target_name: "config-data".parse().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: Some(fio::RW_STAR_DIR),
- subdir: Some(PathBuf::from("component")),
+ subdir: "component".parse().unwrap(),
availability: cm_rust::Availability::Optional,
}),
cm_rust::OfferDecl::Storage(cm_rust::OfferStorageDecl {
@@ -3744,7 +3757,7 @@
target_name: "config-data".parse().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: Some(fio::RW_STAR_DIR),
- subdir: Some(PathBuf::from("component")),
+ subdir: "component".parse().unwrap(),
availability: cm_rust::Availability::SameAsTarget,
}),
cm_rust::OfferDecl::Storage(cm_rust::OfferStorageDecl {
@@ -4660,7 +4673,7 @@
target: cm_rust::ExposeTarget::Parent,
target_name: "data".parse().unwrap(),
rights: Some(fio::R_STAR_DIR),
- subdir: None,
+ subdir: Default::default(),
availability: cm_rust::Availability::Required,
})],
..cm_rust::ComponentDecl::default()
@@ -4689,7 +4702,7 @@
target_name: "data".parse().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: Some(fio::R_STAR_DIR),
- subdir: None,
+ subdir: Default::default(),
availability: cm_rust::Availability::Required,
})],
..cm_rust::ComponentDecl::default()
diff --git a/src/lib/fuchsia-component-test/src/lib.rs b/src/lib/fuchsia-component-test/src/lib.rs
index 14af116..6d4fb2c1e2 100644
--- a/src/lib/fuchsia-component-test/src/lib.rs
+++ b/src/lib/fuchsia-component-test/src/lib.rs
@@ -1112,13 +1112,8 @@
let cap = Capability::directory(name)
.path(source_path.expect("missing capability source path"))
.rights(rights);
- let cap = if let Some(subdir) = expose.subdir {
- cap.subdir(
- subdir
- .into_os_string()
- .into_string()
- .expect("subdir path is invalid string"),
- )
+ let cap = if !expose.subdir.is_dot() {
+ cap.subdir(expose.subdir.to_string())
} else {
cap
};
diff --git a/src/performance/lib/trace_processing/trace_importing.py b/src/performance/lib/trace_processing/trace_importing.py
index 5b6d1fc..33001188 100644
--- a/src/performance/lib/trace_processing/trace_importing.py
+++ b/src/performance/lib/trace_processing/trace_importing.py
@@ -292,55 +292,16 @@
# or is of a different type than what is asserted here, then the JSON trace
# event is considered to be malformed.
def check_trace_event(json_trace_event: Dict[str, Any]) -> None:
- if not (
- "ph" in json_trace_event and isinstance(json_trace_event["ph"], str)
- ):
- raise TypeError(
- f"Expected {json_trace_event} to have field 'ph' of type str"
- )
- if json_trace_event["ph"] != "M" and not (
- "cat" in json_trace_event
- and isinstance(json_trace_event["cat"], str)
- ):
- raise TypeError(
- f"Expected {json_trace_event} to have field 'cat' of type str"
- )
- if not (
- "name" in json_trace_event
- and isinstance(json_trace_event["name"], str)
- ):
- raise TypeError(
- f"Expected {json_trace_event} to have field 'name' of type str"
- )
- if json_trace_event["ph"] != "M" and not (
- "ts" in json_trace_event
- and isinstance(json_trace_event["ts"], (float, int))
- ):
- raise TypeError(
- f"Expected {json_trace_event} to have field 'ts' of type float "
- f"or int"
- )
- if not (
- "pid" in json_trace_event
- and isinstance(json_trace_event["pid"], int)
- ):
- raise TypeError(
- f"Expected {json_trace_event} to have field 'pid' of type int"
- )
- if not (
- "tid" in json_trace_event
- and isinstance(json_trace_event["tid"], (float, int))
- ):
- raise TypeError(
- f"Expected {json_trace_event} to have field 'tid' of type "
- f"float or int"
- )
+ validate_field_type(json_trace_event, "ph", str)
+ if json_trace_event["ph"] != "M":
+ validate_field_type(json_trace_event, "cat", str)
+ validate_field_type(json_trace_event, "name", str)
+ if json_trace_event["ph"] != "M":
+ validate_field_type(json_trace_event, "ts", float | int)
+ validate_field_type(json_trace_event, "pid", int)
+ validate_field_type(json_trace_event, "tid", float | int)
if "args" in json_trace_event:
- if not isinstance(json_trace_event["args"], dict):
- raise TypeError(
- f"Expected {json_trace_event} with 'args' field to have "
- f"'args' field of type dict"
- )
+ validate_field_type(json_trace_event, "args", dict)
# A helper lambda to add duration events to the appropriate duration stack
# and do the appropriate duration/flow graph setup. It is used for both
@@ -357,13 +318,7 @@
top_parent.child_durations.append(duration_event)
# Obtain the overall list of trace events.
- if not (
- "traceEvents" in root_object
- and isinstance(root_object["traceEvents"], list)
- ):
- raise TypeError(
- f"Expected {root_object} to have field 'traceEvents' of type List"
- )
+ validate_field_type(root_object, "traceEvents", list)
trace_events: List[Dict[str, Any]] = root_object["traceEvents"].copy()
# Add synthetic end events for each complete event in the trace data to
@@ -663,86 +618,30 @@
)
system_trace_events_list = root_object["systemTraceEvents"]
- if not (
- "type" in system_trace_events_list
- and isinstance(system_trace_events_list["type"], str)
- ):
- raise TypeError(
- f"Expected {system_trace_events_list} to have field 'type' of "
- f"type str"
- )
+ validate_field_type(system_trace_events_list, "type", str)
if not system_trace_events_list["type"] == "fuchsia":
raise TypeError(
f"Expected {system_trace_events_list} to have field 'type' "
f"equal to value 'fuchsia'"
)
- if not (
- "events" in system_trace_events_list
- and isinstance(system_trace_events_list["events"], list)
- ):
- raise TypeError(
- f"Expected {system_trace_events_list} to have field 'events' "
- f"of type list"
- )
+ validate_field_type(system_trace_events_list, "events", list)
system_trace_events = system_trace_events_list["events"]
for system_trace_event in system_trace_events:
- if not (
- "ph" in system_trace_event
- and isinstance(system_trace_event["ph"], str)
- ):
- raise TypeError(
- f"Expected {system_trace_event} to have field 'ph' of type "
- f"String"
- )
+ validate_field_type(system_trace_event, "ph", str)
system_event_type: str = system_trace_event["ph"]
if system_event_type == "p":
- if not (
- "pid" in system_trace_event
- and isinstance(system_trace_event["pid"], int)
- ):
- raise TypeError(
- f"Expected {system_trace_event} to have field 'pid' "
- f"of type int"
- )
- if not (
- "name" in system_trace_event
- and isinstance(system_trace_event["name"], str)
- ):
- raise TypeError(
- f"Expected {system_trace_event} to have field 'name' "
- f"of type str"
- )
+ validate_field_type(system_trace_event, "pid", int)
+ validate_field_type(system_trace_event, "name", str)
pid = system_trace_event["pid"]
name = system_trace_event["name"]
pid_to_name[pid] = name
elif system_event_type == "t":
- if not (
- "pid" in system_trace_event
- and isinstance(system_trace_event["pid"], int)
- ):
- raise TypeError(
- f"Expected {system_trace_event} to have field 'pid' "
- f"of type int"
- )
- if not (
- "name" in system_trace_event
- and isinstance(system_trace_event["name"], str)
- ):
- raise TypeError(
- f"Expected {system_trace_event} to have field 'name' "
- f"of type str"
- )
- if not (
- "tid" in system_trace_event
- and isinstance(system_trace_event["tid"], (float, int))
- ):
- raise TypeError(
- f"Expected {system_trace_event} to have field 'tid' of "
- f"type int or float"
- )
+ validate_field_type(system_trace_event, "pid", int)
+ validate_field_type(system_trace_event, "name", str)
+ validate_field_type(system_trace_event, "tid", float | int)
tid = int(system_trace_event["tid"])
name = system_trace_event["name"]
diff --git a/src/security/lib/scrutiny/plugins/src/verify/controller/route_sources.rs b/src/security/lib/scrutiny/plugins/src/verify/controller/route_sources.rs
index b9255d8..540fd80 100644
--- a/src/security/lib/scrutiny/plugins/src/verify/controller/route_sources.rs
+++ b/src/security/lib/scrutiny/plugins/src/verify/controller/route_sources.rs
@@ -13,7 +13,7 @@
route::VerifyRouteResult,
},
cm_rust::{CapabilityDecl, CapabilityTypeName, ComponentDecl, ExposeDecl, OfferDecl, UseDecl},
- cm_types::{Name, Path},
+ cm_types::{Name, Path, RelativePath},
moniker::Moniker,
routing::{component_instance::ComponentInstanceInterface, mapper::RouteSegment},
scrutiny::model::{controller::DataController, model::DataModel},
@@ -179,7 +179,7 @@
let subdirs = get_subdirs(other);
let source_path_str = subdirs.iter().fold(source_path.to_path_buf(), |path_buf, next| {
let mut next_buf = path_buf.clone();
- next_buf.push(next);
+ next_buf.push(next.to_path_buf());
next_buf
}).to_str().ok_or_else(|| anyhow!("Failed to format PathBuf as string; components; {:?} appended with {:?}", decl.source_path, subdirs))?.to_string();
let source_path = Path::from_str(&source_path_str).with_context(|| {
@@ -195,38 +195,38 @@
}
}
-fn get_subdirs(route: &Vec<RouteSegment>) -> Vec<PathBuf> {
+fn get_subdirs(route: &Vec<RouteSegment>) -> Vec<RelativePath> {
let mut subdir = vec![];
for segment in route.iter() {
match segment {
RouteSegment::UseBy { capability, .. } => match capability {
UseDecl::Directory(decl) => {
- if let Some(decl_subdir) = &decl.subdir {
- subdir.push(decl_subdir.clone());
+ if !decl.subdir.is_dot() {
+ subdir.push(decl.subdir.clone());
}
}
_ => {}
},
RouteSegment::OfferBy { capability, .. } => match capability {
OfferDecl::Directory(decl) => {
- if let Some(decl_subdir) = &decl.subdir {
- subdir.push(decl_subdir.clone());
+ if !decl.subdir.is_dot() {
+ subdir.push(decl.subdir.clone());
}
}
_ => {}
},
RouteSegment::ExposeBy { capability, .. } => match capability {
ExposeDecl::Directory(decl) => {
- if let Some(decl_subdir) = &decl.subdir {
- subdir.push(decl_subdir.clone());
+ if !decl.subdir.is_dot() {
+ subdir.push(decl.subdir.clone());
}
}
_ => {}
},
RouteSegment::DeclareBy { capability, .. } => match capability {
CapabilityDecl::Storage(decl) => {
- if let Some(decl_subdir) = &decl.subdir {
- subdir.push(decl_subdir.clone());
+ if !decl.subdir.is_dot() {
+ subdir.push(decl.subdir.clone());
}
}
_ => {}
@@ -638,7 +638,7 @@
scrutiny::prelude::{DataController, DataModel},
scrutiny_testing::fake::fake_data_model,
serde_json::json,
- std::{path::PathBuf, str::FromStr, sync::Arc},
+ std::{str::FromStr, sync::Arc},
url::Url,
};
@@ -806,7 +806,7 @@
target_name: "routed_from_provider".parse().unwrap(),
dependency_type: DependencyType::Strong,
rights: Some(fio::Operations::CONNECT),
- subdir: Some(PathBuf::from_str("root_subdir").unwrap()),
+ subdir: "root_subdir".parse().unwrap(),
availability: Availability::Required,
}.into(),
OfferDirectoryDecl{
@@ -817,7 +817,7 @@
target_name: "routed_from_root".parse().unwrap(),
dependency_type: DependencyType::Strong,
rights: Some(fio::Operations::CONNECT),
- subdir: Some(PathBuf::from_str("root_subdir").unwrap()),
+ subdir: "root_subdir".parse().unwrap(),
availability: Availability::Required,
}.into(),
],
@@ -849,7 +849,7 @@
source_dictionary: Default::default(),
target_path: Path::from_str("/data/from/provider").unwrap(),
rights: fio::Operations::CONNECT,
- subdir: Some(PathBuf::from_str("user_subdir").unwrap()),
+ subdir: "user_subdir".parse().unwrap(),
dependency_type: DependencyType::Strong,
availability: Availability::Required,
}.into(),
@@ -859,7 +859,7 @@
source_dictionary: Default::default(),
target_path: Path::from_str("/data/from/root").unwrap(),
rights: fio::Operations::CONNECT,
- subdir: Some(PathBuf::from_str("user_subdir").unwrap()),
+ subdir: "user_subdir".parse().unwrap(),
dependency_type: DependencyType::Strong,
availability: Availability::Required,
}.into(),
@@ -883,7 +883,7 @@
target: ExposeTarget::Parent,
target_name: "exposed_by_provider".parse().unwrap(),
rights: Some(fio::Operations::CONNECT),
- subdir: Some(PathBuf::from_str("provider_subdir").unwrap()),
+ subdir: "provider_subdir".parse().unwrap(),
availability: cm_rust::Availability::Required,
}.into(),
],
@@ -2166,7 +2166,7 @@
source_dictionary: Default::default(),
target_path: Path::from_str("/data/from/root").unwrap(),
rights: fio::Operations::CONNECT,
- subdir: Some(PathBuf::from_str("user_subdir").unwrap()),
+ subdir: "user_subdir".parse().unwrap(),
dependency_type: DependencyType::Strong,
availability: Availability::Required,
}.into(),
@@ -2182,7 +2182,7 @@
source_dictionary: Default::default(),
target_path: Path::from_str("/data/from/provider").unwrap(),
rights: fio::Operations::CONNECT,
- subdir: Some(PathBuf::from_str("user_subdir").unwrap()),
+ subdir: "user_subdir".parse().unwrap(),
dependency_type: DependencyType::Strong,
availability: Availability::Required,
}.into(),
diff --git a/src/security/lib/scrutiny/plugins/src/verify/mod.rs b/src/security/lib/scrutiny/plugins/src/verify/mod.rs
index f9aa3c6..e030cbd 100644
--- a/src/security/lib/scrutiny/plugins/src/verify/mod.rs
+++ b/src/security/lib/scrutiny/plugins/src/verify/mod.rs
@@ -280,7 +280,7 @@
source_dictionary: Default::default(),
target_path: "/dir".parse().unwrap(),
rights,
- subdir: None,
+ subdir: Default::default(),
dependency_type: DependencyType::Strong,
availability: Availability::Required,
}
@@ -300,7 +300,7 @@
target,
target_name,
rights,
- subdir: None,
+ subdir: Default::default(),
dependency_type: DependencyType::Strong,
availability: Availability::Required,
}
@@ -1188,7 +1188,7 @@
"source": "parent",
"source_dictionary": ".",
"source_name": "bad_dir",
- "subdir": null,
+ "subdir": ".",
"target_path": "/dir",
"type": "directory",
},
@@ -1321,7 +1321,7 @@
"source": "parent",
"source_name": "bad_dir",
"source_dictionary": ".",
- "subdir": null,
+ "subdir": ".",
"target_path": "/dir",
"type": "directory",
},
@@ -1343,7 +1343,7 @@
"source": "parent",
"source_name": "good_dir",
"source_dictionary": ".",
- "subdir": null,
+ "subdir": ".",
"target_path": "/dir",
"type": "directory"
},
@@ -1358,7 +1358,7 @@
"source": "self_",
"source_name": "good_dir",
"source_dictionary": ".",
- "subdir": null,
+ "subdir": ".",
"target": {
"child": {
"name": "child",
@@ -1504,7 +1504,7 @@
"source": "parent",
"source_name": "bad_dir",
"source_dictionary": ".",
- "subdir": null,
+ "subdir": ".",
"target_path": "/dir",
"type": "directory",
},
@@ -1631,7 +1631,7 @@
"source": "parent",
"source_name": "bad_dir",
"source_dictionary": ".",
- "subdir": null,
+ "subdir": ".",
"target_path": "/dir",
"type": "directory",
},
diff --git a/src/starnix/kernel/selinux/hooks/current_task_hooks.rs b/src/starnix/kernel/selinux/hooks/current_task_hooks.rs
index 129a0d2..15e0101 100644
--- a/src/starnix/kernel/selinux/hooks/current_task_hooks.rs
+++ b/src/starnix/kernel/selinux/hooks/current_task_hooks.rs
@@ -33,12 +33,12 @@
R: Default,
{
         if let Some(security_server) = &current_task.kernel().security_server {
- if !security_server.has_policy() || security_server.is_fake() {
+ if !security_server.has_policy() {
return Ok(R::default());
}
let result = hook(security_server);
// TODO(b/331375792): Relocate "enforcing" check into the AVC.
- if !security_server.is_enforcing() {
+ if !security_server.is_enforcing() || security_server.is_fake() {
return Ok(R::default());
}
result
@@ -65,7 +65,7 @@
D: Fn() -> R,
{
current_task.kernel().security_server.as_ref().map_or_else(&default, |ss| {
- if ss.has_policy() && !ss.is_fake() {
+ if ss.has_policy() {
hook(ss)
} else {
default()
@@ -522,7 +522,7 @@
}
#[fuchsia::test]
- async fn no_state_update_for_fake_mode() {
+ async fn state_update_for_fake_mode() {
let security_server = security_server_with_policy(Mode::Fake);
let initial_state = SeLinuxThreadGroupState::for_kernel();
let (kernel, task) = create_kernel_and_task_with_selinux(security_server);
@@ -539,8 +539,13 @@
assert_ne!(elf_sid, initial_state.current_sid);
update_state_on_exec(&mut task, &Some(elf_state));
assert_eq!(
- task.thread_group.read().selinux_state.as_ref().expect("missing SELinux state"),
- &initial_state
+ task.thread_group
+ .read()
+ .selinux_state
+ .as_ref()
+ .expect("missing SELinux state")
+ .current_sid,
+ elf_sid
);
}
@@ -734,7 +739,7 @@
}
#[fuchsia::test]
- async fn post_setxattr_noop_selinux_fake() {
+ async fn post_setxattr_selinux_fake() {
let security_server = security_server_with_policy(Mode::Fake);
let (_kernel, current_task, mut locked) =
create_kernel_task_and_unlocked_with_selinux(security_server);
@@ -748,7 +753,7 @@
VALID_SECURITY_CONTEXT.into(),
);
- assert_eq!(None, node.cached_sid());
+ assert!(node.cached_sid().is_some());
}
#[fuchsia::test]
diff --git a/src/starnix/lib/ebpf/BUILD.gn b/src/starnix/lib/ebpf/BUILD.gn
index 641edcf..48d64de 100644
--- a/src/starnix/lib/ebpf/BUILD.gn
+++ b/src/starnix/lib/ebpf/BUILD.gn
@@ -46,6 +46,7 @@
inputs = [
"src/test_grammar.pest",
+ "src/tests/err-write-r10.data",
"//third_party/ubpf/src/tests/add64.data",
"//third_party/ubpf/src/tests/add.data",
"//third_party/ubpf/src/tests/alu64-arith.data",
diff --git a/src/starnix/lib/ebpf/src/conformance.rs b/src/starnix/lib/ebpf/src/conformance.rs
index 4eaf1d9..32ca491 100644
--- a/src/starnix/lib/ebpf/src/conformance.rs
+++ b/src/starnix/lib/ebpf/src/conformance.rs
@@ -528,139 +528,146 @@
assert_eq!(parse_asm(code).len(), 1);
}
- macro_rules! test_data {
+ macro_rules! ubpf_test_data {
($file_name:tt) => {
include_str!(concat!("../../../../../third_party/ubpf/src/tests/", $file_name))
};
}
- #[test_case(test_data!("add64.data"))]
- #[test_case(test_data!("add.data"))]
- #[test_case(test_data!("alu64-arith.data"))]
- #[test_case(test_data!("alu64-bit.data"))]
- #[test_case(test_data!("alu64.data"))]
- #[test_case(test_data!("alu-arith.data"))]
- #[test_case(test_data!("alu-bit.data"))]
- #[test_case(test_data!("alu.data"))]
- #[test_case(test_data!("arsh32-high-shift.data"))]
- #[test_case(test_data!("arsh64.data"))]
- #[test_case(test_data!("arsh.data"))]
- #[test_case(test_data!("arsh-reg.data"))]
- #[test_case(test_data!("be16.data"))]
- #[test_case(test_data!("be16-high.data"))]
- #[test_case(test_data!("be32.data"))]
- #[test_case(test_data!("be32-high.data"))]
- #[test_case(test_data!("be64.data"))]
- #[test_case(test_data!("call.data"))]
- #[test_case(test_data!("call-memfrob.data"))]
- #[test_case(test_data!("call-save.data"))]
- #[test_case(test_data!("div32-by-zero-reg.data"))]
- #[test_case(test_data!("div32-high-divisor.data"))]
- #[test_case(test_data!("div32-imm.data"))]
- #[test_case(test_data!("div32-reg.data"))]
- #[test_case(test_data!("div64-by-zero-imm.data"))]
- #[test_case(test_data!("div64-by-zero-reg.data"))]
- #[test_case(test_data!("div64-imm.data"))]
- #[test_case(test_data!("div64-negative-imm.data"))]
- #[test_case(test_data!("div64-negative-reg.data"))]
- #[test_case(test_data!("div64-reg.data"))]
- #[test_case(test_data!("div-by-zero-imm.data"))]
- #[test_case(test_data!("div-by-zero-reg.data"))]
- #[test_case(test_data!("early-exit.data"))]
- #[test_case(test_data!("err-call-bad-imm.data"))]
- #[test_case(test_data!("err-call-unreg.data"))]
- #[test_case(test_data!("err-endian-size.data"))]
- #[test_case(test_data!("err-incomplete-lddw2.data"))]
- #[test_case(test_data!("err-incomplete-lddw.data"))]
- #[test_case(test_data!("err-infinite-loop.data"))]
- #[test_case(test_data!("err-invalid-reg-dst.data"))]
- #[test_case(test_data!("err-invalid-reg-src.data"))]
- #[test_case(test_data!("err-jmp-lddw.data"))]
- #[test_case(test_data!("err-jmp-out.data"))]
- #[test_case(test_data!("err-lddw-invalid-src.data"))]
- #[test_case(test_data!("err-stack-oob.data"))]
- #[test_case(test_data!("err-too-many-instructions.data"))]
- #[test_case(test_data!("err-unknown-opcode.data"))]
- #[test_case(test_data!("exit.data"))]
- #[test_case(test_data!("exit-not-last.data"))]
- #[test_case(test_data!("ja.data"))]
- #[test_case(test_data!("jeq-imm.data"))]
- #[test_case(test_data!("jeq-reg.data"))]
- #[test_case(test_data!("jge-imm.data"))]
- #[test_case(test_data!("jgt-imm.data"))]
- #[test_case(test_data!("jgt-reg.data"))]
- #[test_case(test_data!("jit-bounce.data"))]
- #[test_case(test_data!("jle-imm.data"))]
- #[test_case(test_data!("jle-reg.data"))]
- #[test_case(test_data!("jlt-imm.data"))]
- #[test_case(test_data!("jlt-reg.data"))]
- #[test_case(test_data!("jmp.data"))]
- #[test_case(test_data!("jne-reg.data"))]
- #[test_case(test_data!("jset-imm.data"))]
- #[test_case(test_data!("jset-reg.data"))]
- #[test_case(test_data!("jsge-imm.data"))]
- #[test_case(test_data!("jsge-reg.data"))]
- #[test_case(test_data!("jsgt-imm.data"))]
- #[test_case(test_data!("jsgt-reg.data"))]
- #[test_case(test_data!("jsle-imm.data"))]
- #[test_case(test_data!("jsle-reg.data"))]
- #[test_case(test_data!("jslt-imm.data"))]
- #[test_case(test_data!("jslt-reg.data"))]
- #[test_case(test_data!("lddw2.data"))]
- #[test_case(test_data!("lddw.data"))]
- #[test_case(test_data!("ldxb-all.data"))]
- #[test_case(test_data!("ldxb.data"))]
- #[test_case(test_data!("ldx.data"))]
- #[test_case(test_data!("ldxdw.data"))]
- #[test_case(test_data!("ldxh-all2.data"))]
- #[test_case(test_data!("ldxh-all.data"))]
- #[test_case(test_data!("ldxh.data"))]
- #[test_case(test_data!("ldxh-same-reg.data"))]
- #[test_case(test_data!("ldxw-all.data"))]
- #[test_case(test_data!("ldxw.data"))]
- #[test_case(test_data!("le16.data"))]
- #[test_case(test_data!("le32.data"))]
- #[test_case(test_data!("le64.data"))]
- #[test_case(test_data!("lsh-reg.data"))]
- #[test_case(test_data!("mem-len.data"))]
- #[test_case(test_data!("mod32.data"))]
- #[test_case(test_data!("mod64-by-zero-imm.data"))]
- #[test_case(test_data!("mod64-by-zero-reg.data"))]
- #[test_case(test_data!("mod64.data"))]
- #[test_case(test_data!("mod-by-zero-imm.data"))]
- #[test_case(test_data!("mod-by-zero-reg.data"))]
- #[test_case(test_data!("mod.data"))]
- #[test_case(test_data!("mov64-sign-extend.data"))]
- #[test_case(test_data!("mov.data"))]
- #[test_case(test_data!("mul32-imm.data"))]
- #[test_case(test_data!("mul32-reg.data"))]
- #[test_case(test_data!("mul32-reg-overflow.data"))]
- #[test_case(test_data!("mul64-imm.data"))]
- #[test_case(test_data!("mul64-reg.data"))]
- #[test_case(test_data!("mul-loop.data"))]
- #[test_case(test_data!("neg64.data"))]
- #[test_case(test_data!("neg.data"))]
- #[test_case(test_data!("prime.data"))]
- #[test_case(test_data!("rsh32.data"))]
- #[test_case(test_data!("rsh-reg.data"))]
- #[test_case(test_data!("stack2.data"))]
- #[test_case(test_data!("stack3.data"))]
- #[test_case(test_data!("stack.data"))]
- #[test_case(test_data!("stb.data"))]
- #[test_case(test_data!("st.data"))]
- #[test_case(test_data!("stdw.data"))]
- #[test_case(test_data!("sth.data"))]
- #[test_case(test_data!("string-stack.data"))]
- #[test_case(test_data!("stw.data"))]
- #[test_case(test_data!("stxb-all2.data"))]
- #[test_case(test_data!("stxb-all.data"))]
- #[test_case(test_data!("stxb-chain.data"))]
- #[test_case(test_data!("stxb.data"))]
- #[test_case(test_data!("stx.data"))]
- #[test_case(test_data!("stxdw.data"))]
- #[test_case(test_data!("stxh.data"))]
- #[test_case(test_data!("stxw.data"))]
- #[test_case(test_data!("subnet.data"))]
+ macro_rules! local_test_data {
+ ($file_name:tt) => {
+ include_str!(concat!("tests/", $file_name))
+ };
+ }
+
+ #[test_case(ubpf_test_data!("add64.data"))]
+ #[test_case(ubpf_test_data!("add.data"))]
+ #[test_case(ubpf_test_data!("alu64-arith.data"))]
+ #[test_case(ubpf_test_data!("alu64-bit.data"))]
+ #[test_case(ubpf_test_data!("alu64.data"))]
+ #[test_case(ubpf_test_data!("alu-arith.data"))]
+ #[test_case(ubpf_test_data!("alu-bit.data"))]
+ #[test_case(ubpf_test_data!("alu.data"))]
+ #[test_case(ubpf_test_data!("arsh32-high-shift.data"))]
+ #[test_case(ubpf_test_data!("arsh64.data"))]
+ #[test_case(ubpf_test_data!("arsh.data"))]
+ #[test_case(ubpf_test_data!("arsh-reg.data"))]
+ #[test_case(ubpf_test_data!("be16.data"))]
+ #[test_case(ubpf_test_data!("be16-high.data"))]
+ #[test_case(ubpf_test_data!("be32.data"))]
+ #[test_case(ubpf_test_data!("be32-high.data"))]
+ #[test_case(ubpf_test_data!("be64.data"))]
+ #[test_case(ubpf_test_data!("call.data"))]
+ #[test_case(ubpf_test_data!("call-memfrob.data"))]
+ #[test_case(ubpf_test_data!("call-save.data"))]
+ #[test_case(ubpf_test_data!("div32-by-zero-reg.data"))]
+ #[test_case(ubpf_test_data!("div32-high-divisor.data"))]
+ #[test_case(ubpf_test_data!("div32-imm.data"))]
+ #[test_case(ubpf_test_data!("div32-reg.data"))]
+ #[test_case(ubpf_test_data!("div64-by-zero-imm.data"))]
+ #[test_case(ubpf_test_data!("div64-by-zero-reg.data"))]
+ #[test_case(ubpf_test_data!("div64-imm.data"))]
+ #[test_case(ubpf_test_data!("div64-negative-imm.data"))]
+ #[test_case(ubpf_test_data!("div64-negative-reg.data"))]
+ #[test_case(ubpf_test_data!("div64-reg.data"))]
+ #[test_case(ubpf_test_data!("div-by-zero-imm.data"))]
+ #[test_case(ubpf_test_data!("div-by-zero-reg.data"))]
+ #[test_case(ubpf_test_data!("early-exit.data"))]
+ #[test_case(ubpf_test_data!("err-call-bad-imm.data"))]
+ #[test_case(ubpf_test_data!("err-call-unreg.data"))]
+ #[test_case(ubpf_test_data!("err-endian-size.data"))]
+ #[test_case(ubpf_test_data!("err-incomplete-lddw2.data"))]
+ #[test_case(ubpf_test_data!("err-incomplete-lddw.data"))]
+ #[test_case(ubpf_test_data!("err-infinite-loop.data"))]
+ #[test_case(ubpf_test_data!("err-invalid-reg-dst.data"))]
+ #[test_case(ubpf_test_data!("err-invalid-reg-src.data"))]
+ #[test_case(ubpf_test_data!("err-jmp-lddw.data"))]
+ #[test_case(ubpf_test_data!("err-jmp-out.data"))]
+ #[test_case(ubpf_test_data!("err-lddw-invalid-src.data"))]
+ #[test_case(ubpf_test_data!("err-stack-oob.data"))]
+ #[test_case(ubpf_test_data!("err-too-many-instructions.data"))]
+ #[test_case(ubpf_test_data!("err-unknown-opcode.data"))]
+ #[test_case(ubpf_test_data!("exit.data"))]
+ #[test_case(ubpf_test_data!("exit-not-last.data"))]
+ #[test_case(ubpf_test_data!("ja.data"))]
+ #[test_case(ubpf_test_data!("jeq-imm.data"))]
+ #[test_case(ubpf_test_data!("jeq-reg.data"))]
+ #[test_case(ubpf_test_data!("jge-imm.data"))]
+ #[test_case(ubpf_test_data!("jgt-imm.data"))]
+ #[test_case(ubpf_test_data!("jgt-reg.data"))]
+ #[test_case(ubpf_test_data!("jit-bounce.data"))]
+ #[test_case(ubpf_test_data!("jle-imm.data"))]
+ #[test_case(ubpf_test_data!("jle-reg.data"))]
+ #[test_case(ubpf_test_data!("jlt-imm.data"))]
+ #[test_case(ubpf_test_data!("jlt-reg.data"))]
+ #[test_case(ubpf_test_data!("jmp.data"))]
+ #[test_case(ubpf_test_data!("jne-reg.data"))]
+ #[test_case(ubpf_test_data!("jset-imm.data"))]
+ #[test_case(ubpf_test_data!("jset-reg.data"))]
+ #[test_case(ubpf_test_data!("jsge-imm.data"))]
+ #[test_case(ubpf_test_data!("jsge-reg.data"))]
+ #[test_case(ubpf_test_data!("jsgt-imm.data"))]
+ #[test_case(ubpf_test_data!("jsgt-reg.data"))]
+ #[test_case(ubpf_test_data!("jsle-imm.data"))]
+ #[test_case(ubpf_test_data!("jsle-reg.data"))]
+ #[test_case(ubpf_test_data!("jslt-imm.data"))]
+ #[test_case(ubpf_test_data!("jslt-reg.data"))]
+ #[test_case(ubpf_test_data!("lddw2.data"))]
+ #[test_case(ubpf_test_data!("lddw.data"))]
+ #[test_case(ubpf_test_data!("ldxb-all.data"))]
+ #[test_case(ubpf_test_data!("ldxb.data"))]
+ #[test_case(ubpf_test_data!("ldx.data"))]
+ #[test_case(ubpf_test_data!("ldxdw.data"))]
+ #[test_case(ubpf_test_data!("ldxh-all2.data"))]
+ #[test_case(ubpf_test_data!("ldxh-all.data"))]
+ #[test_case(ubpf_test_data!("ldxh.data"))]
+ #[test_case(ubpf_test_data!("ldxh-same-reg.data"))]
+ #[test_case(ubpf_test_data!("ldxw-all.data"))]
+ #[test_case(ubpf_test_data!("ldxw.data"))]
+ #[test_case(ubpf_test_data!("le16.data"))]
+ #[test_case(ubpf_test_data!("le32.data"))]
+ #[test_case(ubpf_test_data!("le64.data"))]
+ #[test_case(ubpf_test_data!("lsh-reg.data"))]
+ #[test_case(ubpf_test_data!("mem-len.data"))]
+ #[test_case(ubpf_test_data!("mod32.data"))]
+ #[test_case(ubpf_test_data!("mod64-by-zero-imm.data"))]
+ #[test_case(ubpf_test_data!("mod64-by-zero-reg.data"))]
+ #[test_case(ubpf_test_data!("mod64.data"))]
+ #[test_case(ubpf_test_data!("mod-by-zero-imm.data"))]
+ #[test_case(ubpf_test_data!("mod-by-zero-reg.data"))]
+ #[test_case(ubpf_test_data!("mod.data"))]
+ #[test_case(ubpf_test_data!("mov64-sign-extend.data"))]
+ #[test_case(ubpf_test_data!("mov.data"))]
+ #[test_case(ubpf_test_data!("mul32-imm.data"))]
+ #[test_case(ubpf_test_data!("mul32-reg.data"))]
+ #[test_case(ubpf_test_data!("mul32-reg-overflow.data"))]
+ #[test_case(ubpf_test_data!("mul64-imm.data"))]
+ #[test_case(ubpf_test_data!("mul64-reg.data"))]
+ #[test_case(ubpf_test_data!("mul-loop.data"))]
+ #[test_case(ubpf_test_data!("neg64.data"))]
+ #[test_case(ubpf_test_data!("neg.data"))]
+ #[test_case(ubpf_test_data!("prime.data"))]
+ #[test_case(ubpf_test_data!("rsh32.data"))]
+ #[test_case(ubpf_test_data!("rsh-reg.data"))]
+ #[test_case(ubpf_test_data!("stack2.data"))]
+ #[test_case(ubpf_test_data!("stack3.data"))]
+ #[test_case(ubpf_test_data!("stack.data"))]
+ #[test_case(ubpf_test_data!("stb.data"))]
+ #[test_case(ubpf_test_data!("st.data"))]
+ #[test_case(ubpf_test_data!("stdw.data"))]
+ #[test_case(ubpf_test_data!("sth.data"))]
+ #[test_case(ubpf_test_data!("string-stack.data"))]
+ #[test_case(ubpf_test_data!("stw.data"))]
+ #[test_case(ubpf_test_data!("stxb-all2.data"))]
+ #[test_case(ubpf_test_data!("stxb-all.data"))]
+ #[test_case(ubpf_test_data!("stxb-chain.data"))]
+ #[test_case(ubpf_test_data!("stxb.data"))]
+ #[test_case(ubpf_test_data!("stx.data"))]
+ #[test_case(ubpf_test_data!("stxdw.data"))]
+ #[test_case(ubpf_test_data!("stxh.data"))]
+ #[test_case(ubpf_test_data!("stxw.data"))]
+ #[test_case(ubpf_test_data!("subnet.data"))]
+ #[test_case(local_test_data!("err-write-r10.data"))]
fn test_ebpf_conformance(content: &str) {
let Some(mut test_case) = TestCase::parse(content) else {
// Special case that only test the test framework.
diff --git a/src/starnix/lib/ebpf/src/tests/err-write-r10.data b/src/starnix/lib/ebpf/src/tests/err-write-r10.data
new file mode 100644
index 0000000..58b8549
--- /dev/null
+++ b/src/starnix/lib/ebpf/src/tests/err-write-r10.data
@@ -0,0 +1,5 @@
+-- asm
+mov %r10, 0
+exit
+-- error
+r10 out of bound
diff --git a/src/storage/conformance/conformance_harness/sdkcpp/harness.cc b/src/storage/conformance/conformance_harness/sdkcpp/harness.cc
index 8007f2a..ea4edf9 100644
--- a/src/storage/conformance/conformance_harness/sdkcpp/harness.cc
+++ b/src/storage/conformance/conformance_harness/sdkcpp/harness.cc
@@ -69,7 +69,7 @@
}
}
- // TODO(https://fxbug.dev/29393642): Support the new C++ bindings in the SDK VFS so that we can
+ // TODO(https://fxbug.dev/311176363): Support the new C++ bindings in the SDK VFS so that we can
// use `fuchsia_io::OpenFlags` instead of the deprecated HLCPP `fuchsia::io::OpenFlags` type.
fuchsia::io::OpenFlags flags = fuchsia::io::OpenFlags{static_cast<uint32_t>(request.flags())};
ZX_ASSERT_MSG(dir->Serve(flags, request.directory_request().TakeChannel()) == ZX_OK,
@@ -96,7 +96,7 @@
case fio_test::DirectoryEntry::Tag::kRemoteDirectory: {
fio_test::RemoteDirectory remote_directory = std::move(entry.remote_directory().value());
- // TODO(https://fxbug.dev/29393642): Support the new C++ bindings in the SDK VFS so we can
+ // TODO(https://fxbug.dev/311176363): Support the new C++ bindings in the SDK VFS so we can
// construct a `vfs::RemoteDir` using a `fidl::ClientEnd` directly.
auto remote_dir_entry =
std::make_unique<vfs::RemoteDir>(remote_directory.remote_client()->TakeChannel());
@@ -124,7 +124,7 @@
ZX_ASSERT_MSG(status == ZX_OK, "Failed to get VMO content size: %s",
zx_status_get_string(status));
auto vmo_file_entry = std::make_unique<vfs::VmoFile>(std::move(vmo), size,
- vfs::VmoFile::WriteOption::WRITABLE);
+ vfs::VmoFile::WriteMode::kWritable);
ZX_ASSERT_MSG(dest.AddEntry(*vmo_file.name(), std::move(vmo_file_entry)) == ZX_OK,
"Failed to add VmoFile entry!");
break;
diff --git a/src/storage/f2fs/test/unit/unit_lib.cc b/src/storage/f2fs/test/unit/unit_lib.cc
index a2cf54a..9c59201 100644
--- a/src/storage/f2fs/test/unit/unit_lib.cc
+++ b/src/storage/f2fs/test/unit/unit_lib.cc
@@ -236,7 +236,7 @@
uint8_t *buf_ptr = buf;
while (len > 0 && buf_ptr < buf + kPageSize) {
-// TODO(b/293936429): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
+// TODO(b/293947862): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
// as part of io2 migration.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
diff --git a/src/storage/lib/vfs/cpp/dir_test_util.h b/src/storage/lib/vfs/cpp/dir_test_util.h
index 9cfe4ce..5e70269 100644
--- a/src/storage/lib/vfs/cpp/dir_test_util.h
+++ b/src/storage/lib/vfs/cpp/dir_test_util.h
@@ -31,7 +31,7 @@
void ExpectEntry(const char* name, uint32_t vtype) {
ASSERT_NE(0u, remaining_);
-// TODO(b/293936429): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
+// TODO(b/293947862): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
// as part of io2 migration.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
diff --git a/src/storage/lib/vfs/cpp/vnode.cc b/src/storage/lib/vfs/cpp/vnode.cc
index e9cd31a..fcbae54 100644
--- a/src/storage/lib/vfs/cpp/vnode.cc
+++ b/src/storage/lib/vfs/cpp/vnode.cc
@@ -196,7 +196,7 @@
: ptr_(static_cast<char*>(ptr)), pos_(0), len_(len) {}
zx_status_t DirentFiller::Next(std::string_view name, uint8_t type, uint64_t ino) {
-// TODO(b/293936429): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
+// TODO(b/293947862): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
// as part of io2 migration.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
diff --git a/src/storage/lib/vfs/cpp/watcher.cc b/src/storage/lib/vfs/cpp/watcher.cc
index 1e514e4..dc44e8f 100644
--- a/src/storage/lib/vfs/cpp/watcher.cc
+++ b/src/storage/lib/vfs/cpp/watcher.cc
@@ -135,7 +135,7 @@
break;
}
char* ptr = readdir_buf;
-// TODO(b/293936429): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
+// TODO(b/293947862): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
// as part of io2 migration.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
diff --git a/src/storage/minfs/host.cc b/src/storage/minfs/host.cc
index 776a25a..c1d5abe 100644
--- a/src/storage/minfs/host.cc
+++ b/src/storage/minfs/host.cc
@@ -491,7 +491,7 @@
dirent* emu_readdir(DIR* dirp) {
MinDir* dir = reinterpret_cast<MinDir*>(dirp);
for (;;) {
-// TODO(b/293936429): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
+// TODO(b/293947862): Remove use of deprecated `vdirent_t` when transitioning ReadDir to Enumerate
// as part of io2 migration.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
diff --git a/src/sys/component_manager/src/builtin/system_controller.rs b/src/sys/component_manager/src/builtin/system_controller.rs
index eed22a0..7aca86a 100644
--- a/src/sys/component_manager/src/builtin/system_controller.rs
+++ b/src/sys/component_manager/src/builtin/system_controller.rs
@@ -98,7 +98,7 @@
queue.extend(resolved_state.children().map(|(_, i)| i.clone()));
let execution_state = next.lock_execution();
- if execution_state.is_started() && !execution_state.is_shut_down() {
+ if execution_state.is_started() {
monikers.push(next.moniker.to_string());
}
}
diff --git a/src/sys/component_manager/src/directory_ready_notifier.rs b/src/sys/component_manager/src/directory_ready_notifier.rs
index 2fadfec..02317d8 100644
--- a/src/sys/component_manager/src/directory_ready_notifier.rs
+++ b/src/sys/component_manager/src/directory_ready_notifier.rs
@@ -263,7 +263,10 @@
};
let decl = match *component.lock_state().await {
InstanceState::Resolved(ref s) => s.decl().clone(),
- InstanceState::New | InstanceState::Unresolved(_) | InstanceState::Destroyed => {
+ InstanceState::New
+ | InstanceState::Unresolved(_)
+ | InstanceState::Shutdown(_, _)
+ | InstanceState::Destroyed => {
return vec![];
}
};
diff --git a/src/sys/component_manager/src/framework/lifecycle_controller.rs b/src/sys/component_manager/src/framework/lifecycle_controller.rs
index bc3cf06..a862057 100644
--- a/src/sys/component_manager/src/framework/lifecycle_controller.rs
+++ b/src/sys/component_manager/src/framework/lifecycle_controller.rs
@@ -121,8 +121,7 @@
.map_err(|_| fsys::CreateError::BadMoniker)?;
let parent_component =
model.root().find_and_maybe_resolve(&parent_moniker).await.map_err(|e| match e {
- ModelError::PathIsNotUtf8 { path: _ }
- | ModelError::UnexpectedComponentManagerMoniker
+ ModelError::UnexpectedComponentManagerMoniker
| ModelError::ComponentInstanceError { err: _ } => {
fsys::CreateError::InstanceNotFound
}
diff --git a/src/sys/component_manager/src/model/actions/destroy.rs b/src/sys/component_manager/src/model/actions/destroy.rs
index 77ba5e7..5fa5de7 100644
--- a/src/sys/component_manager/src/model/actions/destroy.rs
+++ b/src/sys/component_manager/src/model/actions/destroy.rs
@@ -42,92 +42,110 @@
}
async fn do_destroy(component: &Arc<ComponentInstance>) -> Result<(), ActionError> {
- // Do nothing if already destroyed.
- {
- if let InstanceState::Destroyed = *component.lock_state().await {
- return Ok(());
+ loop {
+ // Do nothing if already destroyed.
+ {
+ if let InstanceState::Destroyed = *component.lock_state().await {
+ return Ok(());
+ }
}
- }
- // Require the component to be discovered before deleting it so a Destroyed event is
- // always preceded by a Discovered.
- // TODO: wait for a discover, don't register a new one
- ActionSet::register(component.clone(), DiscoverAction::new(ComponentInput::default())).await?;
+ // Require the component to be discovered before deleting it so a Destroyed event is
+ // always preceded by a Discovered.
+ // TODO: wait for a discover, don't register a new one
+ ActionSet::register(component.clone(), DiscoverAction::new(ComponentInput::default()))
+ .await?;
- // For destruction to behave correctly, the component has to be shut down first.
- // NOTE: This will recursively shut down the whole subtree. If this component has children,
- // we'll call DestroyChild on them which in turn will call Shutdown on the child. Because
- // the parent's subtree was shutdown, this shutdown is a no-op.
- ActionSet::register(component.clone(), ShutdownAction::new(ShutdownType::Instance))
- .await
- .map_err(|e| DestroyActionError::ShutdownFailed { err: Box::new(e) })?;
+ // For destruction to behave correctly, the component has to be shut down first.
+ // NOTE: This will recursively shut down the whole subtree. If this component has children,
+ // we'll call DestroyChild on them which in turn will call Shutdown on the child. Because
+ // the parent's subtree was shutdown, this shutdown is a no-op.
+ ActionSet::register(component.clone(), ShutdownAction::new(ShutdownType::Instance))
+ .await
+ .map_err(|e| DestroyActionError::ShutdownFailed { err: Box::new(e) })?;
- let nfs = {
- match *component.lock_state().await {
- InstanceState::Resolved(ref s) => {
- let mut nfs = vec![];
- for (m, c) in s.children() {
- let component = component.clone();
- let m = m.clone();
- let incarnation = c.incarnation_id();
- let nf = async move { component.destroy_child(m, incarnation).await };
- nfs.push(nf);
+ let nfs = {
+ match *component.lock_state().await {
+ InstanceState::Shutdown(ref state, _) => {
+ let mut nfs = vec![];
+ for (m, c) in state.children.iter() {
+ let component = component.clone();
+ let m = m.clone();
+ let incarnation = c.incarnation_id();
+ let nf = async move { component.destroy_child(m, incarnation).await };
+ nfs.push(nf);
+ }
+ nfs
}
- nfs
+ InstanceState::Unresolved(_) | InstanceState::Resolved(_) => {
+ // The instance is not shut down, we must have raced with an unresolve action
+ // (potentially followed by a resolve action). Let's try again.
+ continue;
+ }
+ InstanceState::New => {
+ panic!("discover action returned above but the component is undiscovered, this should be impossible");
+ }
+ InstanceState::Destroyed => {
+ panic!(
+ "component was destroyed earlier but is not now, this should be impossible"
+ );
+ }
}
- InstanceState::New | InstanceState::Unresolved(_) | InstanceState::Destroyed => {
- // Component was never resolved. No explicit cleanup is required for children.
- vec![]
+ };
+ let results = join_all(nfs).await;
+ ok_or_first_error(results)?;
+
+ // Now that all children have been destroyed, destroy the parent.
+ component.destroy_instance().await?;
+
+ // Wait for any remaining blocking tasks and actions finish up.
+ fn wait(nf: Option<impl Future + Send + 'static>) -> BoxFuture<'static, ()> {
+ Box::pin(async {
+ if let Some(nf) = nf {
+ nf.await;
+ }
+ })
+ }
+ let task_shutdown = Box::pin(component.blocking_task_group().join());
+ let nfs = {
+ let actions = component.lock_actions().await;
+ vec![
+ wait(actions.wait(ResolveAction::new())),
+ wait(actions.wait(StartAction::new(
+ StartReason::Debug,
+ None,
+ IncomingCapabilities::default(),
+ ))),
+ task_shutdown,
+ ]
+ };
+ join_all(nfs.into_iter()).await;
+
+ // Only consider the component fully destroyed once it's no longer executing any lifecycle
+ // transitions.
+ component.lock_state().await.set(InstanceState::Destroyed);
+
+ // Send the Destroyed event for the component
+ let event = Event::new(&component, EventPayload::Destroyed);
+ component.hooks.dispatch(&event).await;
+
+ // Remove this component from the parent's list of children
+ if let Some(child_name) = component.moniker.leaf() {
+ if let Ok(ExtendedInstanceInterface::Component(parent)) = component.parent.upgrade() {
+ match *parent.lock_state().await {
+ InstanceState::Resolved(ref mut resolved_state) => {
+ resolved_state.remove_child(child_name);
+ }
+ InstanceState::Shutdown(ref mut state, _) => {
+ state.children.remove(child_name);
+ }
+ _ => (),
+ }
}
}
- };
- let results = join_all(nfs).await;
- ok_or_first_error(results)?;
- // Now that all children have been destroyed, destroy the parent.
- component.destroy_instance().await?;
-
- // Wait for any remaining blocking tasks and actions finish up.
- fn wait(nf: Option<impl Future + Send + 'static>) -> BoxFuture<'static, ()> {
- Box::pin(async {
- if let Some(nf) = nf {
- nf.await;
- }
- })
+ return Ok(());
}
- let task_shutdown = Box::pin(component.blocking_task_group().join());
- let nfs = {
- let actions = component.lock_actions().await;
- vec![
- wait(actions.wait(ResolveAction::new())),
- wait(actions.wait(StartAction::new(
- StartReason::Debug,
- None,
- IncomingCapabilities::default(),
- ))),
- task_shutdown,
- ]
- };
- join_all(nfs.into_iter()).await;
-
- // Only consider the component fully destroyed once it's no longer executing any lifecycle
- // transitions.
- component.lock_state().await.set(InstanceState::Destroyed);
-
- // Send the Destroyed event for the component
- let event = Event::new(&component, EventPayload::Destroyed);
- component.hooks.dispatch(&event).await;
-
- // Remove this component from the parent's list of children
- if let Ok(ExtendedInstanceInterface::Component(parent)) = component.parent.upgrade() {
- if let Ok(mut resolved_state) = parent.lock_resolved_state().await {
- if let Some(child_name) = component.moniker.leaf() {
- resolved_state.remove_child(child_name)
- }
- }
- }
-
- Ok(())
}
fn ok_or_first_error(results: Vec<Result<(), ActionError>>) -> Result<(), ActionError> {
@@ -140,8 +158,6 @@
super::*,
crate::model::{
actions::test_utils::{is_child_deleted, is_destroyed},
- events::{registry::EventSubscription, stream::EventStream},
- hooks::EventType,
testing::{
test_helpers::{
component_decl_with_test_runner, execution_is_shut_down, get_incarnation_id,
@@ -150,10 +166,8 @@
test_hook::Lifecycle,
},
},
- assert_matches::assert_matches,
- cm_rust::{Availability, UseEventStreamDecl, UseSource},
cm_rust_testing::*,
- fidl_fuchsia_component_decl as fdecl, fuchsia_async as fasync, fuchsia_zircon as zx,
+ fuchsia_async as fasync, fuchsia_zircon as zx,
futures::{channel::mpsc, StreamExt},
moniker::{ChildName, Moniker},
std::sync::atomic::Ordering,
@@ -333,34 +347,6 @@
}
}
- async fn setup_destroy_waits_test_event_stream(
- test: &ActionsTest,
- event_types: Vec<EventType>,
- ) -> EventStream {
- let events: Vec<_> = event_types
- .into_iter()
- .map(|e| UseEventStreamDecl {
- source_name: e.into(),
- source: UseSource::Parent,
- scope: None,
- target_path: "/svc/fuchsia.component.EventStream".parse().unwrap(),
- filter: None,
- availability: Availability::Required,
- })
- .collect();
- let mut event_source =
- test.builtin_environment.lock().await.event_source_factory.create_for_above_root();
- let event_stream = event_source
- .subscribe(
- events.into_iter().map(|event| EventSubscription { event_name: event }).collect(),
- )
- .await
- .expect("subscribe to event stream");
- let model = test.model.clone();
- fasync::Task::spawn(async move { model.start(ComponentInput::default()).await }).detach();
- event_stream
- }
-
async fn run_destroy_waits_test(
mock_action_key: ActionKey,
mock_action_result: Result<(), ActionError>,
@@ -539,67 +525,6 @@
}
#[fuchsia::test]
- async fn destroy_registers_discover() {
- let components = vec![("root", ComponentDeclBuilder::new().build())];
- let test = ActionsTest::new("root", components, None).await;
- let component_root = test.model.root();
- // This setup circumvents the registration of the Discover action on component_a.
- {
- let mut resolved_state = component_root.lock_resolved_state().await.unwrap();
- let child = cm_rust::ChildDecl {
- name: format!("a"),
- url: format!("test:///a"),
- startup: fdecl::StartupMode::Lazy,
- environment: None,
- on_terminate: None,
- config_overrides: None,
- };
- assert!(resolved_state
- .add_child_no_discover(&component_root, &child, None)
- .await
- .is_ok());
- }
- let mut event_stream = setup_destroy_waits_test_event_stream(
- &test,
- vec![EventType::Discovered, EventType::Destroyed],
- )
- .await;
-
- // Shut down component so we can destroy it.
- let component_a = match *component_root.lock_state().await {
- InstanceState::Resolved(ref s) => {
- s.get_child(&ChildName::try_from("a").unwrap()).expect("child a not found").clone()
- }
- _ => panic!("not resolved"),
- };
- ActionSet::register(component_a.clone(), ShutdownAction::new(ShutdownType::Instance))
- .await
- .expect("shutdown failed");
-
- // Confirm component is still in New state.
- {
- let state = &*component_a.lock_state().await;
- assert_matches!(state, InstanceState::New);
- };
-
- // Register DestroyChild.
- let component_root_clone = component_root.clone();
- let nf = fasync::Task::spawn(async move {
- component_root_clone.destroy_child("a".try_into().unwrap(), 0).await
- });
-
- // Wait for Discover action, which should be registered by Destroy, followed by
- // Destroyed.
- event_stream
- .wait_until(EventType::Discovered, vec!["a"].try_into().unwrap())
- .await
- .unwrap();
- event_stream.wait_until(EventType::Destroyed, vec!["a"].try_into().unwrap()).await.unwrap();
- nf.await.unwrap();
- assert!(is_child_deleted(&component_root, &component_a).await);
- }
-
- #[fuchsia::test]
async fn destroy_not_resolved() {
let components = vec![
("root", ComponentDeclBuilder::new().child_default("a").build()),
diff --git a/src/sys/component_manager/src/model/actions/discover.rs b/src/sys/component_manager/src/model/actions/discover.rs
index 145b8f4..afb9ef7 100644
--- a/src/sys/component_manager/src/model/actions/discover.rs
+++ b/src/sys/component_manager/src/model/actions/discover.rs
@@ -46,6 +46,7 @@
InstanceState::New => false,
InstanceState::Unresolved(_) => true,
InstanceState::Resolved(_) => true,
+ InstanceState::Shutdown(_, _) => true,
InstanceState::Destroyed => {
return Err(DiscoverActionError::InstanceDestroyed {
moniker: component.moniker.clone(),
@@ -65,7 +66,7 @@
"Component in unexpected state after discover"
);
match *state {
- InstanceState::Destroyed => {
+ InstanceState::Shutdown(_, _) | InstanceState::Destroyed => {
// Nothing to do.
}
InstanceState::Unresolved(_) | InstanceState::Resolved(_) => {
diff --git a/src/sys/component_manager/src/model/actions/mod.rs b/src/sys/component_manager/src/model/actions/mod.rs
index ab1bcb4..515b29e 100644
--- a/src/sys/component_manager/src/model/actions/mod.rs
+++ b/src/sys/component_manager/src/model/actions/mod.rs
@@ -503,7 +503,6 @@
found_child.is_none()
&& matches!(*child_state, InstanceState::Destroyed)
&& child_execution.runtime.is_none()
- && child_execution.is_shut_down()
}
pub async fn is_stopped(component: &ComponentInstance, moniker: &ChildName) -> bool {
@@ -512,6 +511,7 @@
Some(child) => !child.is_started(),
None => false,
},
+ InstanceState::Shutdown(_, _) => true,
InstanceState::Destroyed => false,
InstanceState::New | InstanceState::Unresolved(_) => {
panic!("not resolved")
@@ -522,9 +522,7 @@
pub async fn is_destroyed(component: &ComponentInstance) -> bool {
let state = component.lock_state().await;
let execution = component.lock_execution();
- matches!(*state, InstanceState::Destroyed)
- && execution.runtime.is_none()
- && execution.is_shut_down()
+ matches!(*state, InstanceState::Destroyed) && execution.runtime.is_none()
}
pub async fn is_resolved(component: &ComponentInstance) -> bool {
@@ -536,11 +534,4 @@
let state = component.lock_state().await;
matches!(*state, InstanceState::Unresolved(_))
}
-
- pub async fn is_unresolved(component: &ComponentInstance) -> bool {
- let state = component.lock_state().await;
- let execution = component.lock_execution();
- execution.runtime.is_none()
- && matches!(*state, InstanceState::New | InstanceState::Unresolved(_))
- }
}
diff --git a/src/sys/component_manager/src/model/actions/resolve/mod.rs b/src/sys/component_manager/src/model/actions/resolve/mod.rs
index acebe94..a43c2e9 100644
--- a/src/sys/component_manager/src/model/actions/resolve/mod.rs
+++ b/src/sys/component_manager/src/model/actions/resolve/mod.rs
@@ -42,8 +42,8 @@
async fn do_resolve(component: &Arc<ComponentInstance>) -> Result<(), ResolveActionError> {
{
- let execution = component.lock_execution();
- if execution.is_shut_down() {
+ let state = component.lock_state().await;
+ if state.is_shut_down() {
return Err(ResolveActionError::InstanceShutDown {
moniker: component.moniker.clone(),
});
@@ -55,8 +55,6 @@
component.lock_actions().await.wait_for_action(ActionKey::Discover);
discover_completed.await.unwrap();
}
- // Let's comment it out and see what happens. Fingers crossed this was superfluous. There's a
- // panic right below this that should catch if we're undiscovered.
let result = async move {
let first_resolve = {
let state = component.lock_state().await;
@@ -66,6 +64,11 @@
}
InstanceState::Unresolved(_) => true,
InstanceState::Resolved(_) => false,
+ InstanceState::Shutdown(_, _) => {
+ return Err(ResolveActionError::InstanceShutDown {
+ moniker: component.moniker.clone(),
+ });
+ }
InstanceState::Destroyed => {
return Err(ResolveActionError::InstanceDestroyed {
moniker: component.moniker.clone(),
@@ -106,6 +109,11 @@
InstanceState::Resolved(_) => {
panic!("Component was marked Resolved during Resolve action?");
}
+ InstanceState::Shutdown(_, _) => {
+ return Err(ResolveActionError::InstanceShutDown {
+ moniker: component.moniker.clone(),
+ });
+ }
InstanceState::New => {
panic!("Component was not marked Discovered before Resolve action?");
}
@@ -215,7 +223,7 @@
ActionSet::register(component_a.clone(), ResolveAction::new()).await,
Err(ActionError::ResolveError { err: ResolveActionError::InstanceShutDown { .. } })
);
- assert!(is_resolved(&component_a).await);
+ assert!(!is_resolved(&component_a).await);
assert!(is_stopped(&component_root, &"a".try_into().unwrap()).await);
}
}
diff --git a/src/sys/component_manager/src/model/actions/resolve/sandbox_construction.rs b/src/sys/component_manager/src/model/actions/resolve/sandbox_construction.rs
index ec1dc0d..b312f55 100644
--- a/src/sys/component_manager/src/model/actions/resolve/sandbox_construction.rs
+++ b/src/sys/component_manager/src/model/actions/resolve/sandbox_construction.rs
@@ -6,7 +6,9 @@
crate::{
capability::CapabilitySource,
model::{
- component::{ComponentInstance, ResolvedInstanceState, WeakComponentInstance},
+ component::{
+ ComponentInstance, InstanceState, ResolvedInstanceState, WeakComponentInstance,
+ },
routing::router::{Request, Router},
},
sandbox_util::{DictExt, LaunchTaskOnReceive},
@@ -650,36 +652,32 @@
};
async move {
let router = {
- let state = match component.lock_resolved_state().await {
- Ok(state) => state,
- Err(err) => {
- return Err(RoutingError::from(ComponentInstanceError::resolve_failed(
- component.moniker.clone(),
- err,
- ))
- .into());
+ match *component.lock_state().await {
+ InstanceState::Resolved(ref state)
+ if state.program_input_dict_additions.is_some() =>
+ {
+ let additions = state.program_input_dict_additions.as_ref().unwrap();
+ match additions.get_capability(&source_path) {
+ // There's an addition to the program input dictionary for this
+ // capability, let's use it.
+ Some(Capability::Open(o)) => Router::new_ok(o),
+ // There's no addition to the program input dictionary for this
+ // capability, let's use the component input dictionary.
+ _ => component_input_capability,
+ }
}
- };
- // Try to get the capability from the incoming dict, which was passed when the child was
- // started.
- //
- // Unlike the program input dict below that contains Routers created by
- // component manager, the incoming dict may contain capabilities created externally.
- // Currently there is no way to create a Router externally, so assume these
- // are Open capabilities and convert them to Router here.
- //
- // TODO(https://fxbug.dev/319542502): Convert from the external Router type, once it
- // exists.
- state
- .program_input_dict_additions
- .as_ref()
- .and_then(|dict| match dict.get_capability(&source_path) {
- Some(Capability::Open(o)) => Some(Router::new_ok(o)),
- _ => None,
- })
- // Try to get the capability from the component input dict, created from static
- // routes when the component was resolved.
- .unwrap_or(component_input_capability)
+ _ => {
+ // If the component is not resolved and/or does not have additions to the
+ // program input dictionary, then route this capability without any
+ // additions.
+ //
+ // NOTE: there's a chance that the component is in the shutdown stage here.
+ // The stop action clears the program_input_dict_additions, so even if
+ // additions were set the last time the component was run they won't apply
+ // after the component has stopped.
+ component_input_capability
+ }
+ }
};
router.route(request).await
}
diff --git a/src/sys/component_manager/src/model/actions/shutdown.rs b/src/sys/component_manager/src/model/actions/shutdown.rs
index dde5ec3d..3ea8544 100644
--- a/src/sys/component_manager/src/model/actions/shutdown.rs
+++ b/src/sys/component_manager/src/model/actions/shutdown.rs
@@ -263,6 +263,12 @@
component: &Arc<ComponentInstance>,
shutdown_type: ShutdownType,
) -> Result<(), ActionError> {
+ // Ensure `Shutdown` is dispatched after `Discovered`.
+ {
+ let discover_completed =
+ component.lock_actions().await.wait_for_action(ActionKey::Discover);
+ discover_completed.await.unwrap();
+ }
// Keep logs short to preserve as much as possible in the crash report
// NS: Shutdown of {moniker} was no-op
// RS: Beginning shutdown of resolved component {moniker}
@@ -271,15 +277,6 @@
// ES: Errored shutdown of {moniker}
{
let state = component.lock_state().await;
- {
- let exec_state = component.lock_execution();
- if exec_state.is_shut_down() {
- if matches!(shutdown_type, ShutdownType::System) {
- info!("=NS {}", component.moniker);
- }
- return Ok(());
- }
- }
match *state {
InstanceState::Resolved(ref s) => {
if matches!(shutdown_type, ShutdownType::System) {
@@ -296,6 +293,12 @@
}
return Ok(());
}
+ InstanceState::Shutdown(_, _) => {
+ if matches!(shutdown_type, ShutdownType::System) {
+ info!("=NS {}", component.moniker);
+ }
+ return Ok(());
+ }
InstanceState::New | InstanceState::Unresolved(_) | InstanceState::Destroyed => {}
}
}
@@ -929,7 +932,7 @@
use {
super::*,
crate::model::{
- actions::{test_utils::is_unresolved, StopAction},
+ actions::StopAction,
component::StartReason,
error::StopActionError,
testing::{
@@ -2818,14 +2821,14 @@
name: "cdata".parse().unwrap(),
source: StorageDirectorySource::Child("childB".to_string()),
backing_dir: "directory".parse().unwrap(),
- subdir: None,
+ subdir: Default::default(),
storage_id: fdecl::StorageId::StaticInstanceIdOrMoniker,
}),
CapabilityDecl::Storage(StorageDecl {
name: "pdata".parse().unwrap(),
source: StorageDirectorySource::Parent,
backing_dir: "directory".parse().unwrap(),
- subdir: None,
+ subdir: Default::default(),
storage_id: fdecl::StorageId::StaticInstanceIdOrMoniker,
}),
],
@@ -3416,14 +3419,13 @@
let component_b = {
let state = component_a.lock_state().await;
match *state {
- InstanceState::Resolved(ref s) => {
- s.get_child(&"b".try_into().unwrap()).expect("child b not found").clone()
+ InstanceState::Shutdown(ref state, _) => {
+ state.children.get(&"b".try_into().unwrap()).expect("child b not found").clone()
}
- _ => panic!("not resolved"),
+ _ => panic!("not shutdown"),
}
};
assert!(execution_is_shut_down(&component_b).await);
- assert!(is_unresolved(&component_b).await);
// Now "a" is shut down. There should be no event for "b" because it was never started
// (or resolved).
diff --git a/src/sys/component_manager/src/model/actions/start.rs b/src/sys/component_manager/src/model/actions/start.rs
index f57991c..61b8613 100644
--- a/src/sys/component_manager/src/model/actions/start.rs
+++ b/src/sys/component_manager/src/model/actions/start.rs
@@ -8,8 +8,8 @@
crate::model::{
actions::{Action, ActionKey},
component::{
- ComponentInstance, ComponentRuntime, ExecutionState, IncomingCapabilities,
- InstanceState, StartReason,
+ ComponentInstance, ExecutionState, IncomingCapabilities, InstanceState, StartReason,
+ StartedInstanceState,
},
error::{ActionError, CreateNamespaceError, StartActionError, StructuredConfigError},
hooks::{Event, EventPayload, RuntimeInfo},
@@ -262,7 +262,7 @@
async fn start_component(
component: &Arc<ComponentInstance>,
decl: ComponentDecl,
- mut pending_runtime: ComponentRuntime,
+ mut pending_runtime: StartedInstanceState,
start_context: StartContext,
) -> Result<(), StartActionError> {
let _actions = component.lock_actions().await;
@@ -393,17 +393,14 @@
moniker: &Moniker,
) -> Option<Result<(), StartActionError>> {
match component {
- InstanceState::New | InstanceState::Unresolved(_) | InstanceState::Resolved(_) => {}
- InstanceState::Destroyed => {
- return Some(Err(StartActionError::InstanceDestroyed { moniker: moniker.clone() }));
+ InstanceState::Resolved(_) if execution.runtime.is_some() => Some(Ok(())),
+ InstanceState::New | InstanceState::Unresolved(_) | InstanceState::Resolved(_) => None,
+ InstanceState::Shutdown(_, _) => {
+ Some(Err(StartActionError::InstanceShutDown { moniker: moniker.clone() }))
}
- }
- if execution.is_shut_down() {
- Some(Err(StartActionError::InstanceShutDown { moniker: moniker.clone() }))
- } else if execution.runtime.is_some() {
- Some(Ok(()))
- } else {
- None
+ InstanceState::Destroyed => {
+ Some(Err(StartActionError::InstanceDestroyed { moniker: moniker.clone() }))
+ }
}
}
@@ -582,7 +579,7 @@
decl: &cm_rust::ComponentDecl,
start_reason: StartReason,
execution_controller_task: Option<controller::ExecutionControllerTask>,
-) -> Result<ComponentRuntime, StartActionError> {
+) -> Result<StartedInstanceState, StartActionError> {
// TODO(https://fxbug.dev/42071809): Consider moving this check to ComponentInstance::add_child
match component.on_terminate {
fdecl::OnTerminate::Reboot => {
@@ -608,7 +605,7 @@
None
};
- Ok(ComponentRuntime::new(start_reason, execution_controller_task, logger))
+ Ok(StartedInstanceState::new(start_reason, execution_controller_task, logger))
}
/// Returns the UseProtocolDecl for the LogSink protocol, if any.
@@ -1011,7 +1008,7 @@
};
let ris = ResolvedInstanceState::new(
&child,
- resolved_component,
+ resolved_component.clone(),
ComponentAddress::from_absolute_url(&child.component_url).unwrap(),
Default::default(),
ComponentInput::default(),
@@ -1022,18 +1019,30 @@
// Check for already_started:
{
+ let ris = ResolvedInstanceState::new(
+ &child,
+ resolved_component,
+ ComponentAddress::from_absolute_url(&child.component_url).unwrap(),
+ Default::default(),
+ ComponentInput::default(),
+ )
+ .await
+ .unwrap();
let mut es = ExecutionState::new();
- es.runtime = Some(ComponentRuntime::new(StartReason::Debug, None, None));
- assert!(!es.is_shut_down());
- assert_matches!(should_return_early(&InstanceState::New, &es, &m), Some(Ok(())));
+ es.runtime = Some(StartedInstanceState::new(StartReason::Debug, None, None));
+ assert_matches!(
+ should_return_early(&InstanceState::Resolved(ris), &es, &m),
+ Some(Ok(()))
+ );
}
// Check for shut_down:
let _ = child.stop_instance_internal(true).await;
+ assert!(child.lock_state().await.is_shut_down());
+ let state = child.lock_state().await;
let execution = child.lock_execution();
- assert!(execution.is_shut_down());
assert_matches!(
- should_return_early(&InstanceState::New, &execution, &m),
+ should_return_early(&*state, &execution, &m),
Some(Err(StartActionError::InstanceShutDown { moniker: _ }))
);
}
@@ -1051,6 +1060,9 @@
let m = Moniker::try_from(vec!["TEST_CHILD_NAME"]).unwrap();
let execution = child.lock_execution();
- assert_matches!(should_return_early(&InstanceState::New, &execution, &m), Some(Ok(())));
+ assert_matches!(
+ should_return_early(&*child.lock_state().await, &execution, &m),
+ Some(Ok(()))
+ );
}
}
diff --git a/src/sys/component_manager/src/model/actions/stop.rs b/src/sys/component_manager/src/model/actions/stop.rs
index c4dc7fd..ada50830 100644
--- a/src/sys/component_manager/src/model/actions/stop.rs
+++ b/src/sys/component_manager/src/model/actions/stop.rs
@@ -26,6 +26,12 @@
#[async_trait]
impl Action for StopAction {
async fn handle(self, component: &Arc<ComponentInstance>) -> Result<(), ActionError> {
+ // Ensure `Stop` is dispatched after `Discovered`.
+ {
+ let discover_completed =
+ component.lock_actions().await.wait_for_action(ActionKey::Discover);
+ discover_completed.await.unwrap();
+ }
component.stop_instance_internal(self.shut_down).await.map_err(Into::into)
}
fn key(&self) -> ActionKey {
diff --git a/src/sys/component_manager/src/model/actions/unresolve.rs b/src/sys/component_manager/src/model/actions/unresolve.rs
index 275b8694..a6f4b03 100644
--- a/src/sys/component_manager/src/model/actions/unresolve.rs
+++ b/src/sys/component_manager/src/model/actions/unresolve.rs
@@ -10,6 +10,7 @@
hooks::{Event, EventPayload},
},
async_trait::async_trait,
+ futures::future::join_all,
std::{ops::DerefMut, sync::Arc},
};
@@ -41,56 +42,61 @@
// Shut down the component, preventing new starts or resolves during the UnresolveAction.
ActionSet::register(component.clone(), ShutdownAction::new(ShutdownType::Instance)).await?;
- if component.lock_execution().runtime.is_some() {
+ if !component.lock_state().await.is_shut_down() {
return Err(
UnresolveActionError::InstanceRunning { moniker: component.moniker.clone() }.into()
);
}
- let children: Vec<Arc<ComponentInstance>> = {
- match *component.lock_state().await {
- InstanceState::Resolved(ref s) => s.children().map(|(_, c)| c.clone()).collect(),
+ // Unresolve all children
+ let children = {
+ let state = component.lock_state().await;
+ match *state {
+ InstanceState::Shutdown(ref state, _) => state.children.clone(),
InstanceState::Destroyed => {
return Err(UnresolveActionError::InstanceDestroyed {
moniker: component.moniker.clone(),
}
.into())
}
- InstanceState::Unresolved(_) | InstanceState::New => return Ok(()),
+ _ => {
+ panic!("component {} was moved to unexpected state {:?}", component.moniker, state)
+ }
}
};
-
- // Unresolve the children before unresolving the component because removing the resolved
- // state removes the ChildInstanceState that contains the list of children.
- for child in children {
- ActionSet::register(child, UnresolveAction::new()).await?;
+ let mut futures = vec![];
+ for (_name, child) in children {
+ futures
+ .push(async move { ActionSet::register(child.clone(), UnresolveAction::new()).await });
}
+ // Run all the futures, and then return the first error if there were any.
+ join_all(futures).await.into_iter().fold(Ok(()), |acc, r| acc.and_then(|_| r))?;
- // Move the component back to the Discovered state. We can't use a DiscoverAction for this
- // change because the system allows and does call DiscoverAction on resolved components with
- // the expectation that they will return without changing the instance state to Discovered.
- // The state may have changed during the time taken for the recursions, so recheck here.
+ // Move this component back to the Unresolved state. The state may have changed during the time
+ // taken for the children to unresolve, so recheck here.
{
let mut state = component.lock_state().await;
- match state.deref_mut() {
- InstanceState::Resolved(resolved_state) => {
- let next = InstanceState::Unresolved(resolved_state.to_unresolved());
- state.set(next);
- true
- }
+ let unresolved_state = match state.deref_mut() {
+ InstanceState::Shutdown(_, unresolved_state) => unresolved_state.take(),
InstanceState::Destroyed => {
return Err(UnresolveActionError::InstanceDestroyed {
moniker: component.moniker.clone(),
}
.into())
}
- InstanceState::Unresolved(_) | InstanceState::New => return Ok(()),
- }
+ InstanceState::Resolved(_) | InstanceState::New => {
+ panic!(
+ "component {} was shutdown, but then moved to unexpected state {:?}",
+ component.moniker, state
+ );
+ }
+ InstanceState::Unresolved(_) => {
+ panic!("component {} moved to unresolved state before we set it to unresolved, this should be impossible", component.moniker);
+ }
+ };
+ state.set(InstanceState::Unresolved(unresolved_state));
};
- // The component was shut down, so won't start. Re-enable it.
- component.lock_execution().reset_shut_down();
-
let event = Event::new(&component, EventPayload::Unresolved);
component.hooks.dispatch(&event).await;
Ok(())
diff --git a/src/sys/component_manager/src/model/component.rs b/src/sys/component_manager/src/model/component.rs
index b557033..1aacc04 100644
--- a/src/sys/component_manager/src/model/component.rs
+++ b/src/sys/component_manager/src/model/component.rs
@@ -63,7 +63,7 @@
cm_moniker::{IncarnationId, InstancedChildName, InstancedMoniker},
cm_rust::{
CapabilityDecl, CapabilityTypeName, ChildDecl, CollectionDecl, ComponentDecl, DeliveryType,
- FidlIntoNative, NativeIntoFidl, OfferDeclCommon, SourceName, UseDecl,
+ FidlIntoNative, NativeIntoFidl, OfferDeclCommon, SourceName, UseDecl, UseStorageDecl,
},
cm_types::Name,
cm_util::{channel, TaskGroup},
@@ -86,6 +86,7 @@
clone::Clone,
collections::{HashMap, HashSet},
fmt,
+ ops::DerefMut,
sync::{Arc, Weak},
time::Duration,
},
@@ -521,36 +522,53 @@
pub async fn lock_resolved_state<'a>(
self: &'a Arc<Self>,
) -> Result<MappedMutexGuard<'a, InstanceState, ResolvedInstanceState>, ActionError> {
- fn get_resolved(s: &mut InstanceState) -> &mut ResolvedInstanceState {
- match s {
- InstanceState::Resolved(s) => s,
- _ => panic!("not resolved"),
- }
- }
- {
- let state = self.state.lock().await;
- match *state {
- InstanceState::Resolved(_) => {
- return Ok(MutexGuard::map(state, get_resolved));
+ loop {
+ fn get_resolved(s: &mut InstanceState) -> &mut ResolvedInstanceState {
+ match s {
+ InstanceState::Resolved(s) => s,
+ _ => panic!("not resolved"),
}
- InstanceState::Destroyed => {
+ }
+ /// Returns Ok(Some(_)) when the component is in a resolved state, Ok(None) when the
+ /// component is in a state from which it can be resolved, and Err(_) when the
+ /// component is in a state from which it cannot be resolved.
+ async fn get_mapped_mutex_or_error<'a>(
+ self_: &'a Arc<ComponentInstance>,
+ ) -> Result<
+ Option<MappedMutexGuard<'a, InstanceState, ResolvedInstanceState>>,
+ ActionError,
+ > {
+ let state = self_.state.lock().await;
+ if let InstanceState::Resolved(_) = *state {
+ return Ok(Some(MutexGuard::map(state, get_resolved)));
+ }
+ if let InstanceState::Destroyed = *state {
return Err(ResolveActionError::InstanceDestroyed {
- moniker: self.moniker.clone(),
+ moniker: self_.moniker.clone(),
}
.into());
}
- InstanceState::New | InstanceState::Unresolved(_) => {}
+ if state.is_shut_down() {
+ return Err(ResolveActionError::InstanceShutDown {
+ moniker: self_.moniker.clone(),
+ }
+ .into());
+ }
+ Ok(None)
}
- // Drop the lock before doing the work to resolve the state.
+
+ if let Some(mapped_guard) = get_mapped_mutex_or_error(&self).await? {
+ return Ok(mapped_guard);
+ }
+ self.resolve().await?;
+ if let Some(mapped_guard) = get_mapped_mutex_or_error(&self).await? {
+ return Ok(mapped_guard);
+ }
+ // If we've reached here, then the component must have been unresolved in-between our
+            // calls to resolve and get_mapped_mutex_or_error. Our mission here remains to resolve
+ // the component if necessary and then return the resolved state, so let's loop and try
+ // to resolve it again.
}
- self.resolve().await?;
- let state = self.state.lock().await;
- if let InstanceState::Destroyed = *state {
- return Err(
- ResolveActionError::InstanceDestroyed { moniker: self.moniker.clone() }.into()
- );
- }
- Ok(MutexGuard::map(state, get_resolved))
}
/// Resolves the component declaration, populating `ResolvedInstanceState` as necessary. A
@@ -776,8 +794,6 @@
) -> Result<(), StopActionError> {
let mut runtime = {
let mut execution = self.lock_execution();
- let shut_down = execution.shut_down | shut_down;
- execution.shut_down = shut_down;
execution.runtime.take()
};
@@ -860,6 +876,10 @@
self.hooks.dispatch(&event).await;
}
+ if shut_down {
+ self.move_state_to_shutdown().await;
+ }
+
if let ExtendedInstance::Component(parent) =
self.try_get_parent().map_err(|_| StopActionError::GetParentFailed)?
{
@@ -873,6 +893,68 @@
Ok(())
}
+ async fn move_state_to_shutdown(self: &Arc<Self>) {
+ loop {
+ fn get_storage_uses(resolved_state: &ResolvedInstanceState) -> Vec<UseStorageDecl> {
+ resolved_state
+ .resolved_component
+ .decl
+ .uses
+ .iter()
+ .filter_map(|use_| match use_ {
+ UseDecl::Storage(ref storage_use) => Some(storage_use.clone()),
+ _ => None,
+ })
+ .collect::<Vec<_>>()
+ }
+
+ // If the component is in a resolved state, then we have to route its storage
+ // capabilities. We shouldn't do this while holding the state lock, so let's do this in
+ // advance before grabbing the state lock below.
+ let mut routed_storage = vec![];
+ let storage_uses = {
+ let state = self.lock_state().await;
+ match &*state {
+ InstanceState::Resolved(resolved_state) => get_storage_uses(&resolved_state),
+ _ => vec![],
+ }
+ };
+ for storage_use in &storage_uses {
+ if let Ok(info) = routing::route_storage(storage_use.clone(), &self).await {
+ routed_storage.push(info);
+ }
+ }
+
+ // Now that any necessary routing operations are out of the way, grab the state lock
+ // and let's calculate our new state.
+ let mut state = self.lock_state().await;
+ let new_state = match state.deref_mut() {
+ InstanceState::New => {
+ panic!("component should be discovered before shutting down");
+ }
+ InstanceState::Unresolved(unresolved_state) => Some(InstanceState::Shutdown(
+ ShutdownInstanceState { children: HashMap::new(), routed_storage: vec![] },
+ unresolved_state.take(),
+ )),
+ InstanceState::Resolved(resolved_state) => {
+ let children = resolved_state.children.clone();
+ if storage_uses != get_storage_uses(&resolved_state) {
+ continue;
+ }
+ Some(InstanceState::Shutdown(
+ ShutdownInstanceState { children, routed_storage },
+ resolved_state.to_unresolved(),
+ ))
+ }
+ InstanceState::Shutdown(_, _) | InstanceState::Destroyed => None,
+ };
+ if let Some(new_state) = new_state {
+ state.set(new_state);
+ }
+ return;
+ }
+ }
+
async fn destroy_child_if_single_run(
self: &Arc<Self>,
child_moniker: &ChildName,
@@ -926,35 +1008,25 @@
return Ok(());
}
// Clean up isolated storage.
- let uses = {
- let state = self.lock_state().await;
+ let routed_storage = {
+ let mut state = self.lock_state().await;
match *state {
- InstanceState::Resolved(ref s) => s.resolved_component.decl.uses.clone(),
- _ => {
- // The instance was never resolved and therefore never ran, it can't possibly
- // have storage to clean up.
- return Ok(());
- }
+ InstanceState::Shutdown(ref mut s, _) => s.routed_storage.drain(..).collect::<Vec<_>>(),
+ _ => panic!("cannot destroy component instance {} because it is not shutdown, it is in state {:?}", self.moniker, *state),
}
};
- for use_ in uses {
- if let UseDecl::Storage(use_storage) = use_ {
- match routing::route_and_delete_storage(use_storage.clone(), &self).await {
- Ok(()) => (),
- Err(ModelError::RoutingError { .. }) => {
- // If the routing for this storage capability is invalid then there's no
- // storage for us to delete. Ignore this error, and proceed.
- }
- Err(error) => {
- // We received an error we weren't expecting, but we still want to destroy
- // this instance. It's bad to leave storage state undeleted, but it would
- // be worse to not continue with destroying this instance. Log the error,
- // and proceed.
- warn!(
- component=%self.moniker, %error,
- "failed to delete storage during instance destruction, proceeding with destruction anyway",
- );
- }
+ for storage in routed_storage {
+ match routing::delete_storage(storage).await {
+ Ok(()) => (),
+ Err(error) => {
+ // We received an error we weren't expecting, but we still want to destroy
+ // this instance. It's bad to leave storage state undeleted, but it would
+ // be worse to not continue with destroying this instance. Log the error,
+ // and proceed.
+ warn!(
+ component=%self.moniker, %error,
+ "failed to delete storage during instance destruction, proceeding with destruction anyway",
+ );
}
}
}
@@ -964,15 +1036,18 @@
/// Registers actions to destroy all dynamic children of collections belonging to this instance.
async fn destroy_dynamic_children(self: &Arc<Self>) -> Result<(), ActionError> {
let moniker_incarnations: Vec<_> = {
- let state = self.lock_state().await;
- let state = match *state {
- InstanceState::Resolved(ref s) => s,
+ match *self.lock_state().await {
+ InstanceState::Resolved(ref state) => {
+ state.children().map(|(k, c)| (k.clone(), c.incarnation_id())).collect()
+ }
+ InstanceState::Shutdown(ref state, _) => {
+ state.children.iter().map(|(k, c)| (k.clone(), c.incarnation_id())).collect()
+ }
_ => {
// Component instance was not resolved, so no dynamic children.
return Ok(());
}
- };
- state.children().map(|(k, c)| (k.clone(), c.incarnation_id())).collect()
+ }
};
let mut futures = vec![];
// Destroy all children that belong to a collection.
@@ -998,6 +1073,9 @@
let child = s.get_child(&moniker).map(|r| r.clone());
child
}
+ InstanceState::Shutdown(ref state, _) => {
+ state.children.get(&moniker).map(|r| r.clone())
+ }
InstanceState::Destroyed => None,
InstanceState::New | InstanceState::Unresolved(_) => {
panic!("DestroyChild: target is not resolved");
@@ -1137,6 +1215,12 @@
fdecl::StartupMode::Lazy => None,
})
.collect(),
+ InstanceState::Shutdown(_, _) => {
+ return Err(StartActionError::InstanceShutDown {
+ moniker: self.moniker.clone(),
+ }
+ .into());
+ }
InstanceState::Destroyed => {
return Err(StartActionError::InstanceDestroyed {
moniker: self.moniker.clone(),
@@ -1194,7 +1278,7 @@
pub async fn with_logger_as_default<T>(&self, op: impl FnOnce() -> T) -> T {
let execution = self.lock_execution();
match &execution.runtime {
- Some(ComponentRuntime { logger: Some(ref logger), .. }) => {
+ Some(StartedInstanceState { logger: Some(ref logger), .. }) => {
let logger = logger.clone() as Arc<dyn tracing::Subscriber + Send + Sync>;
tracing::subscriber::with_default(logger, op)
}
@@ -1438,19 +1522,15 @@
/// The execution state of a component.
pub struct ExecutionState {
- /// True if the component instance has shut down. This means that the component is stopped
- /// and cannot be restarted.
- shut_down: bool,
-
/// Runtime support for the component. From component manager's point of view, the component
/// instance is running iff this field is set.
- pub runtime: Option<ComponentRuntime>,
+ pub runtime: Option<StartedInstanceState>,
}
impl ExecutionState {
/// Creates a new ExecutionState.
pub fn new() -> Self {
- Self { shut_down: false, runtime: None }
+ Self { runtime: None }
}
/// Returns whether the component is started, i.e. if it has a runtime.
@@ -1458,17 +1538,6 @@
self.runtime.is_some()
}
- /// Returns whether the instance has shut down.
- pub fn is_shut_down(&self) -> bool {
- self.shut_down
- }
-
- /// Enables the component to restart after being shut down. Used by the UnresolveAction.
- /// Use of this function is strongly discouraged.
- pub fn reset_shut_down(&mut self) {
- self.shut_down = false;
- }
-
/// Scope server_end to `runtime` of this state. This ensures that the channel
/// will be kept alive as long as runtime is set to Some(...). If it is
/// None when this method is called, this operation is a no-op and the channel
@@ -1488,6 +1557,8 @@
Unresolved(UnresolvedInstanceState),
/// The instance has been resolved.
Resolved(ResolvedInstanceState),
+ /// The instance has been shutdown, and may not run anymore.
+ Shutdown(ShutdownInstanceState, UnresolvedInstanceState),
/// The instance has been destroyed. It has no content and no further actions may be registered
/// on it.
Destroyed,
@@ -1518,6 +1589,13 @@
}
}
+ pub fn is_shut_down(&self) -> bool {
+ match &self {
+ InstanceState::Shutdown(_, _) | InstanceState::Destroyed => true,
+ _ => false,
+ }
+ }
+
/// Requests a token that represents this component instance, minting it if needed.
///
/// If the component instance is destroyed or not discovered, returns `None`.
@@ -1528,8 +1606,9 @@
) -> Option<InstanceToken> {
match self {
InstanceState::New => None,
- InstanceState::Unresolved(unresolved) => {
- Some(unresolved.instance_token(moniker, context))
+ InstanceState::Unresolved(unresolved_state)
+ | InstanceState::Shutdown(_, unresolved_state) => {
+ Some(unresolved_state.instance_token(moniker, context))
}
InstanceState::Resolved(resolved) => Some(resolved.instance_token(moniker, context)),
InstanceState::Destroyed => None,
@@ -1543,6 +1622,7 @@
InstanceState::New => None,
InstanceState::Unresolved(_) => None,
InstanceState::Resolved(resolved) => resolved.program_escrow()?.will_start().await,
+ InstanceState::Shutdown(_, _) => None,
InstanceState::Destroyed => None,
}
}
@@ -1554,12 +1634,23 @@
Self::New => "New",
Self::Unresolved(_) => "Discovered",
Self::Resolved(_) => "Resolved",
+ Self::Shutdown(_, _) => "Shutdown",
Self::Destroyed => "Destroyed",
};
f.write_str(s)
}
}
+pub struct ShutdownInstanceState {
+    /// The children of this component, which are retained in case a destroy action is performed, as
+ /// in that case the children will need to be destroyed as well.
+ pub children: HashMap<ChildName, Arc<ComponentInstance>>,
+
+ /// Information about used storage capabilities the component had in its manifest. This is
+ /// retained because the storage contents will be deleted if this component is destroyed.
+ pub routed_storage: Vec<routing::RoutedStorage>,
+}
+
pub struct UnresolvedInstanceState {
/// Caches an instance token.
instance_token_state: InstanceTokenState,
@@ -1579,10 +1670,17 @@
/// Returns relevant information and prepares to enter the resolved state.
pub fn to_resolved(&mut self) -> (InstanceTokenState, ComponentInput) {
- (
- std::mem::replace(&mut self.instance_token_state, Default::default()),
- self.component_input.clone(),
- )
+ (std::mem::take(&mut self.instance_token_state), self.component_input.clone())
+ }
+
+ /// Creates a new UnresolvedInstanceState by either cloning values from this struct or moving
+ /// values from it (and replacing the values with their default values). This struct should be
+ /// dropped after this function is called.
+ pub fn take(&mut self) -> Self {
+ Self {
+ instance_token_state: std::mem::take(&mut self.instance_token_state),
+ component_input: self.component_input.clone(),
+ }
}
}
@@ -2490,7 +2588,7 @@
/// The execution state for a component instance that has started running.
///
/// If the component instance has a program, it may also have a [`ProgramRuntime`].
-pub struct ComponentRuntime {
+pub struct StartedInstanceState {
/// If set, that means this component is associated with a running program.
program: Option<ProgramRuntime>,
@@ -2515,14 +2613,14 @@
logger: Option<Arc<ScopedLogger>>,
}
-impl ComponentRuntime {
+impl StartedInstanceState {
pub fn new(
start_reason: StartReason,
execution_controller_task: Option<controller::ExecutionControllerTask>,
logger: Option<ScopedLogger>,
) -> Self {
let timestamp = zx::Time::get_monotonic();
- ComponentRuntime {
+ StartedInstanceState {
program: None,
timestamp,
binder_server_ends: vec![],
@@ -2538,7 +2636,7 @@
self.program.as_ref().map(|program_runtime| program_runtime.program.runtime())
}
- /// Associates the [ComponentRuntime] with a running [Program].
+ /// Associates the [StartedInstanceState] with a running [Program].
///
/// Creates a background task waiting for the program to terminate. When that happens, use the
/// [WeakComponentInstance] to stop the component.
@@ -2549,7 +2647,7 @@
/// Stop the program, if any. The timer defines how long the runner is given to stop the
/// program gracefully before we request the controller to terminate the program.
///
- /// Regardless if the runner honored our request, after this method, the [`ComponentRuntime`] is
+    /// Regardless of whether the runner honored our request, after this method, the [`StartedInstanceState`] is
/// no longer associated with a [Program].
pub async fn stop_program<'a, 'b>(
&'a mut self,
diff --git a/src/sys/component_manager/src/model/error.rs b/src/sys/component_manager/src/model/error.rs
index e275671..1e9c02f7 100644
--- a/src/sys/component_manager/src/model/error.rs
+++ b/src/sys/component_manager/src/model/error.rs
@@ -21,16 +21,13 @@
fidl_fuchsia_component as fcomponent, fidl_fuchsia_sys2 as fsys, fuchsia_zircon as zx,
moniker::{ChildName, Moniker, MonikerError},
sandbox::ConversionError,
- std::{path::PathBuf, sync::Arc},
+ std::sync::Arc,
thiserror::Error,
};
/// Errors produced by `Model`.
#[derive(Debug, Error, Clone)]
pub enum ModelError {
- // TODO(https://fxbug.dev/42068250): Remove this error by using the `camino` library
- #[error("path is not utf-8: {:?}", path)]
- PathIsNotUtf8 { path: PathBuf },
#[error("Moniker error: {}", err)]
MonikerError {
#[from]
@@ -133,10 +130,6 @@
ModelError::from(ComponentInstanceError::instance_not_found(moniker))
}
- pub fn path_is_not_utf8(path: PathBuf) -> ModelError {
- ModelError::PathIsNotUtf8 { path }
- }
-
pub fn open_directory_error(moniker: Moniker, relative_path: impl Into<String>) -> ModelError {
ModelError::OpenDirectoryError { moniker, relative_path: relative_path.into() }
}
diff --git a/src/sys/component_manager/src/model/events/error.rs b/src/sys/component_manager/src/model/events/error.rs
index 5e80c63..362f3e9 100644
--- a/src/sys/component_manager/src/model/events/error.rs
+++ b/src/sys/component_manager/src/model/events/error.rs
@@ -12,6 +12,9 @@
#[error("Model not available")]
ModelNotAvailable,
+ #[error("Instance shut down")]
+ InstanceShutdown,
+
#[error("Instance destroyed")]
InstanceDestroyed,
diff --git a/src/sys/component_manager/src/model/events/registry.rs b/src/sys/component_manager/src/model/events/registry.rs
index fb84e4a..90f4900 100644
--- a/src/sys/component_manager/src/model/events/registry.rs
+++ b/src/sys/component_manager/src/model/events/registry.rs
@@ -307,6 +307,9 @@
unreachable!("route_events: not resolved");
}
InstanceState::Resolved(ref s) => s.decl().clone(),
+ InstanceState::Shutdown(_, _) => {
+ return Err(ModelError::EventsError { err: EventsError::InstanceShutdown });
+ }
InstanceState::Destroyed => {
return Err(ModelError::EventsError { err: EventsError::InstanceDestroyed });
}
diff --git a/src/sys/component_manager/src/model/events/synthesizer.rs b/src/sys/component_manager/src/model/events/synthesizer.rs
index 9cb037e..4dc3773 100644
--- a/src/sys/component_manager/src/model/events/synthesizer.rs
+++ b/src/sys/component_manager/src/model/events/synthesizer.rs
@@ -240,6 +240,7 @@
match *state_guard {
InstanceState::New
| InstanceState::Unresolved(_)
+ | InstanceState::Shutdown(_, _)
| InstanceState::Destroyed => {}
InstanceState::Resolved(ref s) => {
for (_, child) in s.children() {
diff --git a/src/sys/component_manager/src/model/model.rs b/src/sys/component_manager/src/model/model.rs
index c0d9e45..6f140fb 100644
--- a/src/sys/component_manager/src/model/model.rs
+++ b/src/sys/component_manager/src/model/model.rs
@@ -117,7 +117,7 @@
// if we never got everything started that we wanted to.
let action_set = self.root.lock_actions().await;
if !action_set.contains(&ActionKey::Shutdown) {
- if !self.root.lock_execution().is_shut_down() {
+ if !self.root.lock_state().await.is_shut_down() {
panic!(
"failed to start root component {}: {:?}",
self.root.component_url, e
diff --git a/src/sys/component_manager/src/model/routing/mod.rs b/src/sys/component_manager/src/model/routing/mod.rs
index 783458c..0b231f1 100644
--- a/src/sys/component_manager/src/model/routing/mod.rs
+++ b/src/sys/component_manager/src/model/routing/mod.rs
@@ -12,7 +12,11 @@
use {
crate::{
capability::CapabilitySource,
- model::{component::ComponentInstance, error::ModelError, storage},
+ model::{
+ component::{ComponentInstance, WeakComponentInstance},
+ error::ModelError,
+ storage,
+ },
},
::routing::{component_instance::ComponentInstanceInterface, mapper::NoopRouteMapper},
async_trait::async_trait,
@@ -126,21 +130,31 @@
}
}
-/// Routes a storage capability from `target` to its source and deletes its isolated storage.
-pub(super) async fn route_and_delete_storage(
+pub struct RoutedStorage {
+ backing_dir_info: storage::BackingDirectoryInfo,
+ target: WeakComponentInstance,
+}
+
+pub(super) async fn route_storage(
use_storage_decl: UseStorageDecl,
target: &Arc<ComponentInstance>,
-) -> Result<(), ModelError> {
+) -> Result<RoutedStorage, ModelError> {
let storage_source = RouteRequest::UseStorage(use_storage_decl.clone()).route(target).await?;
-
let backing_dir_info = storage::route_backing_directory(storage_source.source).await?;
+ Ok(RoutedStorage { backing_dir_info, target: WeakComponentInstance::new(target) })
+}
+
+pub(super) async fn delete_storage(routed_storage: RoutedStorage) -> Result<(), ModelError> {
+ let target = routed_storage.target.upgrade()?;
// As of today, the storage component instance must contain the target. This is because
// it is impossible to expose storage declarations up.
- let moniker =
- target.instanced_moniker().strip_prefix(&backing_dir_info.storage_source_moniker).unwrap();
+ let moniker = target
+ .instanced_moniker()
+ .strip_prefix(&routed_storage.backing_dir_info.storage_source_moniker)
+ .unwrap();
storage::delete_isolated_storage(
- backing_dir_info,
+ routed_storage.backing_dir_info,
target.persistent_storage,
moniker,
target.instance_id(),
diff --git a/src/sys/component_manager/src/model/routing/open.rs b/src/sys/component_manager/src/model/routing/open.rs
index c9e9957..9ec83d0 100644
--- a/src/sys/component_manager/src/model/routing/open.rs
+++ b/src/sys/component_manager/src/model/routing/open.rs
@@ -66,6 +66,7 @@
mut open_options: OpenOptions<'a>,
) -> Self {
let RouteSource { source, relative_path } = route_source;
+ let relative_path = relative_path.to_path_buf();
open_options.relative_path =
relative_path.attach(open_options.relative_path).to_string_lossy().into();
Self::OutgoingDirectory { open_options, source, target }
diff --git a/src/sys/component_manager/src/model/routing/service.rs b/src/sys/component_manager/src/model/routing/service.rs
index c095f1f..77e57e4 100644
--- a/src/sys/component_manager/src/model/routing/service.rs
+++ b/src/sys/component_manager/src/model/routing/service.rs
@@ -381,7 +381,7 @@
let (proxy, server) = fidl::endpoints::create_proxy::<fio::DirectoryMarker>().unwrap();
CapabilityOpenRequest::new_from_route_source(
- RouteSource { source: source.clone(), relative_path: "".into() },
+ RouteSource { source: source.clone(), relative_path: Default::default() },
&target,
OpenOptions {
flags: fio::OpenFlags::DIRECTORY,
diff --git a/src/sys/component_manager/src/model/storage/admin_protocol.rs b/src/sys/component_manager/src/model/storage/admin_protocol.rs
index 3db25f2..be4ee57 100644
--- a/src/sys/component_manager/src/model/storage/admin_protocol.rs
+++ b/src/sys/component_manager/src/model/storage/admin_protocol.rs
@@ -260,7 +260,7 @@
capability: ComponentCapability::Storage(storage_decl.clone()),
component: component.clone(),
},
- relative_path: PathBuf::new(),
+ relative_path: Default::default(),
};
let backing_dir_source_info = storage::route_backing_directory(storage_source.source)
.await
diff --git a/src/sys/component_manager/src/model/storage/mod.rs b/src/sys/component_manager/src/model/storage/mod.rs
index 26114c0..a782185 100644
--- a/src/sys/component_manager/src/model/storage/mod.rs
+++ b/src/sys/component_manager/src/model/storage/mod.rs
@@ -20,15 +20,13 @@
anyhow::Error,
clonable_error::ClonableError,
cm_moniker::InstancedMoniker,
+ cm_types::RelativePath,
component_id_index::InstanceId,
derivative::Derivative,
fidl::endpoints,
fidl_fuchsia_io as fio,
moniker::MonikerBase,
- std::{
- path::{Path, PathBuf},
- sync::Arc,
- },
+ std::{path::PathBuf, sync::Arc},
thiserror::Error,
};
@@ -52,13 +50,13 @@
pub backing_directory_path: cm_types::Path,
/// The subdirectory inside of the backing directory capability to use, if any
- pub backing_directory_subdir: Option<PathBuf>,
+ pub backing_directory_subdir: RelativePath,
/// The subdirectory inside of the backing directory's sub-directory to use, if any. The
/// difference between this and backing_directory_subdir is that backing_directory_subdir is
/// appended to backing_directory_path first, and component_manager will create this subdir if
/// it doesn't exist but won't create backing_directory_subdir.
- pub storage_subdir: Option<PathBuf>,
+ pub storage_subdir: RelativePath,
/// The moniker of the component that defines the storage capability, with instance ids. This
/// is used for generating moniker-based storage paths.
@@ -199,30 +197,24 @@
) -> Result<fio::DirectoryProxy, ModelError> {
let (mut dir_proxy, local_server_end) =
endpoints::create_proxy::<fio::DirectoryMarker>().expect("failed to create proxy");
- let full_backing_directory_path = match storage_source_info.backing_directory_subdir.as_ref() {
- Some(subdir) => storage_source_info.backing_directory_path.to_path_buf().join(subdir),
- None => storage_source_info.backing_directory_path.to_path_buf(),
- };
+ let mut full_backing_directory_path = storage_source_info.backing_directory_path.clone();
+ full_backing_directory_path.extend(storage_source_info.backing_directory_subdir.clone());
if let Some(dir_source_component) = storage_source_info.storage_provider.as_ref() {
// TODO(https://fxbug.dev/42127827): This should be StartReason::AccessCapability, but we haven't
// plumbed in all the details needed to use it.
dir_source_component.ensure_started(&StartReason::StorageAdmin).await?;
- let path = full_backing_directory_path
- .to_str()
- .ok_or_else(|| ModelError::path_is_not_utf8(full_backing_directory_path.clone()))?;
+ let path = full_backing_directory_path.to_string();
dir_source_component
.open_outgoing(
FLAGS | fio::OpenFlags::DIRECTORY,
- path,
+ &path,
&mut local_server_end.into_channel(),
)
.await?;
} else {
// If storage_source_info.storage_provider is None, the directory comes from component_manager's namespace
- let path = full_backing_directory_path
- .to_str()
- .ok_or_else(|| ModelError::path_is_not_utf8(full_backing_directory_path.clone()))?;
- fuchsia_fs::directory::open_channel_in_namespace(path, FLAGS, local_server_end).map_err(
+ let path = full_backing_directory_path.to_string();
+ fuchsia_fs::directory::open_channel_in_namespace(&path, FLAGS, local_server_end).map_err(
|e| {
ModelError::from(StorageError::open_root(
None,
@@ -232,10 +224,10 @@
},
)?;
}
- if let Some(subdir) = storage_source_info.storage_subdir.as_ref() {
+ if !storage_source_info.storage_subdir.is_dot() {
dir_proxy = fuchsia_fs::directory::create_directory_recursive(
&dir_proxy,
- subdir.to_str().ok_or(ModelError::path_is_not_utf8(subdir.clone()))?,
+ &storage_source_info.storage_subdir.to_string(),
FLAGS,
)
.await
@@ -275,7 +267,7 @@
.route(&storage_component)
.await?;
- let (dir_source_path, dir_source_instance, relative_path) = match source {
+ let (dir_source_path, dir_source_instance, dir_subdir) = match source {
RouteSource {
source: CapabilitySource::Component { capability, component },
relative_path,
@@ -291,8 +283,6 @@
),
_ => unreachable!("not valid sources"),
};
- let dir_subdir =
- if relative_path == Path::new("") { None } else { Some(relative_path.clone()) };
Ok(BackingDirectoryInfo {
storage_provider: dir_source_instance,
@@ -328,7 +318,7 @@
fuchsia_fs::directory::create_directory_recursive(
&root_dir,
- storage_path.to_str().ok_or(ModelError::path_is_not_utf8(storage_path.clone()))?,
+ storage_path.to_str().expect("must be utf-8"),
FLAGS,
)
.await
@@ -354,7 +344,7 @@
fuchsia_fs::directory::create_directory_recursive(
&root_dir,
- storage_path.to_str().ok_or(ModelError::path_is_not_utf8(storage_path.clone()))?,
+ storage_path.to_str().expect("must be utf-8"),
FLAGS,
)
.await
@@ -388,15 +378,13 @@
StorageError::invalid_storage_path(moniker.clone(), Some(instance_id.clone()))
})?
.to_str()
- .ok_or_else(|| ModelError::path_is_not_utf8(storage_path.clone()))?
+ .expect("must be utf-8")
.to_string();
let parent_path = storage_path.parent().ok_or_else(|| {
StorageError::invalid_storage_path(moniker.clone(), Some(instance_id.clone()))
})?;
- let parent_path_str = parent_path
- .to_str()
- .ok_or_else(|| ModelError::path_is_not_utf8(storage_path.clone()))?;
+ let parent_path_str = parent_path.to_str().expect("must be utf-8");
let dir = if parent_path_str.is_empty() {
root_dir
} else {
@@ -432,8 +420,7 @@
let name = storage_path_parent
.file_name()
.ok_or_else(|| StorageError::invalid_storage_path(moniker.clone(), None))?;
- let name =
- name.to_str().ok_or_else(|| ModelError::path_is_not_utf8(storage_path.clone()))?;
+ let name = name.to_str().expect("must be utf-8");
let dir = if dir_path.parent().is_none() {
root_dir
} else {
@@ -584,8 +571,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: InstancedMoniker::root(),
},
false,
@@ -603,8 +590,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: InstancedMoniker::root(),
},
false,
@@ -621,8 +608,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: InstancedMoniker::root(),
},
false,
@@ -674,8 +661,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: InstancedMoniker::root(),
},
false,
@@ -710,8 +697,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: InstancedMoniker::root(),
},
false,
@@ -744,8 +731,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&test.model.root())),
backing_directory_path: "/data".parse().unwrap(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: InstancedMoniker::root(),
},
false,
@@ -795,8 +782,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: storage_moniker.clone(),
},
false,
@@ -814,8 +801,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: storage_moniker.clone(),
},
false,
@@ -833,8 +820,8 @@
BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: storage_moniker.clone(),
},
false,
@@ -849,8 +836,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: storage_moniker.clone(),
},
false,
@@ -872,8 +859,8 @@
BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path,
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: storage_moniker.clone(),
},
false,
@@ -928,8 +915,8 @@
&BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: parent_moniker.clone(),
},
false,
@@ -956,8 +943,8 @@
BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path.clone(),
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: parent_moniker.clone(),
},
false,
@@ -977,8 +964,8 @@
BackingDirectoryInfo {
storage_provider: Some(Arc::clone(&b_component)),
backing_directory_path: dir_source_path,
- backing_directory_subdir: None,
- storage_subdir: None,
+ backing_directory_subdir: Default::default(),
+ storage_subdir: Default::default(),
storage_source_moniker: parent_moniker.clone(),
},
false,
diff --git a/src/sys/component_manager/src/model/testing/test_helpers.rs b/src/sys/component_manager/src/model/testing/test_helpers.rs
index cbf978b..592d487 100644
--- a/src/sys/component_manager/src/model/testing/test_helpers.rs
+++ b/src/sys/component_manager/src/model/testing/test_helpers.rs
@@ -81,9 +81,8 @@
.expect("request map didn't have channel id, perhaps the controller wasn't started?");
assert_eq!(*request_vec, vec![ControlMessage::Stop]);
- let execution = self.component.lock_execution();
- assert!(execution.runtime.is_none());
- assert!(execution.is_shut_down());
+ assert!(self.component.lock_execution().runtime.is_none());
+ assert!(self.component.lock_state().await.is_shut_down());
}
/// Checks that the component has not been shut down, panics if it has.
@@ -96,15 +95,14 @@
assert_eq!(*request_vec, vec![]);
}
- let execution = self.component.lock_execution();
- assert!(execution.runtime.is_some());
- assert!(!execution.is_shut_down());
+ assert!(self.component.lock_execution().runtime.is_some());
}
}
pub async fn execution_is_shut_down(component: &ComponentInstance) -> bool {
+ let state = component.lock_state().await;
let execution = component.lock_execution();
- execution.runtime.is_none() && execution.is_shut_down()
+ execution.runtime.is_none() && state.is_shut_down()
}
/// Returns true if the given child (live or deleting) exists.
@@ -113,6 +111,9 @@
InstanceState::Resolved(ref s) => {
s.children().map(|(k, _)| k.clone()).any(|m| m == moniker.try_into().unwrap())
}
+ InstanceState::Shutdown(ref state, _) => {
+ state.children.iter().map(|(k, _)| k.clone()).any(|m| m == moniker.try_into().unwrap())
+ }
InstanceState::Destroyed => false,
_ => panic!("not resolved"),
}
@@ -132,6 +133,7 @@
pub async fn get_live_children(component: &ComponentInstance) -> HashSet<ChildName> {
match *component.lock_state().await {
InstanceState::Resolved(ref s) => s.children().map(|(m, _)| m.clone()).collect(),
+ InstanceState::Shutdown(ref s, _) => s.children.iter().map(|(m, _)| m.clone()).collect(),
InstanceState::Destroyed => HashSet::new(),
_ => panic!("not resolved"),
}
diff --git a/src/sys/component_manager/src/model/tests/directory.rs b/src/sys/component_manager/src/model/tests/directory.rs
index 779e0fd..3babee3 100644
--- a/src/sys/component_manager/src/model/tests/directory.rs
+++ b/src/sys/component_manager/src/model/tests/directory.rs
@@ -20,6 +20,7 @@
use zx::AsHandleRef;
use crate::model::{
+ actions::{ActionSet, DestroyAction},
component::StartReason,
start::Start,
testing::{out_dir::OutDir, routing_test_helpers::RoutingTest},
@@ -172,7 +173,7 @@
}
// Drain routing and open requests.
b.stop().await.unwrap();
- b.destroy_instance().await.unwrap();
+ ActionSet::register(b.clone(), DestroyAction::new()).await.unwrap();
// `c` should only get one open call after we drain any requests.
test.mock_runner.wait_for_url("test:///c_resolved").await;
diff --git a/src/sys/component_manager/tests/pkg_from_framework/pkg_from_framework.rs b/src/sys/component_manager/tests/pkg_from_framework/pkg_from_framework.rs
index cdd5c25..f32be23 100644
--- a/src/sys/component_manager/tests/pkg_from_framework/pkg_from_framework.rs
+++ b/src/sys/component_manager/tests/pkg_from_framework/pkg_from_framework.rs
@@ -145,7 +145,7 @@
target: cm_rust::ExposeTarget::Parent,
target_name: "config".parse().unwrap(),
rights: Some(fio::R_STAR_DIR),
- subdir: Some("data".into()),
+ subdir: "data".parse().unwrap(),
availability: cm_rust::Availability::Required,
})],
..cm_rust::ComponentDecl::default()
@@ -195,7 +195,7 @@
source_dictionary: Default::default(),
target_path: "/config".parse().unwrap(),
rights: fio::R_STAR_DIR,
- subdir: Some("data".into()),
+ subdir: "data".parse().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
availability: cm_rust::Availability::Required,
}));
diff --git a/src/sys/early_boot_instrumentation/coverage_source.cc b/src/sys/early_boot_instrumentation/coverage_source.cc
index b023259..34cff67 100644
--- a/src/sys/early_boot_instrumentation/coverage_source.cc
+++ b/src/sys/early_boot_instrumentation/coverage_source.cc
@@ -109,7 +109,7 @@
std::string path(DataTypeDir(type));
auto& root_dir = *(it->second);
- vfs::internal::Node* node = nullptr;
+ vfs::Node* node = nullptr;
// Both subdirs should always be available.
ZX_ASSERT(root_dir.Lookup(path, &node) == ZX_OK);
return *reinterpret_cast<vfs::PseudoDir*>(node);
diff --git a/src/sys/early_boot_instrumentation/coverage_source_test.cc b/src/sys/early_boot_instrumentation/coverage_source_test.cc
index abe2dda..073d11a 100644
--- a/src/sys/early_boot_instrumentation/coverage_source_test.cc
+++ b/src/sys/early_boot_instrumentation/coverage_source_test.cc
@@ -87,7 +87,7 @@
std::string dir_name(path.substr(0, curr));
path = path.substr(curr + 1, path.length());
// see if dir exists.
- vfs::internal::Node* existing_entry = nullptr;
+ vfs::Node* existing_entry = nullptr;
if (root.Lookup(dir_name, &existing_entry) == ZX_ERR_NOT_FOUND) {
std::unique_ptr<vfs::PseudoDir> new_dir = std::make_unique<vfs::PseudoDir>();
existing_entry = new_dir.get();
@@ -117,13 +117,12 @@
ASSERT_TRUE(ExposeBootDebugdata(debugdata_dir, sink_map).is_ok());
vfs::PseudoDir* lookup = nullptr;
- ASSERT_EQ(
- sink_map["random-sink"]->Lookup("static", reinterpret_cast<vfs::internal::Node**>(&lookup)),
- ZX_OK);
+ ASSERT_EQ(sink_map["random-sink"]->Lookup("static", reinterpret_cast<vfs::Node**>(&lookup)),
+ ZX_OK);
vfs::PseudoDir& out_dir = *lookup;
ASSERT_FALSE(out_dir.IsEmpty());
- vfs::internal::Node* node = nullptr;
+ vfs::Node* node = nullptr;
ASSERT_EQ(out_dir.Lookup("my-sink-data.my-data", &node), ZX_OK);
ASSERT_NE(node, nullptr);
}
@@ -139,13 +138,12 @@
ASSERT_TRUE(ExposeBootDebugdata(debugdata_dir, sink_map).is_ok());
vfs::PseudoDir* lookup = nullptr;
- ASSERT_EQ(
- sink_map["random-sink"]->Lookup("dynamic", reinterpret_cast<vfs::internal::Node**>(&lookup)),
- ZX_OK);
+ ASSERT_EQ(sink_map["random-sink"]->Lookup("dynamic", reinterpret_cast<vfs::Node**>(&lookup)),
+ ZX_OK);
vfs::PseudoDir& out_dir = *lookup;
ASSERT_FALSE(out_dir.IsEmpty());
- vfs::internal::Node* node = nullptr;
+ vfs::Node* node = nullptr;
ASSERT_EQ(out_dir.Lookup("my-sink-data.my-data", &node), ZX_OK);
ASSERT_NE(node, nullptr);
}
@@ -173,12 +171,11 @@
for (const auto& [sink, data_dir, file_name] : lookup_entries) {
vfs::PseudoDir* lookup = nullptr;
- ASSERT_EQ(sink_map[sink]->Lookup(data_dir, reinterpret_cast<vfs::internal::Node**>(&lookup)),
- ZX_OK);
+ ASSERT_EQ(sink_map[sink]->Lookup(data_dir, reinterpret_cast<vfs::Node**>(&lookup)), ZX_OK);
vfs::PseudoDir& out_dir = *lookup;
ASSERT_FALSE(out_dir.IsEmpty());
- vfs::internal::Node* node = nullptr;
+ vfs::Node* node = nullptr;
ASSERT_EQ(out_dir.Lookup(file_name, &node), ZX_OK);
ASSERT_NE(node, nullptr);
}
@@ -217,7 +214,7 @@
ASSERT_NE(it, sink_map.end());
auto& sink_root = *it->second;
- vfs::internal::Node* lookup_node = nullptr;
+ vfs::Node* lookup_node = nullptr;
ASSERT_EQ(sink_root.Lookup(path, &lookup_node), ZX_OK);
auto* typed_dir = reinterpret_cast<vfs::PseudoDir*>(lookup_node);
diff --git a/src/sys/lib/cm_fidl_validator/BUILD.gn b/src/sys/lib/cm_fidl_validator/BUILD.gn
index 0796df3..fa96979 100644
--- a/src/sys/lib/cm_fidl_validator/BUILD.gn
+++ b/src/sys/lib/cm_fidl_validator/BUILD.gn
@@ -26,12 +26,6 @@
"//third_party/rust_crates:url",
]
- # TODO: https://fxbug.dev/325448727 - Make Rust support for API levels more systematic.
- features = []
- if (clang_fuchsia_api_level == CLANG_FUCHSIA_HEAD_VALUE) {
- features += [ "target_api_level_head" ]
- }
-
sources = [
"src/error.rs",
"src/lib.rs",
diff --git a/src/sys/lib/cm_fidl_validator/src/lib.rs b/src/sys/lib/cm_fidl_validator/src/lib.rs
index 67db7d8..eb000f3 100644
--- a/src/sys/lib/cm_fidl_validator/src/lib.rs
+++ b/src/sys/lib/cm_fidl_validator/src/lib.rs
@@ -36,13 +36,13 @@
}
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
macro_rules! get_source_dictionary {
($decl:ident) => {
$decl.source_dictionary.as_ref()
};
}
-#[cfg(not(feature = "target_api_level_head"))]
+#[cfg(fuchsia_api_level_less_than = "HEAD")]
macro_rules! get_source_dictionary {
($decl:ident) => {
None
@@ -285,7 +285,7 @@
all_runners: HashSet<&'a str>,
all_resolvers: HashSet<&'a str>,
all_dictionaries: HashMap<&'a str, Option<&'a fdecl::Ref>>,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
all_configs: HashSet<&'a str>,
all_environment_names: HashSet<&'a str>,
strong_dependencies: DirectedGraph<DependencyNode<'a>>,
@@ -571,11 +571,11 @@
self.errors.push(Error::CapabilityMustBeBuiltin(DeclType::EventStream))
}
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Capability::Dictionary(dictionary) => {
self.validate_dictionary_decl(&dictionary);
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Capability::Config(config) => {
self.validate_configuration_decl(&config);
}
@@ -594,7 +594,7 @@
}
self.validate_use_paths(&uses);
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
{
let mut use_runner_name = None;
let mut use_runner_source = None;
@@ -610,7 +610,7 @@
}
return (use_runner_name, use_runner_source);
}
- #[cfg(not(feature = "target_api_level_head"))]
+ #[cfg(fuchsia_api_level_less_than = "HEAD")]
return (None, None);
}
@@ -729,7 +729,7 @@
}
}
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Use::Runner(u) => {
const DEPENDENCY_TYPE: Option<fdecl::DependencyType> =
Some(fdecl::DependencyType::Strong);
@@ -746,7 +746,7 @@
AVAILABILITY.as_ref(),
);
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Use::Config(u) => {
const DEPENDENCY_TYPE: Option<fdecl::DependencyType> =
Some(fdecl::DependencyType::Strong);
@@ -778,7 +778,7 @@
match &program.runner {
Some(_) =>
{
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
if use_runner_name.is_some() {
if use_runner_name != program.runner.as_ref()
|| use_runner_source
@@ -914,7 +914,7 @@
Some(fdecl::Ref::Framework(_)) => {}
Some(fdecl::Ref::Debug(_)) => {}
Some(fdecl::Ref::Self_(_)) => {}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Some(fdecl::Ref::Environment(_)) => {}
Some(fdecl::Ref::Child(child)) => {
if self.validate_child_ref(decl, "source", &child, OfferType::Static)
@@ -1351,7 +1351,7 @@
}
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fn validate_dictionary_decl(&mut self, dictionary: &'a fdecl::Dictionary) {
let decl = DeclType::Dictionary;
if check_name(dictionary.name.as_ref(), decl, "name", &mut self.errors) {
@@ -1402,7 +1402,7 @@
}
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fn validate_configuration_decl(&mut self, config: &'a fdecl::Configuration) {
let decl = DeclType::Configuration;
if check_name(config.name.as_ref(), decl, "name", &mut self.errors) {
@@ -1757,7 +1757,7 @@
}
}
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Expose::Dictionary(e) => {
let decl = DeclType::ExposeDictionary;
self.validate_expose_fields(
@@ -1779,7 +1779,7 @@
}
}
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Expose::Config(e) => {
let decl = DeclType::ExposeConfig;
self.validate_expose_fields(
@@ -2167,7 +2167,7 @@
fdecl::Offer::EventStream(e) => {
self.validate_event_stream_offer_fields(e, offer_type);
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Offer::Dictionary(o) => {
let decl = DeclType::OfferDictionary;
let source_dictionary = get_source_dictionary!(o);
@@ -2202,7 +2202,7 @@
self.target_dependency_from_ref(o.target.as_ref()),
);
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Offer::Config(o) => {
let decl = DeclType::OfferConfig;
self.validate_offer_fields(
diff --git a/src/sys/lib/cm_rust/BUILD.gn b/src/sys/lib/cm_rust/BUILD.gn
index 16d7812..40f28fc 100644
--- a/src/sys/lib/cm_rust/BUILD.gn
+++ b/src/sys/lib/cm_rust/BUILD.gn
@@ -26,11 +26,7 @@
]
sources = [ "src/lib.rs" ]
- # TODO: https://fxbug.dev/325448727 - Make Rust support for API levels more systematic.
features = []
- if (clang_fuchsia_api_level == CLANG_FUCHSIA_HEAD_VALUE) {
- features += [ "target_api_level_head" ]
- }
test_deps = [ "//third_party/rust_crates:difference" ]
diff --git a/src/sys/lib/cm_rust/src/lib.rs b/src/sys/lib/cm_rust/src/lib.rs
index 96cfd0d..71ac81f 100644
--- a/src/sys/lib/cm_rust/src/lib.rs
+++ b/src/sys/lib/cm_rust/src/lib.rs
@@ -15,7 +15,6 @@
std::collections::{BTreeMap, HashMap},
std::fmt,
std::hash::Hash,
- std::path::PathBuf,
thiserror::Error,
};
@@ -174,7 +173,7 @@
impl ComponentDecl {
/// Returns the runner used by this component, or `None` if this is a non-executable component.
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
pub fn get_runner(&self) -> Option<UseRunnerDecl> {
self.program
.as_ref()
@@ -302,9 +301,9 @@
Directory(UseDirectoryDecl),
Storage(UseStorageDecl),
EventStream(UseEventStreamDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Runner(UseRunnerDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Config(UseConfigurationDecl),
}
@@ -314,7 +313,7 @@
pub struct UseServiceDecl {
pub source: UseSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target_path: Path,
@@ -329,7 +328,7 @@
pub struct UseProtocolDecl {
pub source: UseSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target_path: Path,
@@ -344,7 +343,7 @@
pub struct UseDirectoryDecl {
pub source: UseSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target_path: Path,
@@ -358,7 +357,8 @@
)]
pub rights: fio::Operations,
- pub subdir: Option<PathBuf>,
+ #[fidl_decl(default_preserve_none)]
+ pub subdir: RelativePath,
pub dependency_type: DependencyType,
#[fidl_decl(default)]
pub availability: Availability,
@@ -410,26 +410,26 @@
pub availability: Availability,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::UseRunner", source_path = "dictionary")]
pub struct UseRunnerDecl {
pub source: UseSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
impl SourceName for UseRunnerDecl {
fn source_name(&self) -> &Name {
&self.source_name
}
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
impl UseDeclCommon for UseRunnerDecl {
fn source(&self) -> &UseSource {
&self.source
@@ -440,7 +440,7 @@
}
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::UseConfiguration", source_path = "name_only")]
@@ -453,14 +453,14 @@
pub type_: ConfigValueType,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
impl SourceName for UseConfigurationDecl {
fn source_name(&self) -> &Name {
&self.source_name
}
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
impl UseDeclCommon for UseConfigurationDecl {
fn source(&self) -> &UseSource {
&self.source
@@ -486,9 +486,9 @@
Runner(OfferRunnerDecl),
Resolver(OfferResolverDecl),
EventStream(OfferEventStreamDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Dictionary(OfferDictionaryDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Config(OfferConfigurationDecl),
}
@@ -530,7 +530,7 @@
pub struct OfferServiceDecl {
pub source: OfferSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: OfferTarget,
@@ -547,7 +547,7 @@
pub struct OfferProtocolDecl {
pub source: OfferSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: OfferTarget,
@@ -563,7 +563,7 @@
pub struct OfferDirectoryDecl {
pub source: OfferSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: OfferTarget,
@@ -579,7 +579,8 @@
)]
pub rights: Option<fio::Operations>,
- pub subdir: Option<PathBuf>,
+ #[fidl_decl(default_preserve_none)]
+ pub subdir: RelativePath,
#[fidl_decl(default)]
pub availability: Availability,
}
@@ -602,7 +603,7 @@
pub struct OfferRunnerDecl {
pub source: OfferSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: OfferTarget,
@@ -615,21 +616,21 @@
pub struct OfferResolverDecl {
pub source: OfferSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: OfferTarget,
pub target_name: Name,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, OfferDeclCommon, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::OfferDictionary", source_path = "dictionary")]
pub struct OfferDictionaryDecl {
pub source: OfferSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: OfferTarget,
@@ -639,7 +640,7 @@
pub availability: Availability,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, OfferDeclCommon, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::OfferConfiguration", source_path = "name_only")]
@@ -662,9 +663,9 @@
OfferDecl::Runner(o) => o.source_name(),
OfferDecl::Resolver(o) => o.source_name(),
OfferDecl::EventStream(o) => o.source_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Dictionary(o) => o.source_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Config(o) => o.source_name(),
}
}
@@ -680,9 +681,9 @@
OfferDecl::Runner(o) => o.source_path(),
OfferDecl::Resolver(o) => o.source_path(),
OfferDecl::EventStream(o) => o.source_path(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Dictionary(o) => o.source_path(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Config(o) => o.source_path(),
}
}
@@ -696,9 +697,9 @@
UseDecl::Directory(u) => u.source(),
UseDecl::Storage(u) => u.source(),
UseDecl::EventStream(u) => u.source(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Runner(u) => u.source(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Config(u) => u.source(),
}
}
@@ -710,9 +711,9 @@
UseDecl::Directory(u) => u.availability(),
UseDecl::Storage(u) => u.availability(),
UseDecl::EventStream(u) => u.availability(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Runner(u) => u.availability(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Config(u) => u.availability(),
}
}
@@ -728,9 +729,9 @@
OfferDecl::Runner(o) => o.target_name(),
OfferDecl::Resolver(o) => o.target_name(),
OfferDecl::EventStream(o) => o.target_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Dictionary(o) => o.target_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Config(o) => o.target_name(),
}
}
@@ -744,9 +745,9 @@
OfferDecl::Runner(o) => o.target(),
OfferDecl::Resolver(o) => o.target(),
OfferDecl::EventStream(o) => o.target(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Dictionary(o) => o.target(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Config(o) => o.target(),
}
}
@@ -760,9 +761,9 @@
OfferDecl::Runner(o) => o.source(),
OfferDecl::Resolver(o) => o.source(),
OfferDecl::EventStream(o) => o.source(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Dictionary(o) => o.source(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Config(o) => o.source(),
}
}
@@ -776,9 +777,9 @@
OfferDecl::Runner(o) => o.availability(),
OfferDecl::Resolver(o) => o.availability(),
OfferDecl::EventStream(o) => o.availability(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Dictionary(o) => o.availability(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Config(o) => o.availability(),
}
}
@@ -821,9 +822,9 @@
Directory(ExposeDirectoryDecl),
Runner(ExposeRunnerDecl),
Resolver(ExposeResolverDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Dictionary(ExposeDictionaryDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Config(ExposeConfigurationDecl),
}
@@ -835,9 +836,9 @@
Self::Directory(e) => e.source_name(),
Self::Runner(e) => e.source_name(),
Self::Resolver(e) => e.source_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Dictionary(e) => e.source_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Config(e) => e.source_name(),
}
}
@@ -851,9 +852,9 @@
Self::Directory(e) => e.source_path(),
Self::Runner(e) => e.source_path(),
Self::Resolver(e) => e.source_path(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Dictionary(e) => e.source_path(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Config(e) => e.source_path(),
}
}
@@ -867,9 +868,9 @@
Self::Directory(e) => e.source(),
Self::Runner(e) => e.source(),
Self::Resolver(e) => e.source(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Dictionary(e) => e.source(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Config(e) => e.source(),
}
}
@@ -881,9 +882,9 @@
Self::Directory(e) => e.target(),
Self::Runner(e) => e.target(),
Self::Resolver(e) => e.target(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Dictionary(e) => e.target(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Config(e) => e.target(),
}
}
@@ -895,9 +896,9 @@
Self::Directory(e) => e.target_name(),
Self::Runner(e) => e.target_name(),
Self::Resolver(e) => e.target_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Dictionary(e) => e.target_name(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Config(e) => e.target_name(),
}
}
@@ -909,9 +910,9 @@
Self::Directory(e) => e.availability(),
Self::Runner(e) => e.availability(),
Self::Resolver(e) => e.availability(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Dictionary(e) => e.availability(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Config(e) => e.availability(),
}
}
@@ -923,7 +924,7 @@
pub struct ExposeServiceDecl {
pub source: ExposeSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: ExposeTarget,
@@ -938,7 +939,7 @@
pub struct ExposeProtocolDecl {
pub source: ExposeSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: ExposeTarget,
@@ -953,7 +954,7 @@
pub struct ExposeDirectoryDecl {
pub source: ExposeSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: ExposeTarget,
@@ -968,7 +969,8 @@
)]
pub rights: Option<fio::Operations>,
- pub subdir: Option<PathBuf>,
+ #[fidl_decl(default_preserve_none)]
+ pub subdir: RelativePath,
#[fidl_decl(default)]
pub availability: Availability,
@@ -980,7 +982,7 @@
pub struct ExposeRunnerDecl {
pub source: ExposeSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: ExposeTarget,
@@ -993,21 +995,21 @@
pub struct ExposeResolverDecl {
pub source: ExposeSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: ExposeTarget,
pub target_name: Name,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, ExposeDeclCommon, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::ExposeDictionary", source_path = "dictionary")]
pub struct ExposeDictionaryDecl {
pub source: ExposeSource,
pub source_name: Name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
#[fidl_decl(default_preserve_none)]
pub source_dictionary: RelativePath,
pub target: ExposeTarget,
@@ -1016,7 +1018,7 @@
pub availability: Availability,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, ExposeDeclCommon, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::ExposeConfiguration", source_path = "name_only")]
@@ -1044,9 +1046,9 @@
Runner(RunnerDecl),
Resolver(ResolverDecl),
EventStream(EventStreamDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Dictionary(DictionaryDecl),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Config(ConfigurationDecl),
}
@@ -1092,7 +1094,8 @@
pub name: Name,
pub source: StorageDirectorySource,
pub backing_dir: Name,
- pub subdir: Option<PathBuf>,
+ #[fidl_decl(default_preserve_none)]
+ pub subdir: RelativePath,
#[cfg_attr(feature = "serde", serde(with = "serde_ext::StorageId"))]
pub storage_id: fdecl::StorageId,
}
@@ -1120,7 +1123,7 @@
pub name: Name,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::Dictionary")]
@@ -1130,7 +1133,7 @@
pub source_dictionary: Option<RelativePath>,
}
-#[cfg(feature = "target_api_level_head")]
+#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(FidlDecl, Debug, Clone, PartialEq, Eq)]
#[fidl_decl(fidl_table = "fdecl::Configuration")]
@@ -1149,9 +1152,9 @@
CapabilityDecl::Service(decl) => &decl.name,
CapabilityDecl::Storage(decl) => &decl.name,
CapabilityDecl::EventStream(decl) => &decl.name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
CapabilityDecl::Dictionary(decl) => &decl.name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
CapabilityDecl::Config(decl) => &decl.name,
}
}
@@ -1165,9 +1168,9 @@
CapabilityDecl::Service(decl) => decl.source_path.as_ref(),
CapabilityDecl::Storage(_) => None,
CapabilityDecl::EventStream(_) => None,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
CapabilityDecl::Dictionary(_) => None,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
CapabilityDecl::Config(_) => None,
}
}
@@ -1921,9 +1924,9 @@
UseDecl::Directory(d) => Some(&d.target_path),
UseDecl::Storage(d) => Some(&d.target_path),
UseDecl::EventStream(d) => Some(&d.target_path),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Runner(_) => None,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Config(_) => None,
}
}
@@ -1933,9 +1936,9 @@
UseDecl::Storage(storage_decl) => Some(&storage_decl.source_name),
UseDecl::EventStream(_) => None,
UseDecl::Service(_) | UseDecl::Protocol(_) | UseDecl::Directory(_) => None,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Runner(_) => None,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Config(_) => None,
}
}
@@ -1949,9 +1952,9 @@
UseDecl::Protocol(protocol_decl) => &protocol_decl.source_name,
UseDecl::Directory(directory_decl) => &directory_decl.source_name,
UseDecl::EventStream(event_stream_decl) => &event_stream_decl.source_name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Runner(runner_decl) => &runner_decl.source_name,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Config(u) => &u.source_name,
}
}
@@ -1965,9 +1968,9 @@
UseDecl::Directory(u) => u.source_path(),
UseDecl::Storage(u) => u.source_path(),
UseDecl::EventStream(u) => u.source_path(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Runner(u) => u.source_path(),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Config(u) => u.source_path(),
}
}
@@ -2068,9 +2071,9 @@
UseDecl::Directory(_) => Self::Directory,
UseDecl::Storage(_) => Self::Storage,
UseDecl::EventStream(_) => Self::EventStream,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Runner(_) => Self::Runner,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseDecl::Config(_) => Self::Config,
}
}
@@ -2086,9 +2089,9 @@
OfferDecl::Runner(_) => Self::Runner,
OfferDecl::Resolver(_) => Self::Resolver,
OfferDecl::EventStream(_) => Self::EventStream,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Dictionary(_) => Self::Dictionary,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
OfferDecl::Config(_) => Self::Config,
}
}
@@ -2102,9 +2105,9 @@
ExposeDecl::Directory(_) => Self::Directory,
ExposeDecl::Runner(_) => Self::Runner,
ExposeDecl::Resolver(_) => Self::Resolver,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
ExposeDecl::Dictionary(_) => Self::Dictionary,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
ExposeDecl::Config(_) => Self::Config,
}
}
@@ -2120,9 +2123,9 @@
CapabilityDecl::Runner(_) => Self::Runner,
CapabilityDecl::Resolver(_) => Self::Resolver,
CapabilityDecl::EventStream(_) => Self::EventStream,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
CapabilityDecl::Dictionary(_) => Self::Dictionary,
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
CapabilityDecl::Config(_) => Self::Config,
}
}
@@ -2170,18 +2173,6 @@
}
}
-impl FidlIntoNative<PathBuf> for String {
- fn fidl_into_native(self) -> PathBuf {
- PathBuf::from(self)
- }
-}
-
-impl NativeIntoFidl<String> for PathBuf {
- fn native_into_fidl(self) -> String {
- self.into_os_string().into_string().expect("invalid utf8")
- }
-}
-
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum DictionaryValue {
Str(String),
@@ -2261,7 +2252,7 @@
Self_,
Capability(Name),
Child(String),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Environment,
}
@@ -2274,7 +2265,7 @@
Self::Self_ => write!(f, "self"),
Self::Capability(c) => write!(f, "capability `{}`", c),
Self::Child(c) => write!(f, "child `#{}`", c),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
Self::Environment => write!(f, "environment"),
}
}
@@ -2290,7 +2281,7 @@
// cm_fidl_validator should have already validated this
fdecl::Ref::Capability(c) => UseSource::Capability(c.name.parse().unwrap()),
fdecl::Ref::Child(c) => UseSource::Child(c.name),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
fdecl::Ref::Environment(_) => UseSource::Environment,
_ => panic!("invalid UseSource variant"),
}
@@ -2308,7 +2299,7 @@
fdecl::Ref::Capability(fdecl::CapabilityRef { name: name.to_string() })
}
UseSource::Child(name) => fdecl::Ref::Child(fdecl::ChildRef { name, collection: None }),
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
UseSource::Environment => fdecl::Ref::Environment(fdecl::EnvironmentRef {}),
}
}
@@ -3364,7 +3355,7 @@
source_dictionary: "in/dict".parse().unwrap(),
target_path: "/data".parse().unwrap(),
rights: fio::Operations::CONNECT,
- subdir: Some("foo/bar".into()),
+ subdir: "foo/bar".parse().unwrap(),
availability: Availability::Required,
}),
UseDecl::Storage(UseStorageDecl {
@@ -3414,7 +3405,7 @@
target_name: "data".parse().unwrap(),
target: ExposeTarget::Parent,
rights: Some(fio::Operations::CONNECT),
- subdir: Some("foo/bar".into()),
+ subdir: "foo/bar".parse().unwrap(),
availability: Availability::Optional,
}),
ExposeDecl::Runner(ExposeRunnerDecl {
@@ -3473,7 +3464,7 @@
target: OfferTarget::Collection("modular".parse().unwrap()),
target_name: "data".parse().unwrap(),
rights: Some(fio::Operations::CONNECT),
- subdir: None,
+ subdir: ".".parse().unwrap(),
dependency_type: DependencyType::Strong,
availability: Availability::Optional,
}),
@@ -3567,7 +3558,7 @@
name: "cache".parse().unwrap(),
backing_dir: "data".parse().unwrap(),
source: StorageDirectorySource::Parent,
- subdir: Some("cache".parse().unwrap()),
+ subdir: "cache".parse().unwrap(),
storage_id: fdecl::StorageId::StaticInstanceId,
}),
CapabilityDecl::Runner(RunnerDecl {
@@ -3818,7 +3809,7 @@
name: "minfs".parse().unwrap(),
backing_dir: "minfs".parse().unwrap(),
source: StorageDirectorySource::Child("foo".to_string()),
- subdir: None,
+ subdir: ".".parse().unwrap(),
storage_id: fdecl::StorageId::StaticInstanceIdOrMoniker,
},
],
@@ -3844,7 +3835,7 @@
name: "minfs".parse().unwrap(),
backing_dir: "minfs".parse().unwrap(),
source: StorageDirectorySource::Child("foo".to_string()),
- subdir: None,
+ subdir: ".".parse().unwrap(),
storage_id: fdecl::StorageId::StaticInstanceId,
},
],
diff --git a/src/sys/lib/cm_rust/src/macro.rs b/src/sys/lib/cm_rust/src/macro.rs
index e58531e..43ce665 100644
--- a/src/sys/lib/cm_rust/src/macro.rs
+++ b/src/sys/lib/cm_rust/src/macro.rs
@@ -170,9 +170,9 @@
lazy_static::lazy_static! {
static ref DOT: RelativePath = RelativePath::dot();
}
- #[cfg(feature = "target_api_level_head")]
+ #[cfg(fuchsia_api_level_at_least = "HEAD")]
let dirname = &self.source_dictionary;
- #[cfg(not(feature = "target_api_level_head"))]
+ #[cfg(fuchsia_api_level_less_than = "HEAD")]
let dirname = &*DOT;
BorrowedSeparatedPath {
dirname,
diff --git a/src/sys/lib/cm_rust/testing/src/lib.rs b/src/sys/lib/cm_rust/testing/src/lib.rs
index 014a40a..d8aeb93 100644
--- a/src/sys/lib/cm_rust/testing/src/lib.rs
+++ b/src/sys/lib/cm_rust/testing/src/lib.rs
@@ -9,7 +9,7 @@
cm_types::{Name, Path, RelativePath},
derivative::Derivative,
fidl_fuchsia_component_decl as fdecl, fidl_fuchsia_data as fdata, fidl_fuchsia_io as fio,
- std::{collections::BTreeMap, path::PathBuf},
+ std::collections::BTreeMap,
};
/// Name of the test runner.
@@ -371,7 +371,7 @@
dictionary_source: Option<cm_rust::DictionarySource>,
source_dictionary: Option<RelativePath>,
rights: fio::Operations,
- subdir: Option<PathBuf>,
+ subdir: RelativePath,
backing_dir: Option<Name>,
storage_source: Option<cm_rust::StorageDirectorySource>,
storage_id: fdecl::StorageId,
@@ -447,7 +447,7 @@
dictionary_source: None,
source_dictionary: None,
rights: fio::R_STAR_DIR,
- subdir: None,
+ subdir: Default::default(),
backing_dir: None,
storage_source: None,
storage_id: fdecl::StorageId::StaticInstanceIdOrMoniker,
@@ -506,7 +506,7 @@
pub fn subdir(mut self, subdir: &str) -> Self {
assert_matches!(self.type_, CapabilityTypeName::Storage);
- self.subdir = Some(subdir.parse().unwrap());
+ self.subdir = subdir.parse().unwrap();
self
}
@@ -599,7 +599,7 @@
dependency_type: cm_rust::DependencyType,
availability: cm_rust::Availability,
rights: fio::Operations,
- subdir: Option<PathBuf>,
+ subdir: RelativePath,
scope: Option<Vec<cm_rust::EventScope>>,
filter: Option<BTreeMap<String, cm_rust::DictionaryValue>>,
config_type: Option<cm_rust::ConfigValueType>,
@@ -643,7 +643,7 @@
target_path: None,
source_dictionary: Default::default(),
rights: fio::R_STAR_DIR,
- subdir: None,
+ subdir: Default::default(),
dependency_type: cm_rust::DependencyType::Strong,
availability: cm_rust::Availability::Required,
scope: None,
@@ -748,7 +748,7 @@
pub fn subdir(mut self, subdir: &str) -> Self {
assert_matches!(self.type_, CapabilityTypeName::Directory);
- self.subdir = Some(subdir.parse().unwrap());
+ self.subdir = subdir.parse().unwrap();
self
}
@@ -847,7 +847,7 @@
target_name: Option<Name>,
availability: cm_rust::Availability,
rights: Option<fio::Operations>,
- subdir: Option<PathBuf>,
+ subdir: RelativePath,
}
impl ExposeBuilder {
@@ -888,7 +888,7 @@
target_name: None,
source_dictionary: Default::default(),
rights: None,
- subdir: None,
+ subdir: Default::default(),
availability: cm_rust::Availability::Required,
}
}
@@ -952,7 +952,7 @@
pub fn subdir(mut self, subdir: &str) -> Self {
assert_matches!(self.type_, CapabilityTypeName::Directory);
- self.subdir = Some(subdir.parse().unwrap());
+ self.subdir = subdir.parse().unwrap();
self
}
@@ -1052,7 +1052,7 @@
source_instance_filter: Option<Vec<String>>,
renamed_instances: Option<Vec<cm_rust::NameMapping>>,
rights: Option<fio::Operations>,
- subdir: Option<PathBuf>,
+ subdir: RelativePath,
scope: Option<Vec<cm_rust::EventScope>>,
dependency_type: cm_rust::DependencyType,
availability: cm_rust::Availability,
@@ -1106,7 +1106,7 @@
source_instance_filter: None,
renamed_instances: None,
rights: None,
- subdir: None,
+ subdir: Default::default(),
scope: None,
dependency_type: cm_rust::DependencyType::Strong,
availability: cm_rust::Availability::Required,
@@ -1197,7 +1197,7 @@
pub fn subdir(mut self, subdir: &str) -> Self {
assert_matches!(self.type_, CapabilityTypeName::Directory);
- self.subdir = Some(subdir.parse().unwrap());
+ self.subdir = subdir.parse().unwrap();
self
}
diff --git a/src/sys/lib/cm_types/src/lib.rs b/src/sys/lib/cm_types/src/lib.rs
index 7378051..ba22dcc 100644
--- a/src/sys/lib/cm_types/src/lib.rs
+++ b/src/sys/lib/cm_types/src/lib.rs
@@ -468,6 +468,10 @@
pub fn basename(&self) -> &Name {
self.0.basename().expect("can't be root")
}
+
+ pub fn extend(&mut self, other: RelativePath) {
+ self.0.segments.extend(other.segments);
+ }
}
impl IterablePath for Path {
@@ -616,7 +620,15 @@
}
pub fn to_path_buf(&self) -> PathBuf {
- PathBuf::from(self.to_string())
+ if self.is_dot() {
+ PathBuf::new()
+ } else {
+ PathBuf::from(self.to_string())
+ }
+ }
+
+ pub fn extend(&mut self, other: Self) {
+ self.segments.extend(other.segments);
}
}
diff --git a/src/sys/lib/routing/src/capability_source.rs b/src/sys/lib/routing/src/capability_source.rs
index 13a9702..616c87f 100644
--- a/src/sys/lib/routing/src/capability_source.rs
+++ b/src/sys/lib/routing/src/capability_source.rs
@@ -724,7 +724,7 @@
name: "foo".parse().unwrap(),
source: StorageDirectorySource::Parent,
backing_dir: "bar".parse().unwrap(),
- subdir: None,
+ subdir: Default::default(),
storage_id: fdecl::StorageId::StaticInstanceIdOrMoniker,
});
assert_eq!(storage_capability.type_name(), CapabilityTypeName::Storage);
diff --git a/src/sys/lib/routing/src/lib.rs b/src/sys/lib/routing/src/lib.rs
index 830aa85..80066fb 100644
--- a/src/sys/lib/routing/src/lib.rs
+++ b/src/sys/lib/routing/src/lib.rs
@@ -31,7 +31,6 @@
ErrorNotFoundInChild, ExposeVisitor, NoopVisitor, OfferVisitor, RouteBundle,
},
mapper::DebugRouteMapper,
- path::PathBufExt,
rights::Rights,
walk_state::WalkState,
},
@@ -47,11 +46,11 @@
UseEventStreamDecl, UseProtocolDecl, UseRunnerDecl, UseServiceDecl, UseSource,
UseStorageDecl,
},
- cm_types::Name,
+ cm_types::{Name, RelativePath},
fidl_fuchsia_component_decl as fdecl, fidl_fuchsia_io as fio,
from_enum::FromEnum,
moniker::{ChildName, Moniker},
- std::{path::PathBuf, sync::Arc},
+ std::sync::Arc,
tracing::warn,
};
@@ -275,15 +274,18 @@
#[derive(Debug)]
pub struct RouteSource<C: ComponentInstanceInterface> {
pub source: CapabilitySource<C>,
- pub relative_path: PathBuf,
+ pub relative_path: RelativePath,
}
impl<C: ComponentInstanceInterface> RouteSource<C> {
pub fn new(source: CapabilitySource<C>) -> Self {
- Self { source, relative_path: "".into() }
+ Self { source, relative_path: Default::default() }
}
- pub fn new_with_relative_path(source: CapabilitySource<C>, relative_path: PathBuf) -> Self {
+ pub fn new_with_relative_path(
+ source: CapabilitySource<C>,
+ relative_path: RelativePath,
+ ) -> Self {
Self { source, relative_path }
}
}
@@ -424,7 +426,7 @@
{
let mut state = DirectoryState {
rights: WalkState::new(),
- subdir: PathBuf::new(),
+ subdir: Default::default(),
availability_state: offer_decl.availability.into(),
};
let allowed_sources = AllowedSourcesBuilder::new(CapabilityTypeName::Directory)
@@ -846,19 +848,15 @@
#[derive(Clone, Debug)]
pub struct DirectoryState {
rights: WalkState<Rights>,
- pub subdir: PathBuf,
+ pub subdir: RelativePath,
availability_state: Availability,
}
impl DirectoryState {
- fn new(
- operations: fio::Operations,
- subdir: Option<PathBuf>,
- availability: &Availability,
- ) -> Self {
+ fn new(operations: fio::Operations, subdir: RelativePath, availability: &Availability) -> Self {
DirectoryState {
rights: WalkState::at(operations.into()),
- subdir: subdir.unwrap_or_else(PathBuf::new),
+ subdir,
availability_state: availability.clone(),
}
}
@@ -877,22 +875,22 @@
fn advance(
&mut self,
rights: Option<fio::Operations>,
- subdir: Option<PathBuf>,
+ mut subdir: RelativePath,
) -> Result<(), RoutingError> {
self.rights = self.rights.advance(rights.map(Rights::from))?;
- let subdir = subdir.clone().unwrap_or_else(PathBuf::new);
- self.subdir = subdir.attach(&self.subdir);
+ subdir.extend(self.subdir.clone());
+ self.subdir = subdir;
Ok(())
}
fn finalize(
&mut self,
rights: fio::Operations,
- subdir: Option<PathBuf>,
+ mut subdir: RelativePath,
) -> Result<(), RoutingError> {
self.rights = self.rights.finalize(Some(rights.into()))?;
- let subdir = subdir.clone().unwrap_or_else(PathBuf::new);
- self.subdir = subdir.attach(&self.subdir);
+ subdir.extend(self.subdir.clone());
+ self.subdir = subdir;
Ok(())
}
}
@@ -924,7 +922,9 @@
impl CapabilityVisitor for DirectoryState {
fn visit(&mut self, capability: &cm_rust::CapabilityDecl) -> Result<(), RoutingError> {
match capability {
- cm_rust::CapabilityDecl::Directory(dir) => self.finalize(dir.rights.clone(), None),
+ cm_rust::CapabilityDecl::Directory(dir) => {
+ self.finalize(dir.rights.clone(), Default::default())
+ }
_ => Ok(()),
}
}
@@ -963,7 +963,7 @@
&use_decl.availability,
);
if let UseSource::Framework = &use_decl.source {
- state.finalize(fio::RW_STAR_DIR, None)?;
+ state.finalize(fio::RW_STAR_DIR, Default::default())?;
}
let allowed_sources = AllowedSourcesBuilder::new(CapabilityTypeName::Directory)
.framework(InternalCapability::Directory)
@@ -997,7 +997,7 @@
{
let mut state = DirectoryState {
rights: WalkState::new(),
- subdir: PathBuf::new(),
+ subdir: Default::default(),
availability_state: expose_decl.availability.into(),
};
let allowed_sources = AllowedSourcesBuilder::new(CapabilityTypeName::Directory)
@@ -1097,7 +1097,8 @@
C: ComponentInstanceInterface + 'static,
{
// Storage rights are always READ+WRITE.
- let mut state = DirectoryState::new(fio::RW_STAR_DIR, None, &Availability::Required);
+ let mut state =
+ DirectoryState::new(fio::RW_STAR_DIR, Default::default(), &Availability::Required);
let allowed_sources =
AllowedSourcesBuilder::new(CapabilityTypeName::Directory).component().namespace();
let source = legacy_router::route_from_registration(
diff --git a/src/sys/lib/routing/testing/src/lib.rs b/src/sys/lib/routing/testing/src/lib.rs
index 21636c9..da7abfc 100644
--- a/src/sys/lib/routing/testing/src/lib.rs
+++ b/src/sys/lib/routing/testing/src/lib.rs
@@ -1543,7 +1543,7 @@
component,
},
relative_path,
- }) if capability_decl == expected_protocol_decl && component.moniker == expected_source_moniker && relative_path == PathBuf::new()
+ }) if capability_decl == expected_protocol_decl && component.moniker == expected_source_moniker && relative_path.is_dot()
);
}
@@ -1768,7 +1768,7 @@
..
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "foo");
}
_ => panic!("bad capability source"),
@@ -1865,7 +1865,7 @@
..
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "foo");
assert_eq!(members.len(), 3);
for c in [AggregateMember::Child("c".try_into().unwrap()), AggregateMember::Parent,
@@ -3147,7 +3147,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "foo");
assert_eq!(
source_path.expect("missing source path"),
@@ -3200,7 +3200,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "foo");
assert_eq!(
source_path.expect("missing source path"),
@@ -3269,7 +3269,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "foo");
assert_eq!(
source_path.expect("missing source path"),
@@ -3341,7 +3341,7 @@
capability_provider,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "foo");
assert_eq!(component.moniker, "c".parse().unwrap());
let mut data = capability_provider.route_instances();
@@ -3437,7 +3437,7 @@
capability_provider,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "foo");
assert_eq!(component.moniker, "c".parse().unwrap());
let mut data = capability_provider.route_instances();
@@ -3520,7 +3520,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
assert_eq!(
source_path.expect("missing source path"),
@@ -3599,7 +3599,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
assert_eq!(
source_path.expect("missing source path"),
@@ -3675,7 +3675,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
assert_eq!(
source_path.expect("missing source path"),
@@ -3748,7 +3748,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
assert_eq!(
source_path.expect("missing source path"),
@@ -3867,7 +3867,7 @@
..
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
}
_ => panic!("bad capability source"),
@@ -3912,7 +3912,7 @@
..
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
}
_ => panic!("bad capability source"),
@@ -4070,7 +4070,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
assert_eq!(
source_path.expect("missing source path"),
@@ -4134,7 +4134,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
assert_eq!(
source_path.expect("missing source path"),
@@ -4202,7 +4202,7 @@
component,
},
relative_path,
- } if relative_path == PathBuf::new() => {
+ } if relative_path.is_dot() => {
assert_eq!(name, "elf");
assert_eq!(
source_path.expect("missing source path"),
diff --git a/src/sys/lib/routing/testing/src/policy.rs b/src/sys/lib/routing/testing/src/policy.rs
index 2b36470..ae32667 100644
--- a/src/sys/lib/routing/testing/src/policy.rs
+++ b/src/sys/lib/routing/testing/src/policy.rs
@@ -250,7 +250,7 @@
backing_dir: "cache".parse().unwrap(),
name: "cache".parse().unwrap(),
source: StorageDirectorySource::Parent,
- subdir: None,
+ subdir: Default::default(),
storage_id: fdecl::StorageId::StaticInstanceIdOrMoniker,
}),
component: component.as_weak(),
diff --git a/src/sys/pkg/bin/pm/README.md b/src/sys/pkg/bin/pm/README.md
index f6c614d..a63ba55 100644
--- a/src/sys/pkg/bin/pm/README.md
+++ b/src/sys/pkg/bin/pm/README.md
@@ -129,13 +129,8 @@
A Package Snapshot contains package and file metadata from a set of Fuchsia
packages, and two package snapshots can be compared to simulate updating from
-one snapshot of packages to another.
-
-Within the Platform Source Tree, a build automatically produces a package
-snapshot of all products and packages enabled by `fx set`. The automatically
-generated snapshot is stored in the output directory at
-`obj/build/images/system.snapshot`. Outside of the Platform Source Tree,
-snapshots can be built from a set of packages using `fx snapshot`.
+one snapshot of packages to another. Snapshots can be built from a set of
+packages using `fx snapshot`.
To manually produce a package snapshot file:
1. When building a Fuchsia package with `pm build`, pass in an `-blobsfile`
diff --git a/src/sys/test_manager/src/running_suite.rs b/src/sys/test_manager/src/running_suite.rs
index b21c8be..b48ba25 100644
--- a/src/sys/test_manager/src/running_suite.rs
+++ b/src/sys/test_manager/src/running_suite.rs
@@ -751,7 +751,7 @@
name: CUSTOM_ARTIFACTS_CAPABILITY_NAME.parse().unwrap(),
source: cm_rust::StorageDirectorySource::Child(MEMFS_REALM_NAME.to_string()),
backing_dir: "memfs".parse().unwrap(),
- subdir: Some("custom_artifacts".into()),
+ subdir: "custom_artifacts".parse().unwrap(),
storage_id: fdecl::StorageId::StaticInstanceIdOrMoniker,
}));
diff --git a/src/ui/lib/escher/fs/fuchsia_data_source.cc b/src/ui/lib/escher/fs/fuchsia_data_source.cc
index c37d068..1dedb2d 100644
--- a/src/ui/lib/escher/fs/fuchsia_data_source.cc
+++ b/src/ui/lib/escher/fs/fuchsia_data_source.cc
@@ -51,7 +51,7 @@
auto dir = root_dir_.get();
for (size_t i = 0; i + 1 < segs.size(); ++i) {
const auto& seg = segs[i];
- vfs::internal::Node* subdir;
+ vfs::Node* subdir;
if (ZX_OK != dir->Lookup(seg, &subdir)) {
auto node = std::make_unique<vfs::PseudoDir>();
subdir = node.get();
diff --git a/zircon/system/utest/core/pager-writeback/pager-writeback.cc b/zircon/system/utest/core/pager-writeback/pager-writeback.cc
index 9c685cf..5f88b6d 100644
--- a/zircon/system/utest/core/pager-writeback/pager-writeback.cc
+++ b/zircon/system/utest/core/pager-writeback/pager-writeback.cc
@@ -534,10 +534,10 @@
// if the test thread is blocked on pagers outside of the test. WaitForBlocked() can only be
// relied upon in a non-component environment. The pager-writeback tests cannot run as standalone
// bootfs tests either because they need the next vDSO. Hence the only supported mode for this
- // test is unified mode, where the root resource will be available.
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ // test is unified mode, where the system resource will be available.
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
@@ -1547,10 +1547,10 @@
// if the test thread is blocked on pagers outside of the test. WaitForBlocked() can only be
// relied upon in a non-component environment. The pager-writeback tests cannot run as standalone
// bootfs tests either because they need the next vDSO. Hence the only supported mode for this
- // test is unified mode, where the root resource will be available.
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ // test is unified mode, where the system resource will be available.
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
@@ -2111,10 +2111,10 @@
// if the test thread is blocked on pagers outside of the test. WaitForBlocked() can only be
// relied upon in a non-component environment. The pager-writeback tests cannot run as standalone
// bootfs tests either because they need the next vDSO. Hence the only supported mode for this
- // test is unified mode, where the root resource will be available.
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ // test is unified mode, where the system resource will be available.
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
@@ -4650,12 +4650,17 @@
// Tests that zeroing across a pinned page clips expansion of the tail.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(OpZeroPinned, ZX_VMO_RESIZABLE) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -4670,7 +4675,7 @@
zx::bti bti;
zx::pmt pmt;
zx_iommu_desc_dummy_t desc;
- ASSERT_OK(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_OK(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()));
ASSERT_OK(zx::bti::create(iommu, 0, 0xdeadbeef, &bti));
zx_paddr_t addr;
@@ -5515,12 +5520,17 @@
// Tests that a VMO is not marked modified on a failed resize.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(NotModifiedOnFailedResize, ZX_VMO_RESIZABLE) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -5541,7 +5551,7 @@
zx::bti bti;
zx::pmt pmt;
zx_iommu_desc_dummy_t desc;
- ASSERT_OK(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_OK(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()));
ASSERT_OK(zx::bti::create(iommu, 0, 0xdeadbeef, &bti));
zx_paddr_t addr;
@@ -5776,12 +5786,17 @@
// Tests that pinning a page for read does not dirty it and does not mark the VMO modified.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(PinForRead, 0) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -5802,7 +5817,7 @@
zx::bti bti;
zx::pmt pmt;
zx_iommu_desc_dummy_t desc;
- ASSERT_OK(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_OK(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()));
ASSERT_OK(zx::bti::create(iommu, 0, 0xdeadbeef, &bti));
zx_paddr_t addr;
@@ -5829,12 +5844,17 @@
// Tests that pinning a page for write dirties it and marks the VMO modified.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(PinForWrite, 0) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -5852,11 +5872,11 @@
// Pin a page for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -5902,12 +5922,17 @@
// Tests that a page cannot be marked clean while it is pinned.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(PinnedWriteback, 0) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -5925,11 +5950,11 @@
// Pin a page for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -5996,12 +6021,17 @@
// Tests pinned read with interleaved writeback.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(ReadPinAwaitingClean, 0) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6050,7 +6080,7 @@
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- ASSERT_OK(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_OK(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()));
ASSERT_OK(zx::bti::create(iommu, 0, 0xdeadbeef, &bti));
zx_paddr_t addr;
@@ -6077,12 +6107,17 @@
// Tests pinned write with interleaved writeback.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(WritePinAwaitingClean, 0) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6128,11 +6163,11 @@
// Pin a page for write.
zx::pmt pmt;
- TestThread t2([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t2([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -6193,12 +6228,17 @@
// Tests delayed pinned write with interleaved writeback.
TEST(PagerWriteback, DelayedPinAwaitingClean) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6242,11 +6282,11 @@
// Try to pin for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -6299,12 +6339,17 @@
// Tests failed pin with interleaved writeback.
TEST(PagerWriteback, FailedPinAwaitingClean) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6348,11 +6393,11 @@
// Try to pin for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -6398,12 +6443,17 @@
// Tests that writing to a page after pinning does not generate additional DIRTY requests.
TEST(PagerWriteback, DirtyAfterPin) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6421,11 +6471,11 @@
// Pin a page for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -6482,12 +6532,17 @@
// Tests that pinning an already dirty page does not generate additional DIRTY requests.
TEST(PagerWriteback, PinAfterDirty) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6529,7 +6584,7 @@
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- ASSERT_OK(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_OK(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()));
ASSERT_OK(zx::bti::create(iommu, 0, 0xdeadbeef, &bti));
zx_paddr_t addr;
@@ -6563,12 +6618,17 @@
// Tests that both READ and DIRTY requests are generated as expected when pinning an unpopulated
// range for write.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(PinForWriteUnpopulated, 0) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6587,11 +6647,11 @@
// Pin both pages for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -6648,12 +6708,17 @@
// Tests that a failed pin write does not mark the VMO modified.
TEST(PagerWriteback, NotModifiedFailedPinWrite) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6671,11 +6736,11 @@
// Pin a page for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -6717,12 +6782,17 @@
// Tests that a pin write that fails part of the way does not mark the VMO modified.
TEST(PagerWriteback, NotModifiedPartialFailedPinWrite) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6740,11 +6810,11 @@
// Pin both pages for write.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, vmo]() -> bool {
+ TestThread t([&pmt, &iommu_resource, vmo]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -6790,12 +6860,17 @@
// Tests pinning for write through a slice.
TEST_WITH_AND_WITHOUT_TRAP_DIRTY(SlicePinWrite, 0) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -6817,11 +6892,11 @@
// Pin both pages for write through a slice.
zx::pmt pmt;
- TestThread t([&pmt, &root_resource, &slice]() -> bool {
+ TestThread t([&pmt, &iommu_resource, &slice]() -> bool {
zx::iommu iommu;
zx::bti bti;
zx_iommu_desc_dummy_t desc;
- if (zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ if (zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()) != ZX_OK) {
return false;
}
@@ -7092,12 +7167,17 @@
// Tests that a write completes successfully if a clean page is evicted after the generation of a
// DIRTY request but before it has been resolved.
TEST(PagerWriteback, EvictAfterDirtyRequest) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_DEBUG_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource debug_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -7137,7 +7217,7 @@
zx_system_get_page_size(), nullptr, 0));
// Request a scanner reclaim.
constexpr char k_command[] = "scanner reclaim_all";
- ASSERT_OK(zx_debug_send_command(root_resource->get(), k_command, strlen(k_command)));
+ ASSERT_OK(zx_debug_send_command(debug_resource.get(), k_command, strlen(k_command)));
// Check if the middle page has been evicted yet.
// Eviction is asynchronous. Wait for the eviction to occur.
diff --git a/zircon/system/utest/core/pager/pager.cc b/zircon/system/utest/core/pager/pager.cc
index 228d50a..9ae054d 100644
--- a/zircon/system/utest/core/pager/pager.cc
+++ b/zircon/system/utest/core/pager/pager.cc
@@ -1758,8 +1758,8 @@
ASSERT_EQ(zx_pager_supply_pages(pager.get(), vmo.get(), 0, 0, aux_vmo.get(), 1),
ZX_ERR_INVALID_ARGS);
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (root_resource->is_valid()) {
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (system_resource->is_valid()) {
zx::result<vmo_test::PhysVmo> result = vmo_test::GetTestPhysVmo(zx_system_get_page_size());
ASSERT_TRUE(result.is_ok());
zx_handle_t physical_vmo_handle = result.value().vmo.get();
@@ -1775,7 +1775,7 @@
kViolationCount,
};
for (uint32_t i = 0; i < kViolationCount; i++) {
- if (i == kHasPinned && !root_resource->is_valid()) {
+ if (i == kHasPinned && !system_resource->is_valid()) {
continue;
}
@@ -1809,9 +1809,14 @@
zx::iommu iommu;
zx::bti bti;
zx::pmt pmt;
+
if (i == kHasPinned) {
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
zx_iommu_desc_dummy_t desc;
- ASSERT_EQ(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_EQ(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()),
ZX_OK);
ASSERT_EQ(zx::bti::create(iommu, 0, 0xdeadbeef, &bti), ZX_OK);
@@ -1887,11 +1892,16 @@
// Tests that supply_pages works when the destination has some pinned pages.
TEST(Pager, PinnedSupplyPages) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
zx::pager pager;
ASSERT_OK(zx::pager::create(0, &pager));
@@ -1928,7 +1938,7 @@
zx::bti bti;
zx::pmt pmt;
zx_iommu_desc_dummy_t desc;
- ASSERT_OK(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_OK(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()));
ASSERT_OK(zx::bti::create(iommu, 0, 0xdeadbeef, &bti));
zx_paddr_t addr;
@@ -2405,9 +2415,19 @@
// zero scanner to run, since the zero fork queue looks close enough to the pager backed queue
// that most things will 'just work'.
constexpr char k_command[] = "scanner reclaim_all";
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid() ||
- zx_debug_send_command(root_resource->get(), k_command, strlen(k_command)) != ZX_OK) {
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
+ return;
+ }
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_DEBUG_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource debug_resource = std::move(result.value());
+
+ if (!debug_resource.is_valid() ||
+ zx_debug_send_command(debug_resource.get(), k_command, strlen(k_command)) != ZX_OK) {
// Failed to manually force the zero scanner to run, fall back to sleeping for a moment and hope
// it runs.
zx::nanosleep(zx::deadline_after(zx::sec(1)));
@@ -2421,12 +2441,17 @@
// Test that if we resize a vmo while it is waiting on a page to fullfill the commit for a pin
// request that neither the resize nor the pin cause a crash and fail gracefully.
TEST(Pager, ResizeBlockedPin) {
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_IOMMU_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource iommu_resource = std::move(result.value());
+
UserPager pager;
ASSERT_TRUE(pager.Init());
@@ -2438,7 +2463,7 @@
zx::bti bti;
zx::pmt pmt;
zx_iommu_desc_dummy_t desc;
- ASSERT_EQ(zx_iommu_create(root_resource->get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
+ ASSERT_EQ(zx_iommu_create(iommu_resource.get(), ZX_IOMMU_TYPE_DUMMY, &desc, sizeof(desc),
iommu.reset_and_get_address()),
ZX_OK);
ASSERT_EQ(zx::bti::create(iommu, 0, 0xdeadbeef, &bti), ZX_OK);
@@ -2858,17 +2883,22 @@
uint64_t offset, length;
ASSERT_FALSE(pager.GetPageReadRequest(vmo, 0, &offset, &length));
- zx::unowned_resource root_resource = maybe_standalone::GetRootResource();
- if (!root_resource->is_valid()) {
- printf("Root resource not available, skipping\n");
+ zx::unowned_resource system_resource = maybe_standalone::GetSystemResource();
+ if (!system_resource->is_valid()) {
+ printf("System resource not available, skipping\n");
return;
}
+ zx::result<zx::resource> result =
+ maybe_standalone::GetSystemResourceWithBase(system_resource, ZX_RSRC_SYSTEM_DEBUG_BASE);
+ ASSERT_OK(result.status_value());
+ zx::resource debug_resource = std::move(result.value());
+
// Trigger reclamation of only oldest evictable memory. This will include the pages we hinted
// DONT_NEED.
constexpr char k_command_reclaim[] = "scanner reclaim 1 only_old";
ASSERT_OK(
- zx_debug_send_command(root_resource->get(), k_command_reclaim, strlen(k_command_reclaim)));
+ zx_debug_send_command(debug_resource.get(), k_command_reclaim, strlen(k_command_reclaim)));
// Verify that the vmo has no committed pages after eviction.
// Eviction is asynchronous. Poll in a loop until we see the committed page count drop. In case
@@ -3440,8 +3470,8 @@
ASSERT_FALSE(pager.GetPageReadRequest(vmo, 0, &offset, &length));
}
-// Regression test for https://fxbug.dev/42173905. Tests that a port dequeue racing with pager destruction on a
-// detached VMO does not result in use-after-frees.
+// Regression test for https://fxbug.dev/42173905. Tests that a port dequeue racing with pager
+// destruction on a detached VMO does not result in use-after-frees.
TEST(Pager, RacyPortDequeue) {
// Repeat multiple times so we can hit the race. 1000 is a good balance between trying to
// reproduce the race without drastically increasing the test runtime.