[devcoord] Fix unbind task race condition.

We do not want to add an already-posted task as a dependent;
otherwise, when this task completes, it will try to post that
dependent again.

This can happen if DdkRemove is called on a parent device and its
child device at the same time.

Example
-----------------
Parent Device A
Child Device B

DdkRemove(A) -> UnbindTask(A) posted
DdkRemove(B) -> UnbindTask(B) posted

Async loop runs UnbindTask(A)
UnbindTask(A) adds UnbindTask(B) as a dependent
UnbindTask(A) completes immediately
UnbindTask(A).Complete tries to post UnbindTask(B), which is already pending
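
The snippet below is a condensed view of the guard added to
Task::DependencyComplete in task.cc further down (same members as
that file; the nested ifs from the diff are folded into a single
condition here purely for illustration):

  void Task::DependencyComplete(const Task* dependency, zx_status_t status) {
    ++finished_dependencies_count_;
    // Only post if this task is not already pending on the dispatcher;
    // re-posting a pending task would trip the ZX_ASSERT around Post().
    if (!is_pending() &&
        finished_dependencies_count_ == total_dependencies_count_) {
      ZX_ASSERT(async_task_.Post(dispatcher_) == ZX_OK);
    }
    if (status != ZX_OK && std::holds_alternative<Incomplete>(status_)) {
      DependencyFailed(status);
    }
  }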

BUG=35361

Change-Id: I376bceb219601088c7345a0169041c27770a42f7
diff --git a/zircon/system/core/devmgr/devcoordinator/coordinator-test.cc b/zircon/system/core/devmgr/devcoordinator/coordinator-test.cc
index 2b214a6..136f029 100644
--- a/zircon/system/core/devmgr/devcoordinator/coordinator-test.cc
+++ b/zircon/system/core/devmgr/devcoordinator/coordinator-test.cc
@@ -1264,6 +1264,53 @@
   loop()->RunUntilIdle();
 }
 
+TEST_F(UnbindTestCase, TwoConcurrentRemovals) {
+  size_t parent_index;
+  ASSERT_NO_FATAL_FAILURES(
+      AddDevice(platform_bus(), "parent", 0 /* protocol id */, "", &parent_index));
+
+  auto* parent_device = device(parent_index);
+
+  size_t child_index;
+  ASSERT_NO_FATAL_FAILURES(
+      AddDevice(parent_device->device, "child", 0 /* protocol id */, "", &child_index));
+
+  auto* child_device = device(child_index);
+
+  // Schedule concurrent removals.
+  ASSERT_NO_FATAL_FAILURES(coordinator_.ScheduleRemove(parent_device->device));
+  ASSERT_NO_FATAL_FAILURES(coordinator_.ScheduleRemove(child_device->device));
+  loop()->RunUntilIdle();
+
+  ASSERT_NO_FATAL_FAILURES(CheckRemoveReceivedAndReply(child_device->remote));
+  loop()->RunUntilIdle();
+
+  ASSERT_NO_FATAL_FAILURES(CheckRemoveReceivedAndReply(parent_device->remote));
+  loop()->RunUntilIdle();
+}
+
+TEST_F(UnbindTestCase, ManyConcurrentRemovals) {
+  constexpr size_t num_devices = 100;
+  size_t idx_map[num_devices];
+
+  for (size_t i = 0; i < num_devices; i++) {
+    auto parent = i == 0 ? platform_bus() : device(idx_map[i - 1])->device;
+    ASSERT_NO_FATAL_FAILURES(AddDevice(parent, "child", 0 /* protocol id */, "", &idx_map[i]));
+  }
+
+  for (size_t i = 0; i < num_devices; i++) {
+    ASSERT_NO_FATAL_FAILURES(coordinator_.ScheduleRemove(device(idx_map[i])->device));
+  }
+
+  loop()->RunUntilIdle();
+
+  for (size_t i = 0; i < num_devices; i++) {
+    ASSERT_NO_FATAL_FAILURES(
+        CheckRemoveReceivedAndReply(device(idx_map[num_devices - i - 1])->remote));
+    loop()->RunUntilIdle();
+  }
+}
+
 class SuspendTestCase : public MultipleDeviceTestCase {
  public:
   void SuspendTest(uint32_t flags);
diff --git a/zircon/system/core/devmgr/devcoordinator/task-test.cc b/zircon/system/core/devmgr/devcoordinator/task-test.cc
index 8131f6e..b25b957 100644
--- a/zircon/system/core/devmgr/devcoordinator/task-test.cc
+++ b/zircon/system/core/devmgr/devcoordinator/task-test.cc
@@ -77,6 +77,39 @@
   bool fail_on_dep_failure_;
 };
 
+// A task that depends on its parent task.
+class DepOnParentTask : public CountingTask {
+ public:
+  // The created descendants will be stored in |out_deps|, beginning at |start_index|.
+  static fbl::RefPtr<DepOnParentTask> Create(zx_status_t root_status,
+                                             size_t num_descendents,
+                                             fbl::Vector<fbl::RefPtr<DepOnParentTask>>* out_deps,
+                                             size_t start_index = 0) {
+    auto task = fbl::MakeRefCounted<DepOnParentTask>();
+    task->mock_status_ = root_status;
+    if (num_descendents > 0) {
+      auto child_task =
+          DepOnParentTask::Create(ZX_OK, num_descendents - 1, out_deps, start_index + 1);
+      child_task->AddDependency(task);
+      out_deps->push_back(child_task);
+    }
+    return task;
+  }
+
+ private:
+  void Run() override {
+    CountingTask::Run();
+    Complete(mock_status_);
+  }
+
+  void DependencyFailed(zx_status_t status) override {
+    CountingTask::DependencyFailed(status);
+    Complete(status);
+  }
+
+  zx_status_t mock_status_;
+};
+
 class SequenceTask : public devmgr::Task {
  public:
   SequenceTask() : Task(async_get_default_dispatcher()) {}
@@ -202,4 +235,44 @@
   ASSERT_EQ(task->Dependencies().size(), 0);
 }
 
+TEST_F(TaskTestCase, DependentOnParentSuccess) {
+  size_t num_deps = 10;
+  fbl::Vector<fbl::RefPtr<DepOnParentTask>> deps;
+  auto root_task = DepOnParentTask::Create(ZX_OK, num_deps, &deps);
+
+  loop().RunUntilIdle();
+
+  ASSERT_TRUE(root_task->is_completed());
+  EXPECT_OK(root_task->status());
+  EXPECT_EQ(root_task->run_calls(), 1);
+  EXPECT_EQ(root_task->dep_fail_calls(), 0);
+
+  for (const auto& task : deps) {
+    ASSERT_TRUE(task->is_completed());
+    EXPECT_OK(task->status());
+    EXPECT_EQ(task->run_calls(), 1);
+    EXPECT_EQ(task->dep_fail_calls(), 0);
+  }
+}
+
+TEST_F(TaskTestCase, DependentOnParentFailure) {
+  size_t num_deps = 10;
+  fbl::Vector<fbl::RefPtr<DepOnParentTask>> deps;
+  auto root_task = DepOnParentTask::Create(ZX_ERR_BAD_STATE, num_deps, &deps);
+
+  loop().RunUntilIdle();
+
+  ASSERT_TRUE(root_task->is_completed());
+  EXPECT_NOT_OK(root_task->status());
+  EXPECT_EQ(root_task->run_calls(), 1);
+  EXPECT_EQ(root_task->dep_fail_calls(), 0);
+
+  for (const auto& task : deps) {
+    ASSERT_TRUE(task->is_completed());
+    EXPECT_NOT_OK(task->status());
+    EXPECT_EQ(task->run_calls(), 0);
+    EXPECT_EQ(task->dep_fail_calls(), 1);
+  }
+}
+
 }  // namespace
diff --git a/zircon/system/core/devmgr/devcoordinator/task.cc b/zircon/system/core/devmgr/devcoordinator/task.cc
index 60461fe..0443191 100644
--- a/zircon/system/core/devmgr/devcoordinator/task.cc
+++ b/zircon/system/core/devmgr/devcoordinator/task.cc
@@ -48,8 +48,11 @@
 
 void Task::DependencyComplete(const Task* dependency, zx_status_t status) {
   ++finished_dependencies_count_;
-  if (finished_dependencies_count_ == total_dependencies_count_) {
-    ZX_ASSERT(async_task_.Post(dispatcher_) == ZX_OK);
+  // If this task is already scheduled to run, we shouldn't try to post it again.
+  if (!is_pending()) {
+    if (finished_dependencies_count_ == total_dependencies_count_) {
+      ZX_ASSERT(async_task_.Post(dispatcher_) == ZX_OK);
+    }
   }
   if (status != ZX_OK && std::holds_alternative<Incomplete>(status_)) {
     DependencyFailed(status);
diff --git a/zircon/system/core/devmgr/devcoordinator/task.h b/zircon/system/core/devmgr/devcoordinator/task.h
index a6104d1..eb0f2d5 100644
--- a/zircon/system/core/devmgr/devcoordinator/task.h
+++ b/zircon/system/core/devmgr/devcoordinator/task.h
@@ -71,6 +71,8 @@
   // element of dependencies_.
   void DependencyComplete(const Task* dependency, zx_status_t status);
 
+  bool is_pending() const { return async_task_.is_pending(); }
+
   // List of tasks that should be notified when this task is complete
   fbl::Vector<fbl::RefPtr<Task>> dependents_;
   // Reverse of dependents_