blob: 61a6836e70ee21833a77c53b9f1ca4b1e10011e5 [file] [log] [blame]
// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT
#include <object/channel_dispatcher.h>
#include <string.h>
#include <assert.h>
#include <err.h>
//#include <trace.h>
#include <kernel/event.h>
//#include <platform.h>
#include <object/handle.h>
#include <object/message_packet.h>
#ifdef _KERNEL
#include <object/process_dispatcher.h>
#include <object/thread_dispatcher.h>
#endif
#include <zircon/rights.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_lock.h>
#include <fbl/type_support.h>
using fbl::AutoLock;
#define LOCAL_TRACE 0
#ifndef _KERNEL
#define thread_reschedule(args...) ((void)0)
#undef TA_NO_THREAD_SAFETY_ANALYSIS
#define TA_NO_THREAD_SAFETY_ANALYSIS /**/
#endif
// static
// Allocates a pair of channel endpoints, links them to each other, and
// returns both dispatchers plus the default rights for a channel.
// Returns ZX_ERR_NO_MEMORY if either endpoint cannot be allocated.
zx_status_t ChannelDispatcher::Create(fbl::RefPtr<Dispatcher>* dispatcher0,
                                      fbl::RefPtr<Dispatcher>* dispatcher1,
                                      zx_rights_t* rights) {
    fbl::AllocChecker ac;

    auto endpoint0 = fbl::AdoptRef(new (&ac) ChannelDispatcher());
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;

    auto endpoint1 = fbl::AdoptRef(new (&ac) ChannelDispatcher());
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;

    // Point each endpoint at its peer before either becomes visible
    // to other threads.
    endpoint0->Init(endpoint1);
    endpoint1->Init(endpoint0);

    *rights = ZX_DEFAULT_CHANNEL_RIGHTS;
    *dispatcher0 = fbl::move(endpoint0);
    *dispatcher1 = fbl::move(endpoint1);
    return ZX_OK;
}
// A freshly created channel endpoint starts out writable; readability and
// peer-closed signals are updated later as messages arrive and peers go away.
ChannelDispatcher::ChannelDispatcher()
: state_tracker_(ZX_CHANNEL_WRITABLE) {
}
// This is called before either ChannelDispatcher is accessible from threads other than the one
// initializing the channel, so it does not need locking.
void ChannelDispatcher::Init(fbl::RefPtr<ChannelDispatcher> other) TA_NO_THREAD_SAFETY_ANALYSIS {
other_ = fbl::move(other);
// Cache the peer's koid so it remains available even after other_ is reset
// when the peer goes away.
other_koid_ = other_->get_koid();
}
// Destroys any messages still queued on this endpoint.
ChannelDispatcher::~ChannelDispatcher() {
// At this point the other endpoint no longer holds
// a reference to us, so we can be sure we're discarding
// any remaining messages safely.
// It's not possible to do this safely in on_zero_handles()
messages_.clear();
}
// Registers |observer| with this channel's state tracker. The CountInfo
// snapshot reports the current queued-message count against
// ZX_CHANNEL_READABLE so the observer begins with an accurate view; the
// count and the registration happen under the same lock acquisition.
zx_status_t ChannelDispatcher::add_observer(StateObserver* observer) {
canary_.Assert();
AutoLock lock(&lock_);
StateObserver::CountInfo cinfo =
{{{messages_.size_slow(), ZX_CHANNEL_READABLE}, {0u, 0u}}};
state_tracker_.AddObserver(observer, &cinfo);
return ZX_OK;
}
// Detaches |waiter| from this channel's waiter list if it is still queued.
// Safe to call for a waiter that has already been removed; that case is a
// no-op.
void ChannelDispatcher::RemoveWaiter(MessageWaiter* waiter) {
    AutoLock guard(&lock_);
    if (waiter->InContainer())
        waiters_.erase(*waiter);
}
// Called when the last local handle to this endpoint is closed. Severs the
// link to the peer and cancels any Call() operations waiting on this side,
// then notifies the peer outside the lock.
void ChannelDispatcher::on_zero_handles() {
canary_.Assert();
// Detach other endpoint
fbl::RefPtr<ChannelDispatcher> other;
{
AutoLock lock(&lock_);
other = fbl::move(other_);
// (3A) Abort any waiting Call operations
// because we've been canceled by reason
// of our local handle going away.
// Remove waiter from list.
while (!waiters_.is_empty()) {
auto waiter = waiters_.pop_front();
waiter->Cancel(ZX_ERR_CANCELED);
}
}
// Ensure other endpoint detaches us
// NOTE(review): this runs after dropping lock_, presumably so we never hold
// both endpoints' locks at once — confirm against the locking rules.
if (other)
other->OnPeerZeroHandles();
}
// Invoked by the peer endpoint's on_zero_handles(). Drops our reference to
// the peer, transitions the signal state from WRITABLE to PEER_CLOSED, and
// cancels any Call() operations waiting on this side.
void ChannelDispatcher::OnPeerZeroHandles() {
canary_.Assert();
AutoLock lock(&lock_);
other_.reset();
state_tracker_.UpdateState(ZX_CHANNEL_WRITABLE, ZX_CHANNEL_PEER_CLOSED);
// (3B) Abort any waiting Call operations
// because we've been canceled by reason
// of the opposing endpoint going away.
// Remove waiter from list.
while (!waiters_.is_empty()) {
auto waiter = waiters_.pop_front();
waiter->Cancel(ZX_ERR_PEER_CLOSED);
}
}
// Dequeues the front message, if any.
//
// On entry *msg_size / *msg_handle_count hold the caller's buffer
// capacities; on return they are overwritten with the front message's
// actual data size and handle count (even when the message is too big).
//
// Returns:
//   ZX_OK                    - message popped into *msg.
//   ZX_ERR_SHOULD_WAIT       - queue empty, peer still alive.
//   ZX_ERR_PEER_CLOSED       - queue empty and peer gone.
//   ZX_ERR_BUFFER_TOO_SMALL  - message exceeds the caller's capacities;
//                              if |may_discard| the message is still
//                              popped into *msg, otherwise it stays queued.
zx_status_t ChannelDispatcher::Read(uint32_t* msg_size,
uint32_t* msg_handle_count,
fbl::unique_ptr<MessagePacket>* msg,
bool may_discard) {
canary_.Assert();
auto max_size = *msg_size;
auto max_handle_count = *msg_handle_count;
AutoLock lock(&lock_);
if (messages_.is_empty())
return other_ ? ZX_ERR_SHOULD_WAIT : ZX_ERR_PEER_CLOSED;
*msg_size = messages_.front().data_size();
*msg_handle_count = messages_.front().num_handles();
zx_status_t rv = ZX_OK;
if (*msg_size > max_size || *msg_handle_count > max_handle_count) {
if (!may_discard)
return ZX_ERR_BUFFER_TOO_SMALL;
rv = ZX_ERR_BUFFER_TOO_SMALL;
}
*msg = messages_.pop_front();
// Clear READABLE once the last queued message has been taken.
if (messages_.is_empty())
state_tracker_.UpdateState(ZX_CHANNEL_READABLE, 0u);
return rv;
}
// Enqueues |msg| on the opposing endpoint.
//
// Returns ZX_ERR_PEER_CLOSED if the peer is gone; in that case the packet's
// handle ownership is released so the caller can return the handles to the
// process table. The peer RefPtr is taken under the lock but the actual
// write happens after dropping it.
zx_status_t ChannelDispatcher::Write(fbl::unique_ptr<MessagePacket> msg) {
canary_.Assert();
fbl::RefPtr<ChannelDispatcher> other;
{
AutoLock lock(&lock_);
if (!other_) {
// |msg| will be destroyed but we want to keep the handles alive since
// the caller should put them back into the process table.
msg->set_owns_handles(false);
return ZX_ERR_PEER_CLOSED;
}
other = other_;
}
// WriteSelf returns the number of woken threads; reschedule if any woke.
if (other->WriteSelf(fbl::move(msg)) > 0)
thread_reschedule();
return ZX_OK;
}
// Implements the channel "call" operation: registers this thread's
// MessageWaiter (keyed by the outbound message's txid), writes |msg| to the
// peer, then waits for a matching reply or the deadline.
//
// On ZX_ERR_PEER_CLOSED, *return_handles is set so the caller can put the
// message's handles back into the process table.
zx_status_t ChannelDispatcher::Call(fbl::unique_ptr<MessagePacket> msg,
zx_time_t deadline, bool* return_handles,
fbl::unique_ptr<MessagePacket>* reply) {
canary_.Assert();
// The waiter is a per-thread object; in the non-kernel (host test) build
// there is no thread dispatcher to supply one.
#ifdef _KERNEL
auto waiter = ThreadDispatcher::GetCurrent()->GetMessageWaiter();
#else
MessageWaiter *waiter = nullptr;
#endif
if (unlikely(waiter->BeginWait(fbl::WrapRefPtr(this), msg->get_txid()) != ZX_OK)) {
#ifdef _KERNEL
// If a thread tries BeginWait'ing twice, the VDSO contract around retrying
// channel calls has been violated. Shoot the misbehaving process.
ProcessDispatcher::GetCurrent()->Kill();
#endif
return ZX_ERR_BAD_STATE;
}
fbl::RefPtr<ChannelDispatcher> other;
{
AutoLock lock(&lock_);
if (!other_) {
// |msg| will be destroyed but we want to keep the handles alive since
// the caller should put them back into the process table.
msg->set_owns_handles(false);
*return_handles = true;
// Disarm the waiter before bailing; its reply (if any) is discarded.
waiter->EndWait(reply);
return ZX_ERR_PEER_CLOSED;
}
other = other_;
// (0) Before writing the outbound message and waiting, add our
// waiter to the list.
waiters_.push_back(waiter);
}
// (1) Write outbound message to opposing endpoint.
other->WriteSelf(fbl::move(msg));
// Reuse the code from the half-call used for retrying a Call after thread
// suspend.
return ResumeInterruptedCall(waiter, deadline, reply);
}
// Second half of Call(): blocks on |waiter|'s event until a reply is
// delivered, the call is canceled, the deadline passes, or the thread is
// interrupted. Also entered directly when a suspended thread retries a
// call, hence the name.
zx_status_t ChannelDispatcher::ResumeInterruptedCall(MessageWaiter* waiter,
zx_time_t deadline,
fbl::unique_ptr<MessagePacket>* reply) {
canary_.Assert();
// (2) Wait for notification via waiter's event or for the
// deadline to hit.
zx_status_t status = waiter->Wait(deadline);
if (status == ZX_ERR_INTERNAL_INTR_RETRY) {
// If we got interrupted, return out to usermode, but
// do not clear the waiter.
return status;
}
// (3) see (3A), (3B) above or (3C) below for paths where
// the waiter could be signaled and removed from the list.
//
// If the deadline hits, the waiter is not removed
// from the list *but* another thread could still
// cause (3A), (3B), or (3C) before the lock below.
{
AutoLock lock(&lock_);
// (4) If any of (3A), (3B), or (3C) have occurred,
// we were removed from the waiters list already
// and EndWait() returns a non-ZX_ERR_TIMED_OUT status.
// Otherwise, the status is ZX_ERR_TIMED_OUT and it
// is our job to remove the waiter from the list.
if ((status = waiter->EndWait(reply)) == ZX_ERR_TIMED_OUT)
waiters_.erase(*waiter);
}
return status;
}
// Enqueues |msg| on THIS endpoint (i.e. this is the receiving side; the
// peer's Write()/Call() invoke it). If a Call() waiter's txid matches the
// message, the message is handed directly to that waiter instead of being
// queued. Returns the number of threads woken by the delivery, or 0 when
// the message was queued.
int ChannelDispatcher::WriteSelf(fbl::unique_ptr<MessagePacket> msg) {
canary_.Assert();
AutoLock lock(&lock_);
if (!waiters_.is_empty()) {
// If the far side is waiting for replies to messages
// send via "call", see if this message has a matching
// txid to one of the waiters, and if so, deliver it.
zx_txid_t txid = msg->get_txid();
for (auto& waiter: waiters_) {
// (3C) Deliver message to waiter.
// Remove waiter from list.
if (waiter.get_txid() == txid) {
waiters_.erase(waiter);
// we return how many threads have been woken up, or zero.
return waiter.Deliver(fbl::move(msg));
}
}
}
// No matching waiter: queue the message and signal readability.
messages_.push_back(fbl::move(msg));
state_tracker_.UpdateState(0u, ZX_CHANNEL_READABLE);
return 0;
}
// Applies user-controlled signal bits either to this endpoint or, when
// |peer| is set, to the opposing endpoint. Only bits inside
// ZX_USER_SIGNAL_ALL may be set or cleared. Returns ZX_ERR_PEER_CLOSED if
// |peer| is requested but the other endpoint is gone.
zx_status_t ChannelDispatcher::user_signal(uint32_t clear_mask, uint32_t set_mask, bool peer) {
    canary_.Assert();

    if ((set_mask & ~ZX_USER_SIGNAL_ALL) || (clear_mask & ~ZX_USER_SIGNAL_ALL))
        return ZX_ERR_INVALID_ARGS;

    if (!peer) {
        // Local case: update our own state tracker directly.
        state_tracker_.UpdateState(clear_mask, set_mask);
        return ZX_OK;
    }

    // Peer case: grab a reference under the lock, then signal the peer
    // after releasing it.
    fbl::RefPtr<ChannelDispatcher> peer_channel;
    {
        AutoLock guard(&lock_);
        if (!other_)
            return ZX_ERR_PEER_CLOSED;
        peer_channel = other_;
    }
    return peer_channel->UserSignalSelf(clear_mask, set_mask);
}
// Applies user signal bits to this endpoint's state tracker; invoked by the
// peer's user_signal(..., peer=true).
zx_status_t ChannelDispatcher::UserSignalSelf(uint32_t clear_mask, uint32_t set_mask) {
canary_.Assert();
state_tracker_.UpdateState(clear_mask, set_mask);
return ZX_OK;
}
// If the waiter is still associated with a channel (EndWait was never
// called), detach it from that channel's waiter list before destruction.
ChannelDispatcher::MessageWaiter::~MessageWaiter() {
if (unlikely(channel_)) {
channel_->RemoveWaiter(this);
}
ZX_DEBUG_ASSERT(!InContainer());
}
// Arms the waiter for a call on |channel| with transaction id |txid|.
// Fails with ZX_ERR_BAD_STATE if the waiter is already armed (channel_ set),
// which Call() treats as a VDSO-contract violation.
zx_status_t ChannelDispatcher::MessageWaiter::BeginWait(fbl::RefPtr<ChannelDispatcher> channel,
zx_txid_t txid) {
if (unlikely(channel_)) {
return ZX_ERR_BAD_STATE;
}
ZX_DEBUG_ASSERT(!InContainer());
txid_ = txid;
// Default outcome: if nothing ever signals us, EndWait reports timeout.
status_ = ZX_ERR_TIMED_OUT;
channel_ = fbl::move(channel);
event_.Unsignal();
return ZX_OK;
}
// Hands a reply message to the waiter and wakes the waiting thread.
// Returns the wake count from the event signal.
int ChannelDispatcher::MessageWaiter::Deliver(fbl::unique_ptr<MessagePacket> msg) {
ZX_DEBUG_ASSERT(channel_);
msg_ = fbl::move(msg);
status_ = ZX_OK;
return event_.Signal(ZX_OK);
}
// Aborts the wait with |status| (ZX_ERR_CANCELED or ZX_ERR_PEER_CLOSED) and
// wakes the waiting thread. The caller must have already removed the waiter
// from the channel's list (hence the !InContainer() assert).
int ChannelDispatcher::MessageWaiter::Cancel(zx_status_t status) {
ZX_DEBUG_ASSERT(!InContainer());
ZX_DEBUG_ASSERT(channel_);
status_ = status;
return event_.Signal(status);
}
// Blocks on the waiter's event until delivery, cancellation, or |deadline|.
// Waiting is only valid between BeginWait() and EndWait(); an un-armed
// waiter reports ZX_ERR_BAD_STATE.
zx_status_t ChannelDispatcher::MessageWaiter::Wait(lk_time_t deadline) {
    if (channel_ == nullptr)
        return ZX_ERR_BAD_STATE;
    return event_.Wait(deadline);
}
// Returns any delivered message via out and the status.
// Also disarms the waiter: channel_ is cleared so the destructor and a later
// BeginWait() see the waiter as free.
zx_status_t ChannelDispatcher::MessageWaiter::EndWait(fbl::unique_ptr<MessagePacket>* out) {
if (unlikely(!channel_)) {
return ZX_ERR_BAD_STATE;
}
*out = fbl::move(msg_);
channel_ = nullptr;
return status_;
}