// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT
#include "vm/vm_aspace.h"
#include <assert.h>
#include <err.h>
#include <inttypes.h>
#include <lib/cmdline.h>
#include <lib/crypto/global_prng.h>
#include <lib/crypto/prng.h>
#include <lib/userabi/vdso.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <zircon/types.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <fbl/intrusive_double_list.h>
#include <fbl/mutex.h>
#include <kernel/thread.h>
#include <kernel/thread_lock.h>
#include <vm/fault.h>
#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <vm/vm_object.h>
#include <vm/vm_object_paged.h>
#include <vm/vm_object_physical.h>
#include "object/dispatcher.h"
#include "object/vm_object_dispatcher.h"
#include "object/vm_address_region_dispatcher.h"
#include "object/process_dispatcher.h"
#include "vm_priv.h"
#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)
#define GUEST_PHYSICAL_ASPACE_BASE 0UL
#define GUEST_PHYSICAL_ASPACE_SIZE (1UL << MMU_GUEST_SIZE_SHIFT)
// pointer to a singleton kernel address space
VmAspace* VmAspace::kernel_aspace_ = nullptr;
// pointer to the dummy root VMAR singleton
static VmAddressRegion* dummy_root_vmar = nullptr;
// list of all address spaces
struct VmAspaceListGlobal {};
static DECLARE_MUTEX(VmAspaceListGlobal) aspace_list_lock;
static fbl::DoublyLinkedList<VmAspace*> aspaces TA_GUARDED(aspace_list_lock);
// Called once at boot to initialize the singleton kernel address
// space. Thread safety analysis is disabled since we don't need to
// lock yet.
void VmAspace::KernelAspaceInitPreHeap() TA_NO_THREAD_SAFETY_ANALYSIS {
// the singleton kernel address space
static VmAspace _kernel_aspace(KERNEL_ASPACE_BASE, KERNEL_ASPACE_SIZE, VmAspace::TYPE_KERNEL,
"kernel");
// the singleton dummy root vmar (used to break a reference cycle in
// Destroy())
static VmAddressRegionDummy dummy_vmar;
#if LK_DEBUGLEVEL > 1
_kernel_aspace.Adopt();
dummy_vmar.Adopt();
#endif
dummy_root_vmar = &dummy_vmar;
static VmAddressRegion _kernel_root_vmar(_kernel_aspace);
_kernel_aspace.root_vmar_ = fbl::AdoptRef(&_kernel_root_vmar);
zx_status_t status = _kernel_aspace.Init();
ASSERT(status == ZX_OK);
// save a pointer to the singleton kernel address space
VmAspace::kernel_aspace_ = &_kernel_aspace;
aspaces.push_front(kernel_aspace_);
}
// simple test routines
static inline bool is_inside(VmAspace& aspace, vaddr_t vaddr) {
return (vaddr >= aspace.base() && vaddr <= aspace.base() + aspace.size() - 1);
}
static inline bool is_inside(VmAspace& aspace, VmAddressRegion& r) {
// is the starting address within the address space
if (!is_inside(aspace, r.base())) {
return false;
}
if (r.size() == 0) {
return true;
}
// see if the size is enough to wrap the integer
if (r.base() + r.size() - 1 < r.base()) {
return false;
}
// test to see if the end address is within the address space's range
if (r.base() + r.size() - 1 > aspace.base() + aspace.size() - 1) {
return false;
}
return true;
}
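// Clamp |size| so that [vaddr, vaddr + size) stays inside |aspace|, guarding against
// integer overflow when the requested range would wrap the address space.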
static inline size_t trim_to_aspace(VmAspace& aspace, vaddr_t vaddr, size_t size) {
DEBUG_ASSERT(is_inside(aspace, vaddr));
if (size == 0) {
return size;
}
size_t offset = vaddr - aspace.base();
// LTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace size 0x%zx\n",
// vaddr, size, offset, aspace.base(), aspace.size());
if (offset + size < offset) {
size = ULONG_MAX - offset - 1;
}
// LTRACEF("size now 0x%zx\n", size);
if (offset + size >= aspace.size() - 1) {
size = aspace.size() - offset;
}
// LTRACEF("size now 0x%zx\n", size);
return size;
}
VmAspace::VmAspace(vaddr_t base, size_t size, uint32_t flags, const char* name)
: base_(base), size_(size), flags_(flags), root_vmar_(nullptr), aslr_prng_(nullptr, 0) {
DEBUG_ASSERT(size != 0);
DEBUG_ASSERT(base + size - 1 >= base);
Rename(name);
LTRACEF("%p '%s'\n", this, name_);
}
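// Set up the arch-specific portion of the address space, seed the per-aspace ASLR state,
// and create the root VMAR if one has not already been installed (the kernel aspace wires
// up its root VMAR statically before calling Init()).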
zx_status_t VmAspace::Init() {
canary_.Assert();
LTRACEF("%p '%s'\n", this, name_);
// initialize the architecturally specific part
bool is_high_kernel = (flags_ & TYPE_MASK) == TYPE_KERNEL;
bool is_guest = (flags_ & TYPE_MASK) == TYPE_GUEST_PHYS;
uint arch_aspace_flags =
(is_high_kernel ? ARCH_ASPACE_FLAG_KERNEL : 0u) | (is_guest ? ARCH_ASPACE_FLAG_GUEST : 0u);
zx_status_t status = arch_aspace_.Init(base_, size_, arch_aspace_flags);
if (status != ZX_OK) {
return status;
}
InitializeAslr();
if (likely(!root_vmar_)) {
return VmAddressRegion::CreateRoot(*this, VMAR_FLAG_CAN_MAP_SPECIFIC, &root_vmar_);
}
return ZX_OK;
}
fbl::RefPtr<VmAspace> VmAspace::Create(uint32_t flags, const char* name) {
LTRACEF("flags 0x%x, name '%s'\n", flags, name);
vaddr_t base;
size_t size;
switch (flags & TYPE_MASK) {
case TYPE_USER:
base = USER_ASPACE_BASE;
size = USER_ASPACE_SIZE;
break;
case TYPE_KERNEL:
base = KERNEL_ASPACE_BASE;
size = KERNEL_ASPACE_SIZE;
break;
case TYPE_LOW_KERNEL:
base = 0;
size = USER_ASPACE_BASE + USER_ASPACE_SIZE;
break;
case TYPE_GUEST_PHYS:
base = GUEST_PHYSICAL_ASPACE_BASE;
size = GUEST_PHYSICAL_ASPACE_SIZE;
break;
default:
panic("Invalid aspace type");
}
fbl::AllocChecker ac;
auto aspace = fbl::AdoptRef(new (&ac) VmAspace(base, size, flags, name));
if (!ac.check()) {
return nullptr;
}
// initialize the arch specific component to our address space
zx_status_t status = aspace->Init();
if (status != ZX_OK) {
status = aspace->Destroy();
DEBUG_ASSERT(status == ZX_OK);
return nullptr;
}
// add it to the global list
{
Guard<fbl::Mutex> guard{&aspace_list_lock};
aspaces.push_back(aspace.get());
}
// return a ref pointer to the aspace
return aspace;
}
void VmAspace::Rename(const char* name) {
canary_.Assert();
strlcpy(name_, name ? name : "unnamed", sizeof(name_));
}
VmAspace::~VmAspace() {
canary_.Assert();
LTRACEF("%p '%s'\n", this, name_);
// we have to have already been destroyed before freeing
DEBUG_ASSERT(aspace_destroyed_);
// pop it out of the global aspace list
{
Guard<fbl::Mutex> guard{&aspace_list_lock};
if (this->InContainer()) {
aspaces.erase(*this);
}
}
// destroy the arch portion of the aspace
// TODO(teisenbe): Move this to Destroy(). Currently can't move since
// ProcessDispatcher calls Destroy() from the context of a thread in the
// aspace.
zx_status_t status = arch_aspace_.Destroy();
DEBUG_ASSERT(status == ZX_OK);
}
fbl::RefPtr<VmAddressRegion> VmAspace::RootVmar() {
Guard<fbl::Mutex> guard{&lock_};
return fbl::RefPtr<VmAddressRegion>(root_vmar_);
}
zx_status_t VmAspace::Destroy() {
canary_.Assert();
LTRACEF("%p '%s'\n", this, name_);
Guard<fbl::Mutex> guard{&lock_};
// Don't let a vDSO mapping prevent destroying a VMAR
// when the whole process is being destroyed.
vdso_code_mapping_.reset();
// tear down and free all of the regions in our address space
if (root_vmar_) {
zx_status_t status = root_vmar_->DestroyLocked();
if (status != ZX_OK && status != ZX_ERR_BAD_STATE) {
return status;
}
}
aspace_destroyed_ = true;
// Break the reference cycle between this aspace and the root VMAR
root_vmar_.reset(dummy_root_vmar);
return ZX_OK;
}
bool VmAspace::is_destroyed() const {
Guard<fbl::Mutex> guard{&lock_};
return aspace_destroyed_;
}
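// Common mapping path used by the Alloc*/ReserveSpace helpers below: validates the
// arguments, translates VMM_FLAG_* into VMAR flags, creates a VmMapping of |vmo| under
// the root VMAR, and optionally commits and maps the range up front.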
zx_status_t VmAspace::MapObjectInternal(fbl::RefPtr<VmObject> vmo, const char* name,
uint64_t offset, size_t size, void** ptr,
uint8_t align_pow2, uint vmm_flags, uint arch_mmu_flags) {
canary_.Assert();
LTRACEF("aspace %p name '%s' vmo %p, offset %#" PRIx64
" size %#zx "
"ptr %p align %hhu vmm_flags %#x arch_mmu_flags %#x\n",
this, name, vmo.get(), offset, size, ptr ? *ptr : 0, align_pow2, vmm_flags,
arch_mmu_flags);
DEBUG_ASSERT(!is_user() || !(arch_mmu_flags & ARCH_MMU_FLAG_PERM_USER));
size = ROUNDUP(size, PAGE_SIZE);
if (size == 0) {
return ZX_ERR_INVALID_ARGS;
}
if (!vmo) {
return ZX_ERR_INVALID_ARGS;
}
if (!IS_PAGE_ALIGNED(offset)) {
return ZX_ERR_INVALID_ARGS;
}
vaddr_t vmar_offset = 0;
// if they're asking for a specific spot or starting address, copy the address
if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
// can't ask for a specific spot and then not provide one
if (!ptr) {
return ZX_ERR_INVALID_ARGS;
}
vmar_offset = reinterpret_cast<vaddr_t>(*ptr);
// check that it's page aligned
if (!IS_PAGE_ALIGNED(vmar_offset) || vmar_offset < base_) {
return ZX_ERR_INVALID_ARGS;
}
vmar_offset -= base_;
}
uint32_t vmar_flags = 0;
if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
vmar_flags |= VMAR_FLAG_SPECIFIC;
}
// Create the mappings with all of the CAN_* RWX flags, so that
// Protect() can transition them arbitrarily. This is not desirable for the
// long-term.
vmar_flags |= VMAR_CAN_RWX_FLAGS;
// allocate a region and put it in the aspace list
fbl::RefPtr<VmMapping> r(nullptr);
zx_status_t status = RootVmar()->CreateVmMapping(vmar_offset, size, align_pow2, vmar_flags, vmo,
offset, arch_mmu_flags, name, &r);
if (status != ZX_OK) {
return status;
}
// if we're committing it, map the region now
if (vmm_flags & VMM_FLAG_COMMIT) {
status = r->MapRange(0, size, true);
if (status != ZX_OK) {
return status;
}
}
// return the vaddr if requested
if (ptr) {
*ptr = (void*)r->base();
}
return ZX_OK;
}
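// Reserve a page-aligned range at a fixed kernel virtual address by mapping a zero-length
// VMO over it. If the range is already mapped, the existing arch MMU flags are preserved;
// otherwise a conservative cached, read-only default is used.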
zx_status_t VmAspace::ReserveSpace(const char* name, size_t size, vaddr_t vaddr) {
canary_.Assert();
LTRACEF("aspace %p name '%s' size %#zx vaddr %#" PRIxPTR "\n", this, name, size, vaddr);
DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
size = ROUNDUP_PAGE_SIZE(size);
if (size == 0) {
return ZX_OK;
}
if (!IS_PAGE_ALIGNED(vaddr)) {
return ZX_ERR_INVALID_ARGS;
}
if (!is_inside(*this, vaddr)) {
return ZX_ERR_OUT_OF_RANGE;
}
// trim the size
size = trim_to_aspace(*this, vaddr, size);
// allocate a zero length vm object to back it
// TODO: decide if a null vmo object is worth it
fbl::RefPtr<VmObject> vmo;
zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, 0, &vmo);
if (status != ZX_OK) {
return status;
}
vmo->set_name(name, strlen(name));
// lookup how it's already mapped
uint arch_mmu_flags = 0;
auto err = arch_aspace_.Query(vaddr, nullptr, &arch_mmu_flags);
if (err) {
// if it wasn't already mapped, use some sort of strict default
arch_mmu_flags = ARCH_MMU_FLAG_CACHED | ARCH_MMU_FLAG_PERM_READ;
}
// map it, creating a new region
void* ptr = reinterpret_cast<void*>(vaddr);
return MapObjectInternal(ktl::move(vmo), name, 0, size, &ptr, 0, VMM_FLAG_VALLOC_SPECIFIC,
arch_mmu_flags);
}
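// Map |size| bytes of physical memory starting at |paddr| into this aspace via a
// VmObjectPhysical. The mapping is always committed up front and the cache policy is
// taken from the cache bits of |arch_mmu_flags|.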
zx_status_t VmAspace::AllocPhysical(const char* name, size_t size, void** ptr, uint8_t align_pow2,
paddr_t paddr, uint vmm_flags, uint arch_mmu_flags) {
canary_.Assert();
LTRACEF("aspace %p name '%s' size %#zx ptr %p paddr %#" PRIxPTR
" vmm_flags 0x%x arch_mmu_flags 0x%x\n",
this, name, size, ptr ? *ptr : 0, paddr, vmm_flags, arch_mmu_flags);
DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
if (size == 0) {
return ZX_OK;
}
if (!IS_PAGE_ALIGNED(paddr)) {
return ZX_ERR_INVALID_ARGS;
}
size = ROUNDUP_PAGE_SIZE(size);
// create a vm object to back it
fbl::RefPtr<VmObject> vmo;
zx_status_t status = VmObjectPhysical::Create(paddr, size, &vmo);
if (status != ZX_OK) {
return status;
}
vmo->set_name(name, strlen(name));
// force it to be mapped up front
// TODO: add new flag to precisely mean pre-map
vmm_flags |= VMM_FLAG_COMMIT;
// Apply the cache policy
if (vmo->SetMappingCachePolicy(arch_mmu_flags & ARCH_MMU_FLAG_CACHE_MASK) != ZX_OK) {
return ZX_ERR_INVALID_ARGS;
}
arch_mmu_flags &= ~ARCH_MMU_FLAG_CACHE_MASK;
return MapObjectInternal(ktl::move(vmo), name, 0, size, ptr, align_pow2, vmm_flags,
arch_mmu_flags);
}
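// Allocate physically contiguous pages and map them into this aspace. Callers must pass
// VMM_FLAG_COMMIT since the backing pages are allocated eagerly.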
zx_status_t VmAspace::AllocContiguous(const char* name, size_t size, void** ptr, uint8_t align_pow2,
uint vmm_flags, uint arch_mmu_flags) {
canary_.Assert();
LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
this, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);
size = ROUNDUP(size, PAGE_SIZE);
if (size == 0) {
return ZX_ERR_INVALID_ARGS;
}
// test for invalid flags
if (!(vmm_flags & VMM_FLAG_COMMIT)) {
return ZX_ERR_INVALID_ARGS;
}
// create a vm object to back it
fbl::RefPtr<VmObject> vmo;
zx_status_t status = VmObjectPaged::CreateContiguous(PMM_ALLOC_FLAG_ANY, size, align_pow2, &vmo);
if (status != ZX_OK) {
return status;
}
vmo->set_name(name, strlen(name));
return MapObjectInternal(ktl::move(vmo), name, 0, size, ptr, align_pow2, vmm_flags,
arch_mmu_flags);
}
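// Allocate a regular paged VMO of |size| bytes and map it into this aspace, optionally
// committing the pages up front when VMM_FLAG_COMMIT is set.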
zx_status_t VmAspace::Alloc(const char* name, size_t size, void** ptr, uint8_t align_pow2,
uint vmm_flags, uint arch_mmu_flags) {
canary_.Assert();
LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
this, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);
size = ROUNDUP(size, PAGE_SIZE);
if (size == 0) {
return ZX_ERR_INVALID_ARGS;
}
// allocate a vm object to back it
fbl::RefPtr<VmObject> vmo;
zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, size, &vmo);
if (status != ZX_OK) {
return status;
}
vmo->set_name(name, strlen(name));
// commit memory up front if requested
if (vmm_flags & VMM_FLAG_COMMIT) {
// commit memory to the object
status = vmo->CommitRange(0, size);
if (status != ZX_OK) {
return status;
}
}
// map it, creating a new region
return MapObjectInternal(ktl::move(vmo), name, 0, size, ptr, align_pow2, vmm_flags,
arch_mmu_flags);
}
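// Destroy the kernel region (mapping or sub-VMAR) found at |va| directly under the
// root VMAR.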
zx_status_t VmAspace::FreeRegion(vaddr_t va) {
DEBUG_ASSERT(!is_user());
fbl::RefPtr<VmAddressRegionOrMapping> r = RootVmar()->FindRegion(va);
if (!r) {
return ZX_ERR_NOT_FOUND;
}
return r->Destroy();
}
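// Walk down from the root VMAR and return the deepest mapping or region containing |va|,
// or the last VMAR visited if no child claims the address.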
fbl::RefPtr<VmAddressRegionOrMapping> VmAspace::FindRegion(vaddr_t va) {
fbl::RefPtr<VmAddressRegion> vmar(RootVmar());
while (1) {
fbl::RefPtr<VmAddressRegionOrMapping> next(vmar->FindRegion(va));
if (!next) {
return vmar;
}
if (next->is_mapping()) {
return next;
}
vmar = next->as_vm_address_region();
}
}
void VmAspace::AttachToThread(thread_t* t) {
canary_.Assert();
DEBUG_ASSERT(t);
// point the lk thread at our object via the dummy C vmm_aspace_t struct
Guard<spin_lock_t, IrqSave> thread_lock_guard{ThreadLock::Get()};
// not prepared to handle setting a new address space or one on a running thread
DEBUG_ASSERT(!t->aspace);
DEBUG_ASSERT(t->state != THREAD_RUNNING);
t->aspace = reinterpret_cast<vmm_aspace_t*>(this);
}
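// Handle a page fault at |va|. The fault is routed to the root VMAR under the aspace
// lock; if servicing it requires waiting for pages (ZX_ERR_SHOULD_WAIT), the wait happens
// outside the lock and the fault is retried.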
zx_status_t VmAspace::PageFault(vaddr_t va, uint flags) {
canary_.Assert();
DEBUG_ASSERT(!aspace_destroyed_);
LTRACEF("va %#" PRIxPTR ", flags %#x\n", va, flags);
if ((flags_ & TYPE_MASK) == TYPE_GUEST_PHYS) {
flags &= ~VMM_PF_FLAG_USER;
flags |= VMM_PF_FLAG_GUEST;
}
zx_status_t status = ZX_OK;
PageRequest page_request;
do {
{
// for now, hold the aspace lock across the page fault operation,
// which stops any other operations on the address space from moving
// the region out from underneath it
// if (lock_.lock().IsHeld())
// return ZX_ERR_INTERNAL;
Guard<fbl::Mutex> guard{&lock_};
status = root_vmar_->PageFault(va, flags, &page_request);
}
if (status == ZX_ERR_SHOULD_WAIT) {
zx_status_t st = page_request.Wait();
if (st != ZX_OK) {
return st;
}
}
} while (status == ZX_ERR_SHOULD_WAIT);
return status;
}
void VmAspace::Dump(bool verbose) const {
canary_.Assert();
printf("as %p [%#" PRIxPTR " %#" PRIxPTR "] sz %#zx fl %#x ref %d '%s'\n", this, base_,
base_ + size_ - 1, size_, flags_, ref_count_debug(), name_);
Guard<fbl::Mutex> guard{&lock_};
if (verbose) {
root_vmar_->Dump(1, verbose);
}
}
bool VmAspace::EnumerateChildren(VmEnumerator* ve) {
canary_.Assert();
DEBUG_ASSERT(ve != nullptr);
Guard<fbl::Mutex> guard{&lock_};
if (root_vmar_ == nullptr || aspace_destroyed_) {
// Aspace hasn't been initialized or has already been destroyed.
return true;
}
DEBUG_ASSERT(root_vmar_->IsAliveLocked());
if (!ve->OnVmAddressRegion(root_vmar_.get(), 0)) {
return false;
}
return root_vmar_->EnumerateChildrenLocked(ve, 1);
}
void DumpAllAspaces(bool verbose) {
Guard<fbl::Mutex> guard{&aspace_list_lock};
for (const auto& a : aspaces) {
a.Dump(verbose);
}
}
VmAspace* VmAspace::vaddr_to_aspace(uintptr_t address) {
if (is_kernel_address(address)) {
return kernel_aspace();
} else if (is_user_address(address)) {
return vmm_aspace_to_obj(get_current_thread()->aspace);
} else {
return nullptr;
}
}
// TODO(dbort): Use GetMemoryUsage()
size_t VmAspace::AllocatedPages() const {
canary_.Assert();
Guard<fbl::Mutex> guard{&lock_};
return root_vmar_->AllocatedPagesLocked();
}
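// ASLR applies only to user aspaces and can be turned off with the "aslr.disable"
// command line option; the per-aspace PRNG is seeded from the global PRNG either way.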
void VmAspace::InitializeAslr() {
aslr_enabled_ = is_user() && !gCmdline.GetBool("aslr.disable", false);
crypto::GlobalPRNG::GetInstance()->Draw(aslr_seed_, sizeof(aslr_seed_));
aslr_prng_.AddEntropy(aslr_seed_, sizeof(aslr_seed_));
}
uintptr_t VmAspace::vdso_base_address() const {
Guard<fbl::Mutex> guard{&lock_};
return VDso::base_address(vdso_code_mapping_);
}
uintptr_t VmAspace::vdso_code_address() const {
Guard<fbl::Mutex> guard{&lock_};
return vdso_code_mapping_ ? vdso_code_mapping_->base() : 0;
}
/*
void* operator new(size_t count) {
fbl::AllocChecker ac;
void* data = operator new(count, &ac);
if (!ac.check()) panic("I want to die");
return data;
}
*/
extern int scream_time;
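// Enumerator that recreates a source aspace's VMAR tree inside the destination root VMAR
// |p_|: each sub-VMAR is recreated at the same offset and with the same flags, and each
// mapping is recreated against a copy-on-write clone of its VMO. The vDSO VMO is shared
// rather than cloned, and clones are deduplicated in |cloned_| so multiple mappings of the
// same VMO reuse one clone. Errors are reported through |status_out| and stop enumeration.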
class ClowncopterizeVmoEnumerator : public VmEnumerator {
public:
bool OnVmAddressRegion(const VmAddressRegion* vmar, uint depth) override {
if (depth == 0) {
return true;
}
LTRACEF("clowncopterizing vmar %p depth=%d base=%#lx size=%#lx\n", vmar, depth, vmar->base(),
vmar->size());
Ascend(depth);
zx_status_t status = p_->CreateSubVmar(vmar->base() - p_->base(), vmar->size(), 0,
VMAR_FLAG_SPECIFIC | vmar->flags(), vmar->name(), &p_);
depth_++;
ASSERT(depth + 1 == depth_);
if (status != ZX_OK) {
*status_out = status;
return false;
}
LTRACEF("created vmar %p for %p\n", p_.get(), vmar);
return true;
}
bool OnVmMapping(const VmMapping* map, const VmAddressRegion* vmar, uint depth) override {
Ascend(depth);
LTRACEF("clowncopterizing vmo %p depth=%d base=%#lx size=%lx vmo=%p\n", map, depth, map->base(),
map->size(), map->vmo_locked().get());
fbl::RefPtr<VmObject> vmo = map->vmo_locked();
fbl::RefPtr<VmObject> vmo_clone;
if (VDso::vmo_is_vdso(vmo)) {
// Don't clone the VDSO.
vmo_clone = vmo;
} else {
auto vmo_clone_find = cloned_.find(vmo.get());
if (vmo_clone_find == cloned_.end()) {
zx_status_t status = vmo->CreateClone(
vmo->is_resizable() ? Resizability::Resizable : Resizability::NonResizable,
CloneType::CopyOnWrite, 0, vmo->size(), true, &vmo_clone);
LTRACEF("created cow %p for %p\n", vmo_clone.get(), vmo.get());
if (status != ZX_OK) {
*status_out = status;
return false;
}
fbl::AllocChecker ac;
auto cloned_vmo = fbl::make_unique_checked<ClonedVmo>(&ac, vmo.get(), vmo_clone);
ASSERT(cloned_vmo != nullptr);
ASSERT(ac.check());
cloned_.insert(std::move(cloned_vmo));
} else {
vmo_clone = vmo_clone_find->new_vmo;
LTRACEF("reusing cow %p for %p\n", vmo_clone.get(), vmo.get());
}
}
fbl::RefPtr<VmMapping> new_map;
LTRACEF("CreateVmMapping(%#lx, %#lx, 0, %x, %p, 0, %#x, null, blah)\n", map->base(),
map->size(), VMAR_FLAG_SPECIFIC | map->flags(), vmo_clone.get(), map->arch_mmu_flags());
zx_status_t status = p_->CreateVmMapping(
map->base() - p_->base(), map->size(), 0, VMAR_FLAG_SPECIFIC | map->flags(), vmo_clone,
map->object_offset(), map->arch_mmu_flags(), nullptr, &new_map);
if (status != ZX_OK) {
*status_out = status;
return false;
}
LTRACEF("created mapping %p for %p\n", new_map.get(), map);
return true;
}
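// For every handle in |old_proc| that refers to a VMO we cloned, or to a VMAR that has a
// counterpart in |new_proc|'s aspace, install a handle to the corresponding clone in
// |new_proc| under the same handle value via OverrideHandle().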
zx_status_t CreateClowncopterOverrides(fbl::RefPtr<ProcessDispatcher> new_proc,
fbl::RefPtr<ProcessDispatcher> old_proc) {
return old_proc->ForEachHandle([&](zx_handle_t handle, zx_rights_t rights,
const Dispatcher* dispatcher) {
fbl::RefPtr<Dispatcher> new_dispatcher;
KernelHandle<VmAddressRegionDispatcher> dummy;
if (auto vmo_dispatcher = DownCastDispatcher<const VmObjectDispatcher>(dispatcher)) {
auto vmo = vmo_dispatcher->vmo();
auto cloned_vmo = cloned_.find(vmo.get());
if (cloned_vmo != cloned_.end()) {
new_dispatcher = cloned_vmo->new_vmo_dispatcher.dispatcher();
}
} else if (auto vmar_dispatcher =
DownCastDispatcher<const VmAddressRegionDispatcher>(dispatcher)) {
// we must traverse
auto old_vmar = vmar_dispatcher->vmar();
zx_vaddr_t base = old_vmar->base();
int depth = 0;
while (old_vmar->has_parent()) {
old_vmar = old_vmar->parent();
depth++;
}
auto vmar = new_proc->aspace()->RootVmar();
for (int i = 0; i < depth; i++) {
auto next = vmar->FindRegion(base);
if (next == nullptr || next->is_mapping()) {
vmar = nullptr;
break;
}
vmar = next->as_vm_address_region();
}
if (vmar) {
if (vmar->dispatcher() == nullptr) {
// Create a dispatcher for this VMAR on demand; use a distinct name so the handle's
// |rights| captured by the enclosing lambda is not shadowed.
zx_rights_t vmar_rights;
VmAddressRegionDispatcher::Create(vmar, ARCH_MMU_FLAG_PERM_USER, &dummy, &vmar_rights);
}
new_dispatcher = fbl::RefPtr(vmar->dispatcher());
}
}
if (new_dispatcher) {
zx_handle_t value = handle;
HandleOwner handle_owner = Handle::Make(new_dispatcher, rights);
zx_status_t status = new_proc->OverrideHandle(handle_owner, value);
if (status != ZX_OK) {
return status;
}
}
return ZX_OK;
});
}
fbl::RefPtr<VmAddressRegion> p_;
zx_status_t* status_out;
private:
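// Walk |p_| back up the destination VMAR tree until |depth_| matches the depth of the
// node currently being visited.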
void Ascend(uint depth) {
ASSERT(depth <= depth_);
while (depth < depth_) {
p_ = p_->parent();
depth_--;
}
}
uint depth_ = 1;
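// Hash table entry keyed by the original VMO, holding its copy-on-write clone along with
// a VmObjectDispatcher and rights created for the clone so handles can be overridden later.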
struct ClonedVmo : public fbl::SinglyLinkedListable<ktl::unique_ptr<ClonedVmo>> {
ClonedVmo(VmObject* oldvmo, fbl::RefPtr<VmObject> newvmo)
: old_vmo(oldvmo), new_vmo(std::move(newvmo)) {
zx_status_t status =
VmObjectDispatcher::Create(new_vmo, &new_vmo_dispatcher, &new_vmo_rights);
ASSERT(status == ZX_OK);
}
VmObject* old_vmo;
fbl::RefPtr<VmObject> new_vmo;
KernelHandle<VmObjectDispatcher> new_vmo_dispatcher;
zx_rights_t new_vmo_rights;
VmObject* GetKey() const { return old_vmo; }
static size_t GetHash(VmObject* key) { return reinterpret_cast<size_t>(key); }
};
fbl::HashTable<VmObject*, ktl::unique_ptr<ClonedVmo>> cloned_;
};
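// Clone this aspace's VMAR tree and VMO contents into |new_proc|'s aspace, then, for each
// of |old_proc|'s VMO and VMAR handles, install a handle to the corresponding clone in
// |new_proc|.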
zx_status_t VmAspace::ClowncopterizeUpon(fbl::RefPtr<ProcessDispatcher> new_proc,
fbl::RefPtr<ProcessDispatcher> old_proc) {
canary_.Assert();
fbl::RefPtr<VmAspace> new_aspace = new_proc->aspace();
ClowncopterizeVmoEnumerator e;
e.p_ = new_aspace->RootVmar();
zx_status_t status = ZX_OK;
e.status_out = &status;
EnumerateChildren(&e);
if (status == ZX_OK) {
status = e.CreateClowncopterOverrides(new_proc, old_proc);
}
return status;
}