WIP [kernel][vm][vmalloc] pull all of the vmalloc/vmfree allocations out into a dedicated VMAR
Change-Id: I8c84327c802d48b50358167e9b22084b770f2e3b
diff --git a/kernel/vm/include/vm/vmalloc.h b/kernel/vm/include/vm/vmalloc.h
index e3dfe0a..8c1385a 100644
--- a/kernel/vm/include/vm/vmalloc.h
+++ b/kernel/vm/include/vm/vmalloc.h
@@ -16,4 +16,6 @@
void* vmalloc(size_t len, const char* name);
void vmfree(void* ptr);
+void vmalloc_init(void);
+
__END_CDECLS
diff --git a/kernel/vm/vm.cpp b/kernel/vm/vm.cpp
index e9ffaac..2bc9e78 100644
--- a/kernel/vm/vm.cpp
+++ b/kernel/vm/vm.cpp
@@ -23,6 +23,7 @@
#include <vm/pmm.h>
#include <vm/vm.h>
#include <vm/vm_aspace.h>
+#include <vm/vmalloc.h>
#include <zircon/types.h>
#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)
@@ -177,6 +178,8 @@
ASSERT(status == ZX_OK);
LTRACEF("VM: aspace random padding size: %#" PRIxPTR "\n", random_size);
#endif
+
+ vmalloc_init();
}
paddr_t vaddr_to_paddr(const void* ptr) {
diff --git a/kernel/vm/vmalloc.cpp b/kernel/vm/vmalloc.cpp
index 6723abc..b39caba 100644
--- a/kernel/vm/vmalloc.cpp
+++ b/kernel/vm/vmalloc.cpp
@@ -8,21 +8,59 @@
#include <trace.h>
#include <vm/vm_aspace.h>
+#include <vm/vm_object_paged.h>
+
+#define LOCAL_TRACE 0
static const uint kArchRwFlags = ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE;
+static fbl::RefPtr<VmAddressRegion> vmalloc_vmar;
-void* vmalloc(size_t len, const char* name) {
- void* ptr;
- zx_status_t status = VmAspace::kernel_aspace()->Alloc(name ? name : "vmalloc",
- len, &ptr, 0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
+// log2 of the size (in bytes) of the vmar used for vmalloc mappings
+static const size_t vmalloc_vmar_shift = 30; // 1GB
+
+void* vmalloc(size_t len, const char* _name) {
+ const char* name = _name ? _name : "vmalloc";
+
+ // Create a VMO for our allocation
+ fbl::RefPtr<VmObject> vmo;
+ zx_status_t status = VmObjectPaged::Create(
+ PMM_ALLOC_FLAG_ANY, 0u, len, &vmo);
if (status != ZX_OK) {
+ TRACEF("vmalloc: failed to allocate vmo of size %zu\n", len);
return nullptr;
}
+ vmo->set_name(name, strlen(name));
+
+ auto vmo_size = vmo->size();
+
+ // create a mapping with random placement into the vmalloc region
+ fbl::RefPtr<VmMapping> mapping;
+ status = vmalloc_vmar->CreateVmMapping(0, vmo_size, 0,
+ 0,
+ fbl::move(vmo), 0,
+ kArchRwFlags,
+ name,
+ &mapping);
+ if (status != ZX_OK)
+ return nullptr;
+
+ // fault in all the pages up front so the allocation is not demand-faulted later
+ status = mapping->MapRange(0, vmo_size, true);
+ if (status != ZX_OK) {
+ mapping->Destroy();
+ return nullptr;
+ }
+
+ void* ptr = reinterpret_cast<void*>(mapping->base());
+
+ LTRACEF("returning %p for size %zu\n", ptr, len);
return ptr;
}
void vmfree(void* ptr) {
+ LTRACEF("ptr %p\n", ptr);
+
vaddr_t va = reinterpret_cast<vaddr_t>(ptr);
DEBUG_ASSERT(is_kernel_address(va));
@@ -33,3 +71,12 @@
}
}
+void vmalloc_init() {
+ auto root_vmar = VmAspace::kernel_aspace()->RootVmar()->as_vm_address_region();
+
+ zx_status_t status = root_vmar->CreateSubVmar(0,
+ (1ULL << vmalloc_vmar_shift), vmalloc_vmar_shift,
+ VMAR_FLAG_CAN_MAP_READ | VMAR_FLAG_CAN_MAP_WRITE,
+ "vmalloc vmar", &vmalloc_vmar);
+ ASSERT(status == ZX_OK);
+}