WIP pmm2: collapse vm_page list nodes into a single queue_node

Fold vm_page's two embedded list nodes (node and free.node) into one
queue_node used by every page queue, and add PmmNode::Free(vm_page*) so
pmm_free_page() frees a single page directly instead of building a
temporary list. The FREE -> ALLOC transition moves from
vm_page::set_state_alloc() to a file-local helper in pmm_node.cpp, the
set_state_object() declaration is dropped, and
pmm_page_set_state_alloc()/pmm_page_set_state_wired() are added for
callers outside the pmm that change page state.
Change-Id: Ie6c1ffdcf017906f0e457f4d1cd398a65afa29c8
diff --git a/kernel/include/kernel/vm/page.h b/kernel/include/kernel/vm/page.h
index 49dc7b9..3a5be11 100644
--- a/kernel/include/kernel/vm/page.h
+++ b/kernel/include/kernel/vm/page.h
@@ -20,7 +20,7 @@
// core per page structure allocated at pmm arena creation time
struct vm_page {
- struct list_node node;
+ struct list_node queue_node;
paddr_t paddr;
// offset 0x18
@@ -34,7 +34,7 @@
union {
struct {
// in allocated/just freed state, use a linked list to hold the page in a queue
- struct list_node node;
+ //struct list_node node;
// offset: 0x30
} free;
struct {
@@ -58,7 +58,6 @@
// state manipulation routines
void set_state_alloc();
- void set_state_object(VmObject *obj, uint64_t offset);
};
// assert that the page structure isn't growing uncontrollably
@@ -82,3 +81,8 @@
const char* page_state_to_string(unsigned int state);
void dump_page(const vm_page* page);
+
+// state transition routines
+void pmm_page_set_state_alloc(vm_page *page);
+void pmm_page_set_state_wired(vm_page *page);
+
diff --git a/kernel/include/kernel/vm/pmm.h b/kernel/include/kernel/vm/pmm.h
index cf4c626..b6789f7 100644
--- a/kernel/include/kernel/vm/pmm.h
+++ b/kernel/include/kernel/vm/pmm.h
@@ -56,7 +56,7 @@
// Returns the number of pages freed.
size_t pmm_free(list_node* list);
-// Helper routine for the above.
+// Free a single page. Returns the number of pages freed (always 1).
size_t pmm_free_page(vm_page* page);
// Return count of unallocated physical pages in system
diff --git a/kernel/kernel/vm/page.cpp b/kernel/kernel/vm/page.cpp
index e4a6131..8a07b50 100644
--- a/kernel/kernel/vm/page.cpp
+++ b/kernel/kernel/vm/page.cpp
@@ -36,14 +36,24 @@
}
}
-void vm_page::set_state_alloc() {
- LTRACEF("page %p: prev state %s\n", this, page_state_to_string(state));
-
- DEBUG_ASSERT(state == VM_PAGE_STATE_FREE);
-
- state = VM_PAGE_STATE_ALLOC;
-}
-
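+// Sketch of the out-of-line state transition helpers declared in page.h.
+// NOTE: assumed definitions; tracing/assert policy mirrors the removed
+// vm_page::set_state_alloc() and may change as this WIP lands.
+void pmm_page_set_state_alloc(vm_page* page) {
+    LTRACEF("page %p: prev state %s\n", page, page_state_to_string(page->state));
+
+    DEBUG_ASSERT(page->state == VM_PAGE_STATE_FREE);
+
+    page->state = VM_PAGE_STATE_ALLOC;
+}
+
+void pmm_page_set_state_wired(vm_page* page) {
+    LTRACEF("page %p: prev state %s\n", page, page_state_to_string(page->state));
+
+    // no FREE assert here: callers wire pages they have just allocated
+    page->state = VM_PAGE_STATE_WIRED;
+}
+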
void dump_page(const vm_page* page) {
printf("page %p: address %#" PRIxPTR " state %s flags %#x\n", page, vm_page_to_paddr(page),
page_state_to_string(page->state), page->flags);
diff --git a/kernel/kernel/vm/pmm.cpp b/kernel/kernel/vm/pmm.cpp
index 1735738..687d514 100644
--- a/kernel/kernel/vm/pmm.cpp
+++ b/kernel/kernel/vm/pmm.cpp
@@ -85,7 +85,7 @@
return nullptr;
if (list) {
- list_add_tail(list, &p->free.node);
+ list_add_tail(list, &p->queue_node);
}
} else {
size_t alloc_count = pmm_node.AllocContiguous(count, PMM_ALLOC_FLAG_KMAP, PAGE_SIZE_SHIFT, &pa, list);
@@ -132,7 +132,12 @@
while (count > 0) {
vm_page* p = paddr_to_vm_page(vaddr_to_paddr(ptr));
if (p) {
- list_add_tail(&list, &p->free.node);
+ DEBUG_ASSERT(!p->is_free());
+
+ if (list_in_list(&p->queue_node))
+ list_delete(&p->queue_node);
+
+ list_add_tail(&list, &p->queue_node);
}
ptr += PAGE_SIZE;
@@ -147,12 +152,8 @@
}
size_t pmm_free_page(vm_page* page) {
- list_node list;
- list_initialize(&list);
-
- list_add_head(&list, &page->free.node);
-
- return pmm_free(&list);
+ pmm_node.Free(page);
+ return 1;
}
uint64_t pmm_count_free_pages() {
diff --git a/kernel/kernel/vm/pmm_arena.cpp b/kernel/kernel/vm/pmm_arena.cpp
index 71aa269..3c15aef 100644
--- a/kernel/kernel/vm/pmm_arena.cpp
+++ b/kernel/kernel/vm/pmm_arena.cpp
@@ -43,7 +43,7 @@
p.paddr = base() + i * PAGE_SIZE;
p.state = VM_PAGE_STATE_FREE;
- list_add_tail(&list, &p.node);
+ list_add_tail(&list, &p.queue_node);
}
node->AddFreePages(&list);
diff --git a/kernel/kernel/vm/pmm_node.cpp b/kernel/kernel/vm/pmm_node.cpp
index c860b65..ae5166a 100644
--- a/kernel/kernel/vm/pmm_node.cpp
+++ b/kernel/kernel/vm/pmm_node.cpp
@@ -13,6 +13,19 @@
#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)
+namespace {
+
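+// move a page that must currently be FREE into the ALLOC state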
+void set_state_alloc(vm_page *page) {
+ LTRACEF("page %p: prev state %s\n", page, page_state_to_string(page->state));
+
+ DEBUG_ASSERT(page->state == VM_PAGE_STATE_FREE);
+
+ page->state = VM_PAGE_STATE_ALLOC;
+}
+
+} // anon namespace
+
PmmNode::PmmNode() {
}
@@ -60,9 +72,9 @@
LTRACEF("list %p\n", list);
vm_page *temp, *page;
- list_for_every_entry_safe(list, page, temp, vm_page, node) {
- list_delete(&page->node);
- list_add_tail(&free_list_, &page->node);
+ list_for_every_entry_safe(list, page, temp, vm_page, queue_node) {
+ list_delete(&page->queue_node);
+ list_add_tail(&free_list_, &page->queue_node);
free_count_++;
}
@@ -72,7 +84,7 @@
vm_page* PmmNode::AllocPage(uint alloc_flags, paddr_t* pa) {
AutoLock al(&lock_);
- vm_page* page = list_remove_head_type(&free_list_, vm_page, node);
+ vm_page* page = list_remove_head_type(&free_list_, vm_page, queue_node);
if (!page)
return nullptr;
@@ -82,7 +94,7 @@
DEBUG_ASSERT(page->is_free());
- page->set_state_alloc();
+ set_state_alloc(page);
#if PMM_ENABLE_FREE_FILL
CheckFreeFill(page);
@@ -110,7 +122,7 @@
size_t allocated = 0;
while (allocated < count) {
- vm_page* page = list_remove_head_type(&free_list_, vm_page, node);
+ vm_page* page = list_remove_head_type(&free_list_, vm_page, queue_node);
if (!page)
return allocated;
@@ -126,7 +138,7 @@
#endif
page->state = VM_PAGE_STATE_ALLOC;
- list_add_tail(list, &page->free.node);
+ list_add_tail(list, &page->queue_node);
allocated++;
}
@@ -155,12 +167,12 @@
if (!page->is_free())
break;
- list_delete(&page->node);
+ list_delete(&page->queue_node);
page->state = VM_PAGE_STATE_ALLOC;
if (list)
- list_add_tail(list, &page->free.node);
+ list_add_tail(list, &page->queue_node);
allocated++;
address += PAGE_SIZE;
@@ -198,9 +210,9 @@
/* remove the pages from the run out of the free list */
for (size_t i = 0; i < count; i++, p++) {
DEBUG_ASSERT_MSG(p->is_free(), "p %p state %u\n", p, p->state);
- DEBUG_ASSERT(list_in_list(&p->node));
+ DEBUG_ASSERT(list_in_list(&p->queue_node));
- list_delete(&p->node);
+ list_delete(&p->queue_node);
p->state = VM_PAGE_STATE_ALLOC;
DEBUG_ASSERT(free_count_ > 0);
@@ -212,7 +224,7 @@
#endif
if (list)
- list_add_tail(list, &p->free.node);
+ list_add_tail(list, &p->queue_node);
}
return count;
@@ -231,24 +243,25 @@
uint count = 0;
while (!list_is_empty(list)) {
- vm_page* page = list_remove_head_type(list, vm_page, free.node);
+ vm_page* page = list_remove_head_type(list, vm_page, queue_node);
- DEBUG_ASSERT(page->state != VM_PAGE_STATE_OBJECT || page->object.pin_count == 0);
- DEBUG_ASSERT(!page->is_free());
+ DEBUG_ASSERT(page->state == VM_PAGE_STATE_ALLOC);
+ //DEBUG_ASSERT(page->state != VM_PAGE_STATE_OBJECT || page->object.pin_count == 0);
+ //DEBUG_ASSERT(!page->is_free());
#if PMM_ENABLE_FREE_FILL
FreeFill(page);
#endif
// remove it from its old queue
- if (list_in_list(&page->node))
- list_delete(&page->node);
+ if (list_in_list(&page->queue_node))
+ list_delete(&page->queue_node);
// mark it free
page->state = VM_PAGE_STATE_FREE;
// add it to the free queue
- list_add_head(&free_list_, &page->node);
+ list_add_head(&free_list_, &page->queue_node);
free_count_++;
count++;
@@ -259,6 +272,31 @@
return count;
}
+void PmmNode::Free(vm_page* page) {
+ LTRACEF("page %p, pa %#" PRIxPTR "\n", page, page->paddr);
+
+ AutoLock al(&lock_);
+
+ DEBUG_ASSERT(page->state != VM_PAGE_STATE_OBJECT || page->object.pin_count == 0);
+ DEBUG_ASSERT(!page->is_free());
+
+#if PMM_ENABLE_FREE_FILL
+ FreeFill(page);
+#endif
+
+ // remove it from its old queue
+ if (list_in_list(&page->queue_node))
+ list_delete(&page->queue_node);
+
+ // mark it free
+ page->state = VM_PAGE_STATE_FREE;
+
+ // add it to the free queue
+ list_add_head(&free_list_, &page->queue_node);
+
+ free_count_++;
+}
+
uint64_t PmmNode::CountFreePages() const {
return free_count_;
}
@@ -300,7 +338,7 @@
DEBUG_ASSERT(!enforce_fill_);
vm_page* page;
- list_for_every_entry (&free_list_, page, vm_page, node) {
+ list_for_every_entry (&free_list_, page, vm_page, queue_node) {
FreeFill(page);
}
diff --git a/kernel/kernel/vm/pmm_node.h b/kernel/kernel/vm/pmm_node.h
index 5adb614..885d430 100644
--- a/kernel/kernel/vm/pmm_node.h
+++ b/kernel/kernel/vm/pmm_node.h
@@ -32,6 +32,7 @@
size_t AllocPages(size_t count, uint alloc_flags, list_node* list);
size_t AllocRange(paddr_t address, size_t count, list_node* list);
size_t AllocContiguous(size_t count, uint alloc_flags, uint8_t alignment_log2, paddr_t* pa, list_node* list);
+ void Free(vm_page* page);
size_t Free(list_node* list);
uint64_t CountFreePages() const;
diff --git a/kernel/kernel/vm/vm.cpp b/kernel/kernel/vm/vm.cpp
index 65e9816..e400fde 100644
--- a/kernel/kernel/vm/vm.cpp
+++ b/kernel/kernel/vm/vm.cpp
@@ -89,7 +89,7 @@
// mark all of the pages we allocated as WIRED
vm_page* p;
- list_for_every_entry (&list, p, vm_page, free.node) { p->state = VM_PAGE_STATE_WIRED; }
+ list_for_every_entry (&list, p, vm_page, queue_node) { pmm_page_set_state_wired(p); }
}
status_t ProtectRegion(VmAspace* aspace, vaddr_t va, uint arch_mmu_flags) {
diff --git a/kernel/kernel/vm/vm_object_paged.cpp b/kernel/kernel/vm/vm_object_paged.cpp
index 2f909a1..d6f5fa3 100644
--- a/kernel/kernel/vm/vm_object_paged.cpp
+++ b/kernel/kernel/vm/vm_object_paged.cpp
@@ -320,7 +320,7 @@
paddr_t pa_clone;
vm_page* p_clone = nullptr;
if (free_list) {
- p_clone = list_remove_head_type(free_list, vm_page, free.node);
+ p_clone = list_remove_head_type(free_list, vm_page, queue_node);
if (p_clone) {
pa_clone = vm_page_to_paddr(p_clone);
}
@@ -375,7 +375,7 @@
// allocate a page
if (free_list) {
- p = list_remove_head_type(free_list, vm_page, free.node);
+ p = list_remove_head_type(free_list, vm_page, queue_node);
if (p) {
pa = vm_page_to_paddr(p);
}
@@ -549,7 +549,7 @@
// add them to the appropriate range of the object
for (uint64_t o = offset; o < end; o += PAGE_SIZE) {
- vm_page* p = list_remove_head_type(&page_list, vm_page, free.node);
+ vm_page* p = list_remove_head_type(&page_list, vm_page, queue_node);
ASSERT(p);
InitializeVmPage(p);
diff --git a/kernel/kernel/vm/vm_page_list.cpp b/kernel/kernel/vm/vm_page_list.cpp
index d68f358..02c3760 100644
--- a/kernel/kernel/vm/vm_page_list.cpp
+++ b/kernel/kernel/vm/vm_page_list.cpp
@@ -150,8 +150,8 @@
// per page get a reference to the page pointer inside the page list node
auto per_page_func = [&](vm_page*& p, uint64_t offset) {
// add the page to our list and null out the inner node
- list_add_tail(&list, &p->free.node);
+ list_add_tail(&list, &p->queue_node);
p = nullptr;
count++;
return MX_ERR_NEXT;
diff --git a/kernel/platform/pc/platform.cpp b/kernel/platform/pc/platform.cpp
index 060c5ba..afdd5cb 100644
--- a/kernel/platform/pc/platform.cpp
+++ b/kernel/platform/pc/platform.cpp
@@ -213,7 +213,7 @@
// mark all of the pages we allocated as WIRED
vm_page *p;
- list_for_every_entry(&list, p, vm_page, free.node) {
+ list_for_every_entry(&list, p, vm_page, queue_node) {
p->state = VM_PAGE_STATE_WIRED;
}