author     Richard Braun <rbraun@sceen.net>    2016-02-02 23:17:20 +0100
committer  Richard Braun <rbraun@sceen.net>    2016-02-02 23:20:32 +0100
commit     44d78061e90e777b51cae8e01eda5c0d3ce63103
tree       64af20619a7292834c9d66e8157e0301a8d62f0d /vm
parent     909167b9d05cf896f1e54122183ef8ee9ee70677
Fix various memory management errors
A few errors were introduced in the latest changes.
o Add VM_PAGE_WAIT calls around physical allocation attempts in case of
memory exhaustion (see the sketch after this list).
o Fix stack release.
o Fix memory exhaustion report.
o Fix free page accounting.
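The retry pattern behind the first item would look roughly like the sketch
below. This is an illustration only, not the committed code: kmem_pagealloc
and kmem_pagefree are the wrappers named in the ChangeLog (kern/slab.c,
which this vm/-limited diff doesn't show), and their exact signatures, the
VM_PAGE_SEL_DIRECTMAP selector and the phystokv/kvtophys helpers are
assumptions.

/*
 * Illustrative sketch, under the assumptions above: allocate physical
 * pages for the slab allocator, sleeping and retrying on exhaustion
 * instead of failing outright.
 */
static vm_offset_t
kmem_pagealloc(vm_size_t size)
{
        struct vm_page *page;

        for (;;) {
                page = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);

                if (page != NULL)
                        break;

                /* Wait for the pageout daemon to free memory, retry. */
                VM_PAGE_WAIT((void (*)()) 0);
        }

        return phystokv(vm_page_to_pa(page));
}

static void
kmem_pagefree(vm_offset_t addr, vm_size_t size)
{
        /* Pages grabbed from the physical allocator go back to it. */
        vm_page_free_contig(vm_page_lookup_pa(kvtophys(addr)), size);
}

The point of such wrappers is that every slab-level caller gets the same
wait-and-retry behavior, rather than each call site handling memory
exhaustion on its own.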
* kern/slab.c (kmem_pagealloc, kmem_pagefree): New functions.
(kmem_slab_create, kmem_slab_destroy, kalloc, kfree): Use kmem_pagealloc
and kmem_pagefree instead of the raw page allocation functions.
(kmem_cache_compute_sizes): Don't store slab order.
* kern/slab.h (struct kmem_cache): Remove `slab_order' member.
* kern/thread.c (stack_alloc): Call VM_PAGE_WAIT in case of memory
exhaustion.
(stack_collect): Call vm_page_free_contig instead of kmem_free to
release pages (see the sketch after this list).
* vm/vm_page.c (vm_page_seg_alloc): Fix memory exhaustion report.
(vm_page_setup): Don't update vm_page_free_count.
(vm_page_free_pa): Check page parameter.
(vm_page_mem_free): New function.
* vm/vm_page.h (vm_page_free_count): Remove extern declaration.
(vm_page_mem_free): New prototype.
* vm/vm_pageout.c: Update comments not to refer to vm_page_free_count.
(vm_pageout_scan, vm_pageout_continue, vm_pageout): Use vm_page_mem_free
instead of vm_page_free_count, update types accordingly.
* vm/vm_resident.c (vm_page_free_count, vm_page_free_count_minimum):
Remove variables.
(vm_page_free_avail): New variable.
(vm_page_bootstrap, vm_page_grab, vm_page_release, vm_page_grab_contig,
vm_page_free_contig, vm_page_wait): Use vm_page_mem_free instead of
vm_page_free_count, update types accordingly, don't set
vm_page_free_count_minimum.
* vm/vm_user.c (vm_statistics): Likewise.
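Because this page shows only the vm/ part of the commit, the kern/thread.c
changes listed above aren't visible in the diff below. A rough, hypothetical
sketch of the stack release fix follows; the helper name stack_free_pages
and the use of kvtophys and KERNEL_STACK_SIZE are assumptions (the real
change is inside stack_collect):

/*
 * Illustrative sketch of the stack release fix. Kernel stacks are
 * backed by contiguous physical pages from vm_page_grab_contig, so
 * they must be returned with vm_page_free_contig; kmem_free would
 * wrongly treat the stack as a kernel-map allocation.
 */
static void
stack_free_pages(vm_offset_t stack)
{
        struct vm_page *page;

        /* Recover the page descriptor backing the stack. */
        page = vm_page_lookup_pa(kvtophys(stack));
        assert(page != NULL);

        vm_page_free_contig(page, KERNEL_STACK_SIZE);
}

This mirrors the vm_page_free_contig path in the vm_resident.c hunks below,
where freeing also rechecks vm_page_free_wanted and wakes one waiter on
vm_page_free_avail.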
Diffstat (limited to 'vm')
-rw-r--r--  vm/vm_page.c     | 27
-rw-r--r--  vm/vm_page.h     | 14
-rw-r--r--  vm/vm_pageout.c  | 20
-rw-r--r--  vm/vm_resident.c | 45
-rw-r--r--  vm/vm_user.c     |  2
5 files changed, 65 insertions, 43 deletions
diff --git a/vm/vm_page.c b/vm/vm_page.c
index a539ab41..48d70964 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -442,6 +442,9 @@ vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
         simple_lock(&seg->lock);
         page = vm_page_seg_alloc_from_buddy(seg, order);
         simple_unlock(&seg->lock);
+
+        if (page == NULL)
+            return NULL;
     }
 
     assert(page->type == VM_PT_FREE);
@@ -637,10 +640,6 @@ vm_page_setup(void)
             page->type = VM_PT_FREE;
             vm_page_seg_free_to_buddy(seg, page, 0);
             page++;
-
-            /* XXX */
-            if (i <= VM_PAGE_SEG_DIRECTMAP)
-                vm_page_free_count++;
         }
 
         table += vm_page_atop(vm_page_seg_size(seg));
@@ -705,6 +704,7 @@ vm_page_alloc_pa(unsigned int order, unsigned int selector, unsigned short type)
 void
 vm_page_free_pa(struct vm_page *page, unsigned int order)
 {
+    assert(page != NULL);
     assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
 
     vm_page_seg_free(&vm_page_segs[page->seg_index], page, order);
@@ -760,3 +760,22 @@ vm_page_mem_size(void)
 
     return total;
 }
+
+unsigned long
+vm_page_mem_free(void)
+{
+    unsigned long total;
+    unsigned int i;
+
+    total = 0;
+
+    for (i = 0; i < vm_page_segs_size; i++) {
+        /* XXX */
+        if (i > VM_PAGE_SEG_DIRECTMAP)
+            continue;
+
+        total += vm_page_segs[i].nr_free_pages;
+    }
+
+    return total;
+}
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 4e870d82..6f4f3c22 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -162,8 +162,6 @@
 extern
 queue_head_t    vm_page_queue_inactive; /* inactive memory queue */
 extern
-int     vm_page_free_count;     /* How many pages are free? */
-extern
 int     vm_page_fictitious_count;/* How many fictitious pages are free? */
 extern
 int     vm_page_active_count;   /* How many pages are active? */
@@ -483,12 +481,16 @@ struct vm_page * vm_page_lookup_pa(phys_addr_t pa);
  *
  * The selector is used to determine the segments from which allocation can
  * be attempted.
+ *
+ * This function should only be used by the vm_resident module.
  */
 struct vm_page * vm_page_alloc_pa(unsigned int order, unsigned int selector,
                                   unsigned short type);
 
 /*
  * Release a block of 2^order physical pages.
+ *
+ * This function should only be used by the vm_resident module.
  */
 void vm_page_free_pa(struct vm_page *page, unsigned int order);
 
@@ -507,4 +509,12 @@ void vm_page_info_all(void);
  */
 phys_addr_t vm_page_mem_size(void);
 
+/*
+ * Return the amount of free (unused) pages.
+ *
+ * XXX This currently relies on the kernel being non preemptible and
+ * uniprocessor.
+ */
+unsigned long vm_page_mem_free(void);
+
 #endif /* _VM_VM_PAGE_H_ */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 51a6a0d4..f06e8f8e 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -82,7 +82,7 @@
  * of active+inactive pages that should be inactive.
  * The pageout daemon uses it to update vm_page_inactive_target.
  *
- * If vm_page_free_count falls below vm_page_free_target and
+ * If the number of free pages falls below vm_page_free_target and
  * vm_page_inactive_count is below vm_page_inactive_target,
  * then the pageout daemon starts running.
  */
@@ -93,7 +93,7 @@
 
 /*
  * Once the pageout daemon starts running, it keeps going
- * until vm_page_free_count meets or exceeds vm_page_free_target.
+ * until the number of free pages meets or exceeds vm_page_free_target.
  */
 
 #ifndef VM_PAGE_FREE_TARGET
@@ -101,7 +101,7 @@
 #endif /* VM_PAGE_FREE_TARGET */
 
 /*
- * The pageout daemon always starts running once vm_page_free_count
+ * The pageout daemon always starts running once the number of free pages
  * falls below vm_page_free_min.
 */
 
@@ -125,7 +125,7 @@
 #endif /* VM_PAGE_EXTERNAL_TARGET */
 
 /*
- * When vm_page_free_count falls below vm_page_free_reserved,
+ * When the number of free pages falls below vm_page_free_reserved,
  * only vm-privileged threads can allocate pages. vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
@@ -136,7 +136,7 @@
 #endif /* VM_PAGE_FREE_RESERVED */
 
 /*
- * When vm_page_free_count falls below vm_pageout_reserved_internal,
+ * When the number of free pages falls below vm_pageout_reserved_internal,
  * the pageout daemon no longer trusts external pagers to clean pages.
  * External pagers are probably all wedged waiting for a free page.
  * It forcibly double-pages dirty pages belonging to external objects,
@@ -148,7 +148,7 @@
 #endif /* VM_PAGEOUT_RESERVED_INTERNAL */
 
 /*
- * When vm_page_free_count falls below vm_pageout_reserved_really,
+ * When the number of free pages falls below vm_pageout_reserved_really,
  * the pageout daemon stops work entirely to let the default pager
  * catch up (assuming the default pager has pages to clean).
  * Beyond this point, it is too dangerous to consume memory
@@ -559,7 +559,7 @@ void vm_pageout_scan(void)
         for (burst_count = 0;;) {
                 vm_page_t m;
                 vm_object_t object;
-                unsigned int free_count;
+                unsigned long free_count;
 
                 /*
                  * Recalculate vm_page_inactivate_target.
@@ -630,7 +630,7 @@ void vm_pageout_scan(void)
          */
 
         simple_lock(&vm_page_queue_free_lock);
-        free_count = vm_page_free_count;
+        free_count = vm_page_mem_free();
         if ((free_count >= vm_page_free_target) &&
             (vm_page_external_count <= vm_page_external_target) &&
             (vm_page_free_wanted == 0)) {
@@ -910,7 +910,7 @@ void vm_pageout_continue(void)
 
 void vm_pageout(void)
 {
-        int free_after_reserve;
+        unsigned long free_after_reserve;
 
         current_thread()->vm_privilege = TRUE;
         stack_privilege(current_thread());
@@ -946,7 +946,7 @@ void vm_pageout(void)
         vm_pageout_reserved_really =
                 VM_PAGEOUT_RESERVED_REALLY(vm_page_free_reserved);
 
-        free_after_reserve = vm_page_free_count - vm_page_free_reserved;
+        free_after_reserve = vm_page_mem_free() - vm_page_free_reserved;
 
         if (vm_page_external_limit == 0)
                 vm_page_external_limit =
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index dd1cf9cd..cc7b8091 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -98,11 +98,15 @@ unsigned int    vm_page_hash_mask;      /* Mask for hash function */
 vm_page_t       vm_page_queue_fictitious;
 decl_simple_lock_data(,vm_page_queue_free_lock)
 unsigned int    vm_page_free_wanted;
-int             vm_page_free_count;
 int             vm_page_fictitious_count;
 int             vm_page_external_count;
 
-unsigned int    vm_page_free_count_minimum;     /* debugging */
+/*
+ * This variable isn't directly used. It's merely a placeholder for the
+ * address used to synchronize threads waiting for pages to become
+ * available. The real value is returned by vm_page_free_mem().
+ */
+unsigned int    vm_page_free_avail;
 
 /*
  *      Occasionally, the virtual memory system uses
@@ -233,9 +237,6 @@ void vm_page_bootstrap(
 
         *startp = virtual_space_start;
         *endp = virtual_space_end;
-
-        /* printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);*/
-        vm_page_free_count_minimum = vm_page_free_count;
 }
 
 #ifndef MACHINE_PAGES
@@ -773,7 +774,7 @@ vm_page_t vm_page_grab(
          *      for externally-managed pages.
          */
 
-        if (((vm_page_free_count < vm_page_free_reserved)
+        if (((vm_page_mem_free() < vm_page_free_reserved)
              || (external
                  && (vm_page_external_count > vm_page_external_limit)))
             && !current_thread()->vm_privilege) {
@@ -786,8 +787,6 @@ vm_page_t vm_page_grab(
 
         if (mem == NULL)
                 panic("vm_page_grab");
-        if (--vm_page_free_count < vm_page_free_count_minimum)
-                vm_page_free_count_minimum = vm_page_free_count;
         if (external)
                 vm_page_external_count++;
 
@@ -806,8 +805,8 @@ vm_page_t vm_page_grab(
          *      it doesn't really matter.
          */
 
-        if ((vm_page_free_count < vm_page_free_min) ||
-            ((vm_page_free_count < vm_page_free_target) &&
+        if ((vm_page_mem_free() < vm_page_free_min) ||
+            ((vm_page_mem_free() < vm_page_free_target) &&
              (vm_page_inactive_count < vm_page_inactive_target)))
                 thread_wakeup((event_t) &vm_page_free_wanted);
 
@@ -838,7 +837,6 @@ static void vm_page_release(
                 panic("vm_page_release");
         mem->free = TRUE;
         vm_page_free_pa(mem, 0);
-        vm_page_free_count++;
         if (external)
                 vm_page_external_count--;
 
@@ -863,9 +861,9 @@ static void vm_page_release(
          */
 
         if ((vm_page_free_wanted > 0) &&
-            (vm_page_free_count >= vm_page_free_reserved)) {
+            (vm_page_mem_free() >= vm_page_free_reserved)) {
                 vm_page_free_wanted--;
-                thread_wakeup_one((event_t) &vm_page_free_count);
+                thread_wakeup_one((event_t) &vm_page_free_avail);
         }
 
         simple_unlock(&vm_page_queue_free_lock);
@@ -896,22 +894,18 @@ vm_page_t vm_page_grab_contig(
          *      for externally-managed pages.
          */
 
-        if (((vm_page_free_count - nr_pages) <= vm_page_free_reserved)
+        if (((vm_page_mem_free() - nr_pages) <= vm_page_free_reserved)
             && !current_thread()->vm_privilege) {
                 simple_unlock(&vm_page_queue_free_lock);
                 return VM_PAGE_NULL;
         }
 
+        /* TODO Allow caller to pass type */
         mem = vm_page_alloc_pa(order, selector, VM_PT_KERNEL);
 
         if (mem == NULL)
                 panic("vm_page_grab_contig");
 
-        vm_page_free_count -= nr_pages;
-
-        if (vm_page_free_count < vm_page_free_count_minimum)
-                vm_page_free_count_minimum = vm_page_free_count;
-
         for (i = 0; i < nr_pages; i++) {
                 mem[i].free = FALSE;
                 mem[i].extcounted = mem[i].external = 0;
@@ -930,8 +924,8 @@ vm_page_t vm_page_grab_contig(
          *      it doesn't really matter.
          */
 
-        if ((vm_page_free_count < vm_page_free_min) ||
-            ((vm_page_free_count < vm_page_free_target) &&
+        if ((vm_page_mem_free() < vm_page_free_min) ||
+            ((vm_page_mem_free() < vm_page_free_target) &&
              (vm_page_inactive_count < vm_page_inactive_target)))
                 thread_wakeup((event_t) &vm_page_free_wanted);
 
@@ -961,12 +955,11 @@ void vm_page_free_contig(vm_page_t mem, vm_size_t size)
         }
 
         vm_page_free_pa(mem, order);
-        vm_page_free_count += nr_pages;
 
         if ((vm_page_free_wanted > 0) &&
-            (vm_page_free_count >= vm_page_free_reserved)) {
+            (vm_page_mem_free() >= vm_page_free_reserved)) {
                 vm_page_free_wanted--;
-                thread_wakeup_one((event_t) &vm_page_free_count);
+                thread_wakeup_one((event_t) &vm_page_free_avail);
         }
 
         simple_unlock(&vm_page_queue_free_lock);
@@ -992,11 +985,11 @@ void vm_page_wait(
          */
 
         simple_lock(&vm_page_queue_free_lock);
-        if ((vm_page_free_count < vm_page_free_target)
+        if ((vm_page_mem_free() < vm_page_free_target)
             || (vm_page_external_count > vm_page_external_limit)) {
                 if (vm_page_free_wanted++ == 0)
                         thread_wakeup((event_t)&vm_page_free_wanted);
-                assert_wait((event_t)&vm_page_free_count, FALSE);
+                assert_wait((event_t)&vm_page_free_avail, FALSE);
                 simple_unlock(&vm_page_queue_free_lock);
                 if (continuation != 0) {
                         counter(c_vm_page_wait_block_user++);
diff --git a/vm/vm_user.c b/vm/vm_user.c
index c71e9f50..e65f6d5f 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -182,7 +182,7 @@ kern_return_t vm_statistics(
         *stat = vm_stat;
 
         stat->pagesize = PAGE_SIZE;
-        stat->free_count = vm_page_free_count;
+        stat->free_count = vm_page_mem_free();
         stat->active_count = vm_page_active_count;
         stat->inactive_count = vm_page_inactive_count;
         stat->wire_count = vm_page_wire_count;