author     Richard Braun <rbraun@sceen.net>    2016-02-02 23:17:20 +0100
committer  Richard Braun <rbraun@sceen.net>    2016-02-02 23:20:32 +0100
commit     44d78061e90e777b51cae8e01eda5c0d3ce63103 (patch)
tree       64af20619a7292834c9d66e8157e0301a8d62f0d /kern/slab.c
parent     909167b9d05cf896f1e54122183ef8ee9ee70677 (diff)
Fix various memory management errors
A few errors were introduced in the latest changes.
o Add VM_PAGE_WAIT calls around physical allocation attempts in case of
memory exhaustion (the retry pattern is sketched right after this list).
o Fix stack release.
o Fix memory exhaustion report.
o Fix free page accounting.
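The first item above is one pattern applied to every affected allocation
path: block in VM_PAGE_WAIT until the pageout daemon reclaims memory, then
retry the physical allocation instead of propagating the failure. In sketch
form (this mirrors the new kmem_pagealloc in the kern/slab.c diff below;
`page' and `size' stand for the caller's locals):

    for (;;) {
        page = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);

        if (page != NULL)
            break;  /* allocation succeeded */

        /* Block until memory is reclaimed, then try again. */
        VM_PAGE_WAIT(NULL);
    }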
* kern/slab.c (kmem_pagealloc, kmem_pagefree): New functions.
(kmem_slab_create, kmem_slab_destroy, kalloc, kfree): Use kmem_pagealloc
and kmem_pagefree instead of the raw page allocation functions.
(kmem_cache_compute_sizes): Don't store slab order.
* kern/slab.h (struct kmem_cache): Remove `slab_order' member.
* kern/thread.c (stack_alloc): Call VM_PAGE_WAIT in case of memory
exhaustion.
(stack_collect): Call vm_page_free_contig instead of kmem_free to
release pages (both kern/thread.c changes are sketched after this list).
* vm/vm_page.c (vm_page_seg_alloc): Fix memory exhaustion report.
(vm_page_setup): Don't update vm_page_free_count.
(vm_page_free_pa): Check page parameter.
(vm_page_mem_free): New function (sketched after this list).
* vm/vm_page.h (vm_page_free_count): Remove extern declaration.
(vm_page_mem_free): New prototype.
* vm/vm_pageout.c: Update comments not to refer to vm_page_free_count.
(vm_pageout_scan, vm_pageout_continue, vm_pageout): Use vm_page_mem_free
instead of vm_page_free_count, update types accordingly.
* vm/vm_resident.c (vm_page_free_count, vm_page_free_count_minimum):
Remove variables.
(vm_page_free_avail): New variable.
(vm_page_bootstrap, vm_page_grab, vm_page_release, vm_page_grab_contig,
vm_page_free_contig, vm_page_wait): Use vm_page_mem_free instead of vm_page_free_count,
update types accordingly, don't set vm_page_free_count_minimum.
* vm/vm_user.c (vm_statistics): Likewise.
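The kern/thread.c hunks are not shown on this page (the diffstat is limited
to kern/slab.c). A hedged sketch of what the two changes above amount to;
the surrounding code and the use of KERNEL_STACK_SIZE here are assumptions,
not taken from the actual patch:

    /* stack_alloc(): wait for memory instead of returning failure. */
    for (;;) {
        page = vm_page_grab_contig(KERNEL_STACK_SIZE, VM_PAGE_SEL_DIRECTMAP);

        if (page != NULL)
            break;

        VM_PAGE_WAIT(NULL);
    }

    /* stack_collect(): release cached stacks through the physical
       allocator they came from, not through kmem_free(). */
    vm_page_free_contig(page, KERNEL_STACK_SIZE);

The stack_collect fix is about symmetry: stacks are now grabbed as
contiguous physical pages, so releasing them must go through
vm_page_free_contig rather than the kernel map.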
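vm_page_mem_free replaces the former vm_page_free_count as the way to ask
how much memory is free. One plausible shape, summing per-segment free
pages and reporting an amount of memory rather than a page count (the names
`vm_page_segs', `vm_page_segs_size' and `nr_free_pages' are illustrative
guesses, not taken from the source):

    phys_addr_t
    vm_page_mem_free(void)
    {
        phys_addr_t total;
        unsigned int i;

        total = 0;

        /* Sum free pages across physical segments instead of
           maintaining a global counter. */
        for (i = 0; i < vm_page_segs_size; i++)
            total += vm_page_segs[i].nr_free_pages;

        return total * PAGE_SIZE;
    }

Returning an amount of memory rather than a page count is why the commit
message notes type updates at the vm_pageout.c and vm_user.c call sites.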
Diffstat (limited to 'kern/slab.c')
-rw-r--r--  kern/slab.c  39
1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/kern/slab.c b/kern/slab.c
index a887cbb5..f1a534a8 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -383,6 +383,27 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
     return (void *)bufctl - cache->bufctl_dist;
 }
 
+static struct vm_page *
+kmem_pagealloc(vm_size_t size)
+{
+    struct vm_page *page;
+
+    for (;;) {
+        page = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);
+
+        if (page != NULL)
+            return page;
+
+        VM_PAGE_WAIT(NULL);
+    }
+}
+
+static void
+kmem_pagefree(struct vm_page *page, vm_size_t size)
+{
+    vm_page_free_contig(page, size);
+}
+
 static void kmem_slab_create_verify(struct kmem_slab *slab,
                                     struct kmem_cache *cache)
 {
@@ -418,9 +439,7 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
     unsigned long buffers;
     void *slab_buf;
 
-    page = vm_page_alloc_pa(cache->slab_order,
-                            VM_PAGE_SEL_DIRECTMAP,
-                            VM_PT_KMEM);
+    page = kmem_pagealloc(cache->slab_size);
 
     if (page == NULL)
         return NULL;
@@ -431,7 +450,7 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
         slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
 
         if (slab == NULL) {
-            vm_page_free_pa(page, cache->slab_order);
+            kmem_pagefree(page, cache->slab_size);
             return NULL;
         }
     } else {
@@ -504,7 +523,7 @@ static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
     slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
     page = vm_page_lookup_pa(kvtophys(slab_buf));
     assert(page != NULL);
-    vm_page_free_pa(page, cache->slab_order);
+    kmem_pagefree(page, cache->slab_size);
 
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
         kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
@@ -681,7 +700,7 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
     size_t i, buffers, buf_size, slab_size, free_slab_size;
     size_t waste, waste_min, optimal_size = optimal_size;
     int embed, optimal_embed = optimal_embed;
-    unsigned int slab_order, optimal_order = optimal_order;
+    unsigned int slab_order;
 
     buf_size = cache->buf_size;
 
@@ -718,7 +737,6 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 
         if (waste <= waste_min) {
             waste_min = waste;
-            optimal_order = slab_order;
             optimal_size = slab_size;
             optimal_embed = embed;
         }
@@ -727,7 +745,6 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 
     assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
 
-    cache->slab_order = optimal_order;
     cache->slab_size = optimal_size;
     slab_size = cache->slab_size - (optimal_embed ? sizeof(struct kmem_slab) : 0);
 
@@ -1335,9 +1352,7 @@ vm_offset_t kalloc(vm_size_t size)
     } else {
         struct vm_page *page;
 
-        page = vm_page_alloc_pa(vm_page_order(size),
-                                VM_PAGE_SEL_DIRECTMAP,
-                                VM_PT_KERNEL);
+        page = kmem_pagealloc(size);
 
         if (page == NULL)
            return 0;
@@ -1387,7 +1402,7 @@ void kfree(vm_offset_t data, vm_size_t size)
 
         struct vm_page *page;
 
         page = vm_page_lookup_pa(kvtophys(data));
-        vm_page_free_pa(page, vm_page_order(size));
+        kmem_pagefree(page, size);
     }
 }