From 5e9f6f52451ccb768d875370bf1769b27ff0041c Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Tue, 2 Feb 2016 03:30:34 +0100
Subject: Stack the slab allocator directly on top of the physical allocator

In order to increase the amount of memory available for kernel objects,
without reducing the amount of memory available for user processes, a new
allocation strategy is introduced in this change. Instead of allocating
kernel objects out of kernel virtual memory, the slab allocator directly
uses the direct mapping of physical memory as its backend. This greatly
increases the kernel heap and removes the need for address translation
updates. In order to allow this strategy, an assumption made by the
interrupt code had to be removed. In addition, kernel stacks are now also
allocated directly from the physical allocator.

* i386/i386/db_trace.c: Include i386at/model_dep.h.
(db_i386_reg_value): Update stack check.
* i386/i386/locore.S (trap_from_kernel, all_intrs, int_from_intstack):
Update interrupt handling.
* i386/i386at/model_dep.c: Include kern/macros.h.
(int_stack, int_stack_base): New variables.
(int_stack_high): Remove variable.
(i386at_init): Update interrupt stack initialization.
* i386/i386at/model_dep.h: Include i386/vm_param.h.
(int_stack_top, int_stack_base): New extern declarations.
(ON_INT_STACK): New macro.
* kern/slab.c: Include vm/vm_page.h.
(KMEM_CF_NO_CPU_POOL, KMEM_CF_NO_RECLAIM): Remove macros.
(kmem_pagealloc, kmem_pagefree, kalloc_pagealloc, kalloc_pagefree):
Remove functions.
(kmem_slab_create): Allocate slab pages directly from the physical
allocator.
(kmem_slab_destroy): Release slab pages directly to the physical
allocator.
(kmem_cache_compute_sizes): Update the slab size computation algorithm
to return a power-of-two suitable for the physical allocator.
(kmem_cache_init): Remove custom allocation function pointers.
(kmem_cache_reap): Remove check on KMEM_CF_NO_RECLAIM.
(slab_init, kalloc_init): Update calls to kmem_cache_init.
(kalloc, kfree): Directly fall back on the physical allocator for big
allocation sizes.
(host_slab_info): Remove checks on defunct flags.
* kern/slab.h (kmem_slab_alloc_fn_t, kmem_slab_free_fn_t): Remove types.
(struct kmem_cache): Add `slab_order' member, remove `slab_alloc_fn' and
`slab_free_fn' members.
(KMEM_CACHE_NOCPUPOOL, KMEM_CACHE_NORECLAIM): Remove macros.
(kmem_cache_init): Update prototype, remove custom allocation functions.
* kern/thread.c (stack_alloc): Allocate stacks from the physical
allocator.
* vm/vm_map.c (vm_map_kentry_cache, kentry_data, kentry_data_size):
Remove variables.
(kentry_pagealloc): Remove function.
(vm_map_init): Update calls to kmem_cache_init, remove initialization of
vm_map_kentry_cache.
(_vm_map_entry_create, _vm_map_entry_dispose, vm_map_copyout):
Unconditionally use vm_map_entry_cache.
* vm/vm_map.h (kentry_data, kentry_data_size, kentry_count): Remove
extern declarations.
* vm/vm_page.h (VM_PT_STACK): New page type.
* device/dev_lookup.c (dev_lookup_init): Update calls to kmem_cache_init.
* device/dev_pager.c (dev_pager_hash_init, device_pager_init): Likewise.
* device/ds_routines.c (mach_device_init, mach_device_trap_init):
Likewise.
* device/net_io.c (net_io_init): Likewise.
* i386/i386/fpu.c (fpu_module_init): Likewise.
* i386/i386/machine_task.c (machine_task_module_init): Likewise.
* i386/i386/pcb.c (pcb_module_init): Likewise.
* i386/intel/pmap.c (pmap_init): Likewise.
* ipc/ipc_init.c (ipc_bootstrap): Likewise.
* ipc/ipc_marequest.c (ipc_marequest_init): Likewise.
* kern/act.c (global_act_init): Likewise.
* kern/processor.c (pset_sys_init): Likewise.
* kern/rdxtree.c (rdxtree_cache_init): Likewise.
* kern/task.c (task_init): Likewise.
* vm/memory_object_proxy.c (memory_object_proxy_init): Likewise.
* vm/vm_external.c (vm_external_module_initialize): Likewise.
* vm/vm_fault.c (vm_fault_init): Likewise.
* vm/vm_object.c (vm_object_bootstrap): Likewise.
* vm/vm_resident.c (vm_page_module_init): Likewise.
(vm_page_bootstrap): Remove initialization of kentry_data.
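The core of the new strategy, in rough outline: a slab of 2^order
contiguous pages is taken straight from the physical page allocator and
addressed through the kernel's direct physical mapping, so no kernel
virtual addresses are reserved and no page tables are touched. The sketch
below is illustrative only; phys_alloc_pages(), phys_free_pages() and
direct_map_va() are placeholder names standing in for the physical
allocator and direct-map conversion, not the interfaces actually used by
kern/slab.c.

    /* Illustrative sketch only -- placeholder names, not gnumach's API. */
    struct vm_page;

    struct vm_page *phys_alloc_pages(unsigned int order);  /* 2^order contiguous pages */
    void phys_free_pages(struct vm_page *page, unsigned int order);
    void *direct_map_va(struct vm_page *page);              /* page -> direct-mapped VA */

    static void *
    slab_pages_alloc(unsigned int order)
    {
        struct vm_page *page;

        /* Ask the physical allocator for the whole slab at once. */
        page = phys_alloc_pages(order);

        if (page == NULL)
            return NULL;

        /*
         * The pages are already reachable through the direct mapping,
         * so there is no kernel-VM allocation or mapping step here.
         */
        return direct_map_va(page);
    }

This is also why kmem_cache_compute_sizes() now rounds slab sizes to a
power-of-two suitable for the physical allocator, which hands out blocks
in power-of-two orders.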
---
 vm/vm_map.c | 60 +++++++-----------------------------------------------------
 1 file changed, 7 insertions(+), 53 deletions(-)

(limited to 'vm/vm_map.c')

diff --git a/vm/vm_map.c b/vm/vm_map.c
index 3a231de0..0d610621 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -126,7 +126,6 @@ MACRO_END
 
 struct kmem_cache    vm_map_cache;        /* cache for vm_map structures */
 struct kmem_cache    vm_map_entry_cache;  /* cache for vm_map_entry structures */
-struct kmem_cache    vm_map_kentry_cache; /* cache for kernel entry structures */
 struct kmem_cache    vm_map_copy_cache;   /* cache for vm_map_copy structures */
 
 /*
@@ -151,43 +150,16 @@ vm_object_t vm_submap_object = &vm_submap_object_store;
  *
  *	vm_map_cache:		used to allocate maps.
  *	vm_map_entry_cache:	used to allocate map entries.
- *	vm_map_kentry_cache:	used to allocate map entries for the kernel.
- *
- *	Kernel map entries are allocated from a special cache, using a custom
- *	page allocation function to avoid recursion.  It would be difficult
- *	(perhaps impossible) for the kernel to allocate more memory to an entry
- *	cache when it became empty since the very act of allocating memory
- *	implies the creation of a new entry.
  */

-vm_offset_t	kentry_data;
-vm_size_t	kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
-	vm_offset_t result;
-
-	if (size > kentry_data_size)
-		panic("vm_map: kentry memory exhausted");
-
-	result = kentry_data;
-	kentry_data += size;
-	kentry_data_size -= size;
-	return result;
-}
-
 void vm_map_init(void)
 {
 	kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
-			NULL, NULL, NULL, 0);
+			NULL, 0);
 	kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
-			sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
-	kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
-			sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
-			NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
-			| KMEM_CACHE_NORECLAIM);
+			sizeof(struct vm_map_entry), 0, NULL, 0);
 	kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
-			sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+			sizeof(struct vm_map_copy), 0, NULL, 0);
 
 	/*
 	 * Submap object is initialized by vm_object_init.
@@ -261,15 +233,9 @@ vm_map_t vm_map_create(
 vm_map_entry_t _vm_map_entry_create(map_header)
 	const struct vm_map_header *map_header;
 {
-	kmem_cache_t cache;
 	vm_map_entry_t entry;
 
-	if (map_header->entries_pageable)
-	    cache = &vm_map_entry_cache;
-	else
-	    cache = &vm_map_kentry_cache;
-
-	entry = (vm_map_entry_t) kmem_cache_alloc(cache);
+	entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
 	if (entry == VM_MAP_ENTRY_NULL)
 		panic("vm_map_entry_create");
 
@@ -291,14 +257,9 @@ void _vm_map_entry_dispose(map_header, entry)
 	const struct vm_map_header *map_header;
 	vm_map_entry_t entry;
 {
-	kmem_cache_t cache;
-
-	if (map_header->entries_pageable)
-	    cache = &vm_map_entry_cache;
-	else
-	    cache = &vm_map_kentry_cache;
+	(void)map_header;
 
-	kmem_cache_free(cache, (vm_offset_t) entry);
+	kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
 }
 
 /*
@@ -2539,15 +2500,8 @@ kern_return_t vm_map_copyout(
 		 *	Mismatches occur when dealing with the default
 		 *	pager.
 		 */
-		kmem_cache_t	old_cache;
 		vm_map_entry_t	next, new;
 
-		/*
-		 *	Find the cache that the copies were allocated from
-		 */
-		old_cache = (copy->cpy_hdr.entries_pageable)
-			? &vm_map_entry_cache
-			: &vm_map_kentry_cache;
 		entry = vm_map_copy_first_entry(copy);
 
 		/*
@@ -2571,7 +2525,7 @@ kern_return_t vm_map_copyout(
 				vm_map_copy_last_entry(copy),
 				new);
 			next = entry->vme_next;
-			kmem_cache_free(old_cache, (vm_offset_t) entry);
+			kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
 			entry = next;
 		}
 	}
--
cgit v1.2.3
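For reference, the caller-visible change repeated across all of the files
marked "Likewise" above is the same mechanical one visible in the
vm_map_init() hunk of this diff: the two custom slab allocation and free
function arguments are dropped from kmem_cache_init(). Taking
vm_map_entry_cache as the example from this diff:

    /* Before: custom slab page allocation and free hooks could be supplied. */
    kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
                    sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);

    /* After: the hook arguments are gone; every cache is backed directly
       by the physical allocator. */
    kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
                    sizeof(struct vm_map_entry), 0, NULL, 0);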