diff options
author | Richard Braun <rbraun@sceen.net> | 2016-02-02 03:30:34 +0100 |
---|---|---|
committer | Richard Braun <rbraun@sceen.net> | 2016-02-02 03:58:19 +0100 |
commit | 5e9f6f52451ccb768d875370bf1769b27ff0041c (patch) | |
tree | d556eb00b4ce74e08ddf0f8b34a64cd9f3932067 /kern/slab.h | |
parent | 945f51bfe865e122d73986dd8219762450ffc0f3 (diff) | |
download | gnumach-5e9f6f52451ccb768d875370bf1769b27ff0041c.tar.gz gnumach-5e9f6f52451ccb768d875370bf1769b27ff0041c.tar.bz2 gnumach-5e9f6f52451ccb768d875370bf1769b27ff0041c.zip |
Stack the slab allocator directly on top of the physical allocator
In order to increase the amount of memory available for kernel objects,
without reducing the amount of memory available for user processes,
a new allocation strategy is introduced in this change.
Instead of allocating kernel objects out of kernel virtual memory,
the slab allocator directly uses the direct mapping of physical
memory as its backend. This largely increases the kernel heap, and
removes the need for address translation updates.
In order to allow this strategy, an assumption made by the interrupt
code had to be removed. In addition, kernel stacks are now also
allocated directly from the physical allocator.
* i386/i386/db_trace.c: Include i386at/model_dep.h
(db_i386_reg_value): Update stack check.
* i386/i386/locore.S (trap_from_kernel, all_intrs,
int_from_intstack): Update interrupt handling.
* i386/i386at/model_dep.c: Include kern/macros.h.
(int_stack, int_stack_base): New variables.
(int_stack_high): Remove variable.
(i386at_init): Update interrupt stack initialization.
* i386/i386at/model_dep.h: Include i386/vm_param.h.
(int_stack_top, int_stack_base): New extern declarations.
(ON_INT_STACK): New macro.
* kern/slab.c: Include vm/vm_page.h
(KMEM_CF_NO_CPU_POOL, KMEM_CF_NO_RECLAIM): Remove macros.
(kmem_pagealloc, kmem_pagefree, kalloc_pagealloc, kalloc_pagefree): Remove
functions.
(kmem_slab_create): Allocate slab pages directly from the physical allocator.
(kmem_slab_destroy): Release slab pages directly to the physical allocator.
(kmem_cache_compute_sizes): Update the slab size computation algorithm to
return a power-of-two suitable for the physical allocator.
(kmem_cache_init): Remove custom allocation function pointers.
(kmem_cache_reap): Remove check on KMEM_CF_NO_RECLAIM.
(slab_init, kalloc_init): Update calls to kmem_cache_init.
(kalloc, kfree): Directly fall back on the physical allocator for big
allocation sizes.
(host_slab_info): Remove checks on defunct flags.
* kern/slab.h (kmem_slab_alloc_fn_t, kmem_slab_free_fn_t): Remove types.
(struct kmem_cache): Add `slab_order' member, remove `slab_alloc_fn' and
`slab_free_fn' members.
(KMEM_CACHE_NOCPUPOOL, KMEM_CACHE_NORECLAIM): Remove macros.
(kmem_cache_init): Update prototype, remove custom allocation functions.
* kern/thread.c (stack_alloc): Allocate stacks from the physical allocator.
* vm/vm_map.c (vm_map_kentry_cache, kentry_data, kentry_data_size): Remove
variables.
(kentry_pagealloc): Remove function.
(vm_map_init): Update calls to kmem_cache_init, remove initialization of
vm_map_kentry_cache.
(vm_map_create, _vm_map_entry_dispose, vm_map_copyout): Unconditionnally
use vm_map_entry_cache.
* vm/vm_map.h (kentry_data, kentry_data_size, kentry_count): Remove extern
declarations.
* vm/vm_page.h (VM_PT_STACK): New page type.
* device/dev_lookup.c (dev_lookup_init): Update calls to kmem_cache_init.
* device/dev_pager.c (dev_pager_hash_init, device_pager_init): Likewise.
* device/ds_routines.c (mach_device_init, mach_device_trap_init): Likewise.
* device/net_io.c (net_io_init): Likewise.
* i386/i386/fpu.c (fpu_module_init): Likewise.
* i386/i386/machine_task.c (machine_task_module_init): Likewise.
* i386/i386/pcb.c (pcb_module_init): Likewise.
* i386/intel/pmap.c (pmap_init): Likewise.
* ipc/ipc_init.c (ipc_bootstrap): Likewise.
* ipc/ipc_marequest.c (ipc_marequest_init): Likewise.
* kern/act.c (global_act_init): Likewise.
* kern/processor.c (pset_sys_init): Likewise.
* kern/rdxtree.c (rdxtree_cache_init): Likewise.
* kern/task.c (task_init): Likewise.
* vm/memory_object_proxy.c (memory_object_proxy_init): Likewise.
* vm/vm_external.c (vm_external_module_initialize): Likewise.
* vm/vm_fault.c (vm_fault_init): Likewise.
* vm/vm_object.c (vm_object_bootstrap): Likewise.
* vm/vm_resident.c (vm_page_module_init): Likewise.
(vm_page_bootstrap): Remove initialization of kentry_data.
Diffstat (limited to 'kern/slab.h')
-rw-r--r-- | kern/slab.h | 23 |
1 file changed, 5 insertions, 18 deletions
diff --git a/kern/slab.h b/kern/slab.h
index 5ff3960e..1ad24d63 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -137,14 +137,6 @@ struct kmem_slab {
 typedef void (*kmem_cache_ctor_t)(void *obj);
 
 /*
- * Types for slab allocation/free functions.
- *
- * All addresses and sizes must be page-aligned.
- */
-typedef vm_offset_t (*kmem_slab_alloc_fn_t)(vm_size_t);
-typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
-
-/*
  * Cache name buffer size. The size is chosen so that struct
  * kmem_cache fits into two cache lines. The size of a cache line on
  * a typical CPU is 64 bytes.
@@ -175,6 +167,7 @@ struct kmem_cache {
     struct rbtree active_slabs;
     int flags;
     size_t bufctl_dist;     /* Distance from buffer to bufctl */
+    unsigned int slab_order;
     size_t slab_size;
     unsigned long bufs_per_slab;
     unsigned long nr_objs;  /* Number of allocated objects */
@@ -189,8 +182,6 @@ struct kmem_cache {
     size_t color_max;
     unsigned long nr_bufs;  /* Total number of buffers */
     unsigned long nr_slabs;
-    kmem_slab_alloc_fn_t slab_alloc_fn;
-    kmem_slab_free_fn_t slab_free_fn;
     char name[KMEM_CACHE_NAME_SIZE];
     size_t buftag_dist;     /* Distance from buffer to buftag */
     size_t redzone_pad;     /* Bytes from end of object to redzone word */
@@ -210,19 +201,15 @@ extern vm_map_t kmem_map;
 /*
  * Cache initialization flags.
  */
-#define KMEM_CACHE_NOCPUPOOL    0x1 /* Don't use the per-cpu pools */
-#define KMEM_CACHE_NOOFFSLAB    0x2 /* Don't allocate external slab data */
-#define KMEM_CACHE_NORECLAIM    0x4 /* Never give slabs back to their source,
-                                       implies KMEM_CACHE_NOOFFSLAB */
-#define KMEM_CACHE_VERIFY       0x8 /* Use debugging facilities */
+#define KMEM_CACHE_NOOFFSLAB    0x1 /* Don't allocate external slab data */
+#define KMEM_CACHE_VERIFY       0x2 /* Use debugging facilities */
 
 /*
  * Initialize a cache.
  */
 void kmem_cache_init(struct kmem_cache *cache, const char *name,
-                     size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
-                     kmem_slab_alloc_fn_t slab_alloc_fn,
-                     kmem_slab_free_fn_t slab_free_fn, int flags);
+                     size_t obj_size, size_t align,
+                     kmem_cache_ctor_t ctor, int flags);
 
 /*
  * Allocate an object from a cache.