diff options
Diffstat (limited to 'vm')
-rw-r--r--	vm/memory_object_proxy.c	|  2
-rw-r--r--	vm/vm_external.c	|  6
-rw-r--r--	vm/vm_fault.c	|  2
-rw-r--r--	vm/vm_map.c	| 60
-rw-r--r--	vm/vm_map.h	|  3
-rw-r--r--	vm/vm_object.c	|  2
-rw-r--r--	vm/vm_page.h	|  3
-rw-r--r--	vm/vm_resident.c	|  8
8 files changed, 16 insertions(+), 70 deletions(-)
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c index a64bfcce..01bce2a5 100644 --- a/vm/memory_object_proxy.c +++ b/vm/memory_object_proxy.c @@ -64,7 +64,7 @@ void memory_object_proxy_init (void) { kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy", - sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0); + sizeof (struct memory_object_proxy), 0, NULL, 0); } /* Lookup a proxy memory object by its port. */ diff --git a/vm/vm_external.c b/vm/vm_external.c index 2e2593b1..097a9b12 100644 --- a/vm/vm_external.c +++ b/vm/vm_external.c @@ -138,13 +138,13 @@ void vm_external_module_initialize(void) vm_size_t size = (vm_size_t) sizeof(struct vm_external); kmem_cache_init(&vm_external_cache, "vm_external", size, 0, - NULL, NULL, NULL, 0); + NULL, 0); kmem_cache_init(&vm_object_small_existence_map_cache, "small_existence_map", SMALL_SIZE, 0, - NULL, NULL, NULL, 0); + NULL, 0); kmem_cache_init(&vm_object_large_existence_map_cache, "large_existence_map", LARGE_SIZE, 0, - NULL, NULL, NULL, 0); + NULL, 0); } diff --git a/vm/vm_fault.c b/vm/vm_fault.c index 4d674174..09e2c54d 100644 --- a/vm/vm_fault.c +++ b/vm/vm_fault.c @@ -105,7 +105,7 @@ extern struct db_watchpoint *db_watchpoint_list; void vm_fault_init(void) { kmem_cache_init(&vm_fault_state_cache, "vm_fault_state", - sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0); + sizeof(vm_fault_state_t), 0, NULL, 0); } /* diff --git a/vm/vm_map.c b/vm/vm_map.c index 3a231de0..0d610621 100644 --- a/vm/vm_map.c +++ b/vm/vm_map.c @@ -126,7 +126,6 @@ MACRO_END struct kmem_cache vm_map_cache; /* cache for vm_map structures */ struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */ -struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */ struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */ /* @@ -151,43 +150,16 @@ vm_object_t vm_submap_object = &vm_submap_object_store; * * vm_map_cache: used to allocate maps. 
* vm_map_entry_cache: used to allocate map entries. - * vm_map_kentry_cache: used to allocate map entries for the kernel. - * - * Kernel map entries are allocated from a special cache, using a custom - * page allocation function to avoid recursion. It would be difficult - * (perhaps impossible) for the kernel to allocate more memory to an entry - * cache when it became empty since the very act of allocating memory - * implies the creation of a new entry. */ -vm_offset_t kentry_data; -vm_size_t kentry_data_size = KENTRY_DATA_SIZE; - -static vm_offset_t kentry_pagealloc(vm_size_t size) -{ - vm_offset_t result; - - if (size > kentry_data_size) - panic("vm_map: kentry memory exhausted"); - - result = kentry_data; - kentry_data += size; - kentry_data_size -= size; - return result; -} - void vm_map_init(void) { kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0, - NULL, NULL, NULL, 0); + NULL, 0); kmem_cache_init(&vm_map_entry_cache, "vm_map_entry", - sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0); - kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry", - sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc, - NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB - | KMEM_CACHE_NORECLAIM); + sizeof(struct vm_map_entry), 0, NULL, 0); kmem_cache_init(&vm_map_copy_cache, "vm_map_copy", - sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0); + sizeof(struct vm_map_copy), 0, NULL, 0); /* * Submap object is initialized by vm_object_init. 
@@ -261,15 +233,9 @@ vm_map_t vm_map_create( vm_map_entry_t _vm_map_entry_create(map_header) const struct vm_map_header *map_header; { - kmem_cache_t cache; vm_map_entry_t entry; - if (map_header->entries_pageable) - cache = &vm_map_entry_cache; - else - cache = &vm_map_kentry_cache; - - entry = (vm_map_entry_t) kmem_cache_alloc(cache); + entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache); if (entry == VM_MAP_ENTRY_NULL) panic("vm_map_entry_create"); @@ -291,14 +257,9 @@ void _vm_map_entry_dispose(map_header, entry) const struct vm_map_header *map_header; vm_map_entry_t entry; { - kmem_cache_t cache; - - if (map_header->entries_pageable) - cache = &vm_map_entry_cache; - else - cache = &vm_map_kentry_cache; + (void)map_header; - kmem_cache_free(cache, (vm_offset_t) entry); + kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry); } /* @@ -2539,15 +2500,8 @@ kern_return_t vm_map_copyout( * Mismatches occur when dealing with the default * pager. */ - kmem_cache_t old_cache; vm_map_entry_t next, new; - /* - * Find the cache that the copies were allocated from - */ - old_cache = (copy->cpy_hdr.entries_pageable) - ? &vm_map_entry_cache - : &vm_map_kentry_cache; entry = vm_map_copy_first_entry(copy); /* @@ -2571,7 +2525,7 @@ kern_return_t vm_map_copyout( vm_map_copy_last_entry(copy), new); next = entry->vme_next; - kmem_cache_free(old_cache, (vm_offset_t) entry); + kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry); entry = next; } } diff --git a/vm/vm_map.h b/vm/vm_map.h index 9b31f90a..b4ba7c7b 100644 --- a/vm/vm_map.h +++ b/vm/vm_map.h @@ -363,9 +363,6 @@ MACRO_END * Exported procedures that operate on vm_map_t. 
*/ -extern vm_offset_t kentry_data; -extern vm_size_t kentry_data_size; -extern int kentry_count; /* Initialize the module */ extern void vm_map_init(void); diff --git a/vm/vm_object.c b/vm/vm_object.c index eda03c65..ece3a83c 100644 --- a/vm/vm_object.c +++ b/vm/vm_object.c @@ -262,7 +262,7 @@ vm_object_t vm_object_allocate( void vm_object_bootstrap(void) { kmem_cache_init(&vm_object_cache, "vm_object", - sizeof(struct vm_object), 0, NULL, NULL, NULL, 0); + sizeof(struct vm_object), 0, NULL, 0); queue_init(&vm_object_cached_list); simple_lock_init(&vm_object_cached_lock_data); diff --git a/vm/vm_page.h b/vm/vm_page.h index 7607aad0..4e870d82 100644 --- a/vm/vm_page.h +++ b/vm/vm_page.h @@ -378,7 +378,8 @@ extern unsigned int vm_page_info( #define VM_PT_TABLE 2 /* Page is part of the page table */ #define VM_PT_PMAP 3 /* Page stores pmap-specific data */ #define VM_PT_KMEM 4 /* Page is part of a kmem slab */ -#define VM_PT_KERNEL 5 /* Type for generic kernel allocations */ +#define VM_PT_STACK 5 /* Type for generic kernel allocations */ +#define VM_PT_KERNEL 6 /* Type for generic kernel allocations */ static inline unsigned short vm_page_type(const struct vm_page *page) diff --git a/vm/vm_resident.c b/vm/vm_resident.c index 9fd64918..dd1cf9cd 100644 --- a/vm/vm_resident.c +++ b/vm/vm_resident.c @@ -193,12 +193,6 @@ void vm_page_bootstrap( vm_page_free_wanted = 0; /* - * Steal memory for the kernel map entries. - */ - - kentry_data = pmap_steal_memory(kentry_data_size); - - /* * Allocate (and initialize) the virtual-to-physical * table hash buckets. * @@ -312,7 +306,7 @@ vm_offset_t pmap_steal_memory( void vm_page_module_init(void) { kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0, - NULL, NULL, NULL, 0); + NULL, 0); } /* |