author    | Richard Braun <rbraun@sceen.net> | 2016-09-07 00:11:08 +0200
committer | Richard Braun <rbraun@sceen.net> | 2016-09-07 00:11:08 +0200
commit    | e5c7d1c1dda40f8f262e26fed911bfe03027993b
tree      | 1e5f9bfb86ef80c2fafdce7a1a9214ba955e9f5f /vm/vm_kern.c
parent    | efcecd06abb8f7342723a8916917842840e9264f
Remove map entry pageability property.
Since the replacement of the zone allocator, kernel objects have been
wired in memory. In addition, as of 5e9f6f (Stack the slab allocator
directly on top of the physical allocator), a single cache is used to
allocate map entries.

These changes make the pageability attribute of VM maps irrelevant.
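As background for the rationale above: after the cited commits, every map
entry comes from one slab cache backed by wired memory, so a per-map
pageability flag no longer selects an allocation path. The sketch below is
illustrative only, with the allocation path abbreviated; it is not the
actual vm/vm_map.c code.

```c
/* Sketch (not the actual gnumach code): a single wired slab cache
 * serves map entries for all maps, kernel and user alike. */
struct kmem_cache vm_map_entry_cache;

static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	(void) map;	/* the map no longer matters for entry allocation */

	/* No entries_pageable check: the cache is always wired. */
	return (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
}
```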
* device/ds_routines.c (mach_device_init): Update call to kmem_submap.
* ipc/ipc_init.c (ipc_init): Likewise.
* kern/task.c (task_create): Update call to vm_map_create.
* vm/vm_kern.c (kmem_submap): Remove `pageable' argument. Update call
to vm_map_setup.
(kmem_init): Update call to vm_map_setup.
* vm/vm_kern.h (kmem_submap): Update declaration.
* vm/vm_map.c (vm_map_setup): Remove `pageable' argument. Don't set
`entries_pageable' member.
(vm_map_create): Likewise.
(vm_map_copyout): Don't bother creating copies of map entries with
the right pageability.
(vm_map_copyin): Don't set `entries_pageable' member.
(vm_map_fork): Update call to vm_map_create.
* vm/vm_map.h (struct vm_map_header): Remove `entries_pageable' member.
(vm_map_setup, vm_map_create): Remove `pageable' argument.
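Since the diff on this page is limited to vm/vm_kern.c, the vm_map side of
the change is not shown. The following is a rough sketch of what the
entries above describe; the header layout is abbreviated and only
indicative, not copied from the actual files.

```c
/* vm/vm_map.h (sketch): the map header drops the pageability flag. */
struct vm_map_header {
	struct vm_map_links	links;		/* map start/end, entry list */
	int			nentries;	/* number of entries in list */
	/* boolean_t		entries_pageable;  <- removed by this commit */
};

/* vm/vm_map.c (sketch): `pageable' disappears from the interface. */
void vm_map_setup(
	vm_map_t	map,
	pmap_t		pmap,
	vm_offset_t	min,
	vm_offset_t	max)
{
	/* ... same initialization as before, minus the
	 * map->hdr.entries_pageable assignment ... */
}
```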
Diffstat (limited to 'vm/vm_kern.c')
-rw-r--r-- | vm/vm_kern.c | 8
1 file changed, 3 insertions, 5 deletions
```diff
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index 9c0a20b7..81bb1531 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -778,8 +778,7 @@ kmem_submap(
 	vm_map_t parent,
 	vm_offset_t *min,
 	vm_offset_t *max,
-	vm_size_t size,
-	boolean_t pageable)
+	vm_size_t size)
 {
 	vm_offset_t addr;
 	kern_return_t kr;
@@ -802,7 +801,7 @@ kmem_submap(
 		panic("kmem_submap");
 
 	pmap_reference(vm_map_pmap(parent));
-	vm_map_setup(map, vm_map_pmap(parent), addr, addr + size, pageable);
+	vm_map_setup(map, vm_map_pmap(parent), addr, addr + size);
 	kr = vm_map_submap(parent, addr, addr + size, map);
 	if (kr != KERN_SUCCESS)
 		panic("kmem_submap");
@@ -821,8 +820,7 @@ void kmem_init(
 	vm_offset_t start,
 	vm_offset_t end)
 {
-	vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
-		     FALSE);
+	vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end);
 
 	/*
 	 * Reserve virtual memory allocated up to this time.
```
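For completeness, the caller updates listed in the ChangeLog reduce to
dropping the trailing FALSE argument. A hypothetical before/after for one
kmem_submap caller is shown below; the variable names and size expression
are illustrative, not copied from device/ds_routines.c.

```c
/* Before: kernel submaps always passed FALSE for `pageable'. */
kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
	    device_io_size, FALSE);

/* After: submaps are unconditionally wired, so the flag is gone. */
kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
	    device_io_size);
```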