path: root/vm/vm_map.h
author		Richard Braun <rbraun@sceen.net>	2016-09-07 00:11:08 +0200
committer	Richard Braun <rbraun@sceen.net>	2016-09-07 00:11:08 +0200
commit		e5c7d1c1dda40f8f262e26fed911bfe03027993b (patch)
tree		1e5f9bfb86ef80c2fafdce7a1a9214ba955e9f5f /vm/vm_map.h
parent		efcecd06abb8f7342723a8916917842840e9264f (diff)
Remove map entry pageability property.
Since the replacement of the zone allocator, kernel objects have been
wired in memory. Besides, as of 5e9f6f (Stack the slab allocator
directly on top of the physical allocator), there is a single cache
used to allocate map entries. Those changes make the pageability
attribute of VM maps irrelevant.

* device/ds_routines.c (mach_device_init): Update call to kmem_submap.
* ipc/ipc_init.c (ipc_init): Likewise.
* kern/task.c (task_create): Update call to vm_map_create.
* vm/vm_kern.c (kmem_submap): Remove `pageable' argument. Update call
  to vm_map_setup.
  (kmem_init): Update call to vm_map_setup.
* vm/vm_kern.h (kmem_submap): Update declaration.
* vm/vm_map.c (vm_map_setup): Remove `pageable' argument. Don't set
  `entries_pageable' member.
  (vm_map_create): Likewise.
  (vm_map_copyout): Don't bother creating copies of page entries with
  the right pageability.
  (vm_map_copyin): Don't set `entries_pageable' member.
  (vm_map_fork): Update call to vm_map_create.
* vm/vm_map.h (struct vm_map_header): Remove `entries_pageable'
  member.
  (vm_map_setup, vm_map_create): Remove `pageable' argument.
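
To make the interface change concrete, here is a minimal sketch of an
affected call site. It is not code from the patch: the function and
variable names (example_create_task_map, new_pmap, min_address,
max_address) are invented for illustration, and only the
vm_map_create() prototype follows the declaration change in the diff
below.

#include <vm/vm_map.h>

/* Sketch only: shows how a caller changes with this commit. */
static vm_map_t
example_create_task_map(pmap_t new_pmap,
			vm_offset_t min_address,
			vm_offset_t max_address)
{
	/*
	 * Before this commit, callers passed a boolean_t pageability
	 * flag as the last argument:
	 *
	 *	return vm_map_create(new_pmap, min_address,
	 *			     max_address, FALSE);
	 *
	 * Map entries are now always wired and allocated from a single
	 * cache, so the flag carries no information and is gone.
	 */
	return vm_map_create(new_pmap, min_address, max_address);
}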
Diffstat (limited to 'vm/vm_map.h')
-rw-r--r--	vm/vm_map.h	8
1 files changed, 2 insertions, 6 deletions
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 9e946c5e..dad07139 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -153,8 +153,6 @@ struct vm_map_header {
 	struct rbtree	gap_tree;	/* Sorted tree of gap lists
 					   for allocations */
 	int		nentries;	/* Number of entries */
-	boolean_t	entries_pageable;
-			/* are map entries pageable? */
 };
 
 /*
@@ -380,11 +378,9 @@ MACRO_END
 extern void		vm_map_init(void);
 /* Initialize an empty map */
-extern void		vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t,
-				     boolean_t);
+extern void		vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
 /* Create an empty map */
-extern vm_map_t		vm_map_create(pmap_t, vm_offset_t, vm_offset_t,
-				      boolean_t);
+extern vm_map_t		vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
 /* Create a map in the image of an existing map */
 extern vm_map_t		vm_map_fork(vm_map_t);