diff options
author | Richard Braun <rbraun@sceen.net> | 2016-12-24 02:31:34 +0100 |
---|---|---|
committer | Richard Braun <rbraun@sceen.net> | 2016-12-24 02:31:34 +0100 |
commit | 2b0f19f602e08fd9d37268233b962674fd592634 (patch) | |
tree | 90a4bfa49483ccdcddbbcd20749de7f864272d64 /vm/vm_map.c | |
parent | 023401c5b97023670a44059a60eb2a3a11c8a929 (diff) | |
download | gnumach-2b0f19f602e08fd9d37268233b962674fd592634.tar.gz gnumach-2b0f19f602e08fd9d37268233b962674fd592634.tar.bz2 gnumach-2b0f19f602e08fd9d37268233b962674fd592634.zip |
VM: add the vm_wire_all call
This call maps the POSIX mlockall and munlockall calls.
* Makefrag.am (include_mach_HEADERS): Add include/mach/vm_wire.h.
* include/mach/gnumach.defs (vm_wire_t): New type.
(vm_wire_all): New routine.
* include/mach/mach_types.h: Include mach/vm_wire.h.
* vm/vm_map.c: Likewise.
(vm_map_enter): Automatically wire new entries if requested.
(vm_map_copyout): Likewise.
(vm_map_pageable_all): New function.
vm/vm_map.h: Include mach/vm_wire.h.
(struct vm_map): Update description of member `wiring_required'.
(vm_map_pageable_all): New function.
* vm/vm_user.c (vm_wire_all): New function.
Diffstat (limited to 'vm/vm_map.c')
-rw-r--r-- | vm/vm_map.c | 91 |
1 file changed, 86 insertions, 5 deletions
diff --git a/vm/vm_map.c b/vm/vm_map.c index c618e63d..855d7997 100644 --- a/vm/vm_map.c +++ b/vm/vm_map.c @@ -39,6 +39,7 @@ #include <mach/port.h> #include <mach/vm_attributes.h> #include <mach/vm_param.h> +#include <mach/vm_wire.h> #include <kern/assert.h> #include <kern/debug.h> #include <kern/kalloc.h> @@ -1108,6 +1109,15 @@ kern_return_t vm_map_enter( SAVE_HINT(map, new_entry); + if (map->wiring_required) { + /* Returns with the map read-locked if successful */ + result = vm_map_pageable(map, start, end, cur_protection, FALSE, FALSE); + + if (result != KERN_SUCCESS) { + RETURN(KERN_SUCCESS); + } + } + vm_map_unlock(map); if ((object != VM_OBJECT_NULL) && @@ -1746,6 +1756,69 @@ kern_return_t vm_map_pageable( } /* + * vm_map_pageable_all: + * + * Sets the pageability of an entire map. If the VM_WIRE_CURRENT + * flag is set, then all current mappings are locked down. If the + * VM_WIRE_FUTURE flag is set, then all mappings created after the + * call returns are locked down. If no flags are passed + * (i.e. VM_WIRE_NONE), all mappings become pageable again, and + * future mappings aren't automatically locked down any more. + * + * The access type of the mappings match their current protection. + * Null mappings (with protection PROT_NONE) are updated to track + * that they should be wired in case they become accessible. 
+ */ +kern_return_t +vm_map_pageable_all(struct vm_map *map, vm_wire_t flags) +{ + boolean_t wiring_required; + kern_return_t kr; + + if ((flags & ~VM_WIRE_ALL) != 0) { + return KERN_INVALID_ARGUMENT; + } + + vm_map_lock(map); + + if (flags == VM_WIRE_NONE) { + map->wiring_required = FALSE; + + /* Returns with the map read-locked if successful */ + kr = vm_map_pageable(map, map->min_offset, map->max_offset, + VM_PROT_NONE, FALSE, FALSE); + vm_map_unlock(map); + return kr; + } + + wiring_required = map->wiring_required; + + if (flags & VM_WIRE_FUTURE) { + map->wiring_required = TRUE; + } + + if (flags & VM_WIRE_CURRENT) { + /* Returns with the map read-locked if successful */ + kr = vm_map_pageable(map, map->min_offset, map->max_offset, + VM_PROT_READ | VM_PROT_WRITE, + FALSE, FALSE); + + if (kr != KERN_SUCCESS) { + if (flags & VM_WIRE_FUTURE) { + map->wiring_required = wiring_required; + } + + vm_map_unlock(map); + return kr; + } + } + + vm_map_unlock(map); + + return KERN_SUCCESS; +} + +/* * vm_map_entry_delete: [ internal use only ] * * Deallocate the given entry from the target map. @@ -2605,6 +2678,7 @@ kern_return_t vm_map_copyout( vm_offset_t vm_copy_start; vm_map_entry_t last; vm_map_entry_t entry; + kern_return_t kr; /* * Check for null copy object. 
@@ -2624,7 +2698,6 @@ kern_return_t vm_map_copyout( vm_object_t object = copy->cpy_object; vm_size_t offset = copy->offset; vm_size_t tmp_size = copy->size; - kern_return_t kr; *dst_addr = 0; kr = vm_map_enter(dst_map, dst_addr, tmp_size, @@ -2764,11 +2837,19 @@ kern_return_t vm_map_copyout( vm_map_copy_insert(dst_map, last, copy); - vm_map_unlock(dst_map); + if (dst_map->wiring_required) { + /* Returns with the map read-locked if successful */ + kr = vm_map_pageable(dst_map, start, start + size, + VM_PROT_READ | VM_PROT_WRITE, + FALSE, FALSE); - /* - * XXX If wiring_required, call vm_map_pageable - */ + if (kr != KERN_SUCCESS) { + vm_map_unlock(dst_map); + return kr; + } + } + + vm_map_unlock(dst_map); return(KERN_SUCCESS); } |