| author | Luca Dariz <luca@orpolo.org> | 2024-01-11 22:09:01 +0100 |
|---|---|---|
| committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2024-01-13 22:49:27 +0100 |
| commit | 09ebd602469554ce1a4934a49954e0be8db4da57 (patch) | |
| tree | 35f2ad813ef85c670a57485c31999c3868f1e0b2 /vm | |
| parent | 973fdd3cf6a8c0e462e0f942f572112122bc5e52 (diff) | |
| download | gnumach-09ebd602469554ce1a4934a49954e0be8db4da57.tar.gz gnumach-09ebd602469554ce1a4934a49954e0be8db4da57.tar.bz2 gnumach-09ebd602469554ce1a4934a49954e0be8db4da57.zip | |
adjust range when changing memory pageability
* vm/vm_map.c: use the actual limits of the currently mapped entries,
instead of the map's min/max boundaries, when changing the pageability
of the currently mapped memory.
Passing the map boundaries caused the initial vm_wire_all(host, task,
VM_WIRE_ALL) in glibc startup to fail with KERN_NO_SPACE.
Message-ID: <20240111210907.419689-5-luca@orpolo.org>
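
For context, here is a minimal user-space sketch of the call that exposed the bug. The `vm_wire_all()` RPC is declared in gnumach's `gnumach.defs`; the header paths and the availability of the user stub on a given GNU/Hurd system are assumptions, and wiring may additionally require a privileged host port.

```c
/* Hedged sketch: reproduce the vm_wire_all() call glibc makes at startup.
 * Assumes a GNU/Hurd system where the MIG user stub for vm_wire_all()
 * (from gnumach.defs) and VM_WIRE_ALL (from <mach/vm_wire.h>) are
 * reachable through these headers; exact locations may differ. */
#include <stdio.h>
#include <mach.h>
#include <mach/vm_wire.h>
#include <mach/gnumach.h>

int main(void)
{
	/* Wire every current and future mapping of this task.  Before the
	 * fix, the "current" half operated on the map's min/max boundaries
	 * and returned KERN_NO_SPACE whenever the first mapping did not
	 * start exactly at min_offset. */
	kern_return_t kr = vm_wire_all(mach_host_self(), mach_task_self(),
	                               VM_WIRE_ALL);

	printf("vm_wire_all: %d (0 == KERN_SUCCESS)\n", kr);
	return kr == KERN_SUCCESS ? 0 : 1;
}
```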
Diffstat (limited to 'vm')
| -rw-r--r-- | vm/vm_map.c | 31 |

1 file changed, 26 insertions(+), 5 deletions(-)
```diff
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 26e18676..e454bb2a 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -1829,6 +1829,30 @@ kern_return_t vm_map_pageable(
 	return(KERN_SUCCESS);
 }
 
+/* Update pageability of all the memory currently in the map.
+ * The map must be locked, and protection mismatch will not be checked, see
+ * vm_map_pageable().
+ */
+static kern_return_t
+vm_map_pageable_current(vm_map_t map, vm_prot_t access_type)
+{
+	struct rbtree_node *node;
+	vm_offset_t min_address, max_address;
+
+	node = rbtree_first(&map->hdr.tree);
+	min_address = rbtree_entry(node, struct vm_map_entry,
+				   tree_node)->vme_start;
+
+	node = rbtree_last(&map->hdr.tree);
+	max_address = rbtree_entry(node, struct vm_map_entry,
+				   tree_node)->vme_end;
+
+	/* Returns with the map read-locked if successful */
+	return vm_map_pageable(map, min_address, max_address, access_type,
+			       FALSE, FALSE);
+}
+
+
 /*
  *	vm_map_pageable_all:
  *
@@ -1859,8 +1883,7 @@ vm_map_pageable_all(struct vm_map *map, vm_wire_t flags)
 		map->wiring_required = FALSE;
 
 		/* Returns with the map read-locked if successful */
-		kr = vm_map_pageable(map, map->min_offset, map->max_offset,
-				     VM_PROT_NONE, FALSE, FALSE);
+		kr = vm_map_pageable_current(map, VM_PROT_NONE);
 		vm_map_unlock(map);
 		return kr;
 	}
@@ -1873,9 +1896,7 @@ vm_map_pageable_all(struct vm_map *map, vm_wire_t flags)
 
 	if (flags & VM_WIRE_CURRENT) {
 		/* Returns with the map read-locked if successful */
-		kr = vm_map_pageable(map, map->min_offset, map->max_offset,
-				     VM_PROT_READ | VM_PROT_WRITE,
-				     FALSE, FALSE);
+		kr = vm_map_pageable_current(map, VM_PROT_READ | VM_PROT_WRITE);
 		if (kr != KERN_SUCCESS) {
 			if (flags & VM_WIRE_FUTURE) {
```
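
To see why the old range failed, here is a toy model in plain C, with no Mach headers: the entry layout and the start-address check are simplifications inspired by the commit message, not the real gnumach logic; only the KERN_NO_SPACE value (3) matches `<mach/kern_return.h>`.

```c
/* Toy model of the failure mode fixed above: wiring a range whose start
 * is not covered by any map entry fails, so [min_offset, max_offset)
 * fails whenever the first mapping does not begin at min_offset, while
 * the range derived from the first/last entries always succeeds. */
#include <stdio.h>

#define KERN_SUCCESS  0
#define KERN_NO_SPACE 3

struct entry { unsigned long start, end; };

/* Sorted, non-overlapping entries of a hypothetical task map; the gaps
 * between them model unmapped holes. */
static const struct entry map_entries[] = {
	{ 0x1000,  0x4000  },  /* text  */
	{ 0x10000, 0x15000 },  /* data  */
	{ 0x80000, 0x90000 },  /* stack */
};
#define NENTRIES (sizeof(map_entries) / sizeof(map_entries[0]))

static const unsigned long map_min = 0x0, map_max = 0x100000;

/* Stand-in for vm_map_pageable(): give up with KERN_NO_SPACE when the
 * start of the range does not fall inside any mapped entry. */
static int wire_range(unsigned long start, unsigned long end)
{
	(void)end;  /* the failure of interest is triggered by `start` */
	for (unsigned int i = 0; i < NENTRIES; i++)
		if (start >= map_entries[i].start && start < map_entries[i].end)
			return KERN_SUCCESS;
	return KERN_NO_SPACE;
}

int main(void)
{
	/* Old behaviour: wire [min_offset, max_offset); 0x0 lies in a hole
	 * below the first entry, so the start lookup fails (prints 3). */
	printf("map boundaries: %d\n", wire_range(map_min, map_max));

	/* New behaviour: derive the range from the first and last entries,
	 * as vm_map_pageable_current() does via rbtree_first()/rbtree_last()
	 * (prints 0). */
	printf("actual limits:  %d\n",
	       wire_range(map_entries[0].start,
	                  map_entries[NENTRIES - 1].end));
	return 0;
}
```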