author     Richard Braun <rbraun@sceen.net>          2016-09-16 04:39:02 +0200
committer  Richard Braun <rbraun@sceen.net>          2016-09-16 04:55:48 +0200
commit     c78fe96446794f71a2db7d7e3d43cb15658590a3 (patch)
tree       baa0dbed20f9425eca108e4062f28a82a760944c /vm
parent     ce50aa8ba8549d2cabfe9cfc9d324bd08d7430d6 (diff)
VM: improve pageout deadlock workaround
Commit 5dd4f67522ad0d49a2cecdb9b109251f546d4dd1 made VM map entry
allocation run with VM privilege, so that a VM map is never held locked
while physical allocations are paused; such a pause can block the
default pager during page eviction and cause a system-wide deadlock.
It turns out, however, that map entries aren't the only buffers
allocated while a map is locked, and that their number can't easily be
determined in advance, which makes a preallocation strategy very hard
to implement.
This change therefore generalizes the workaround: VM privilege is
raised whenever a VM map is locked, and lowered when it is unlocked.
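
The switch from a boolean to a counter matters because map locks can
nest: a thread may lock a second map before releasing the first, so a
flag cleared at the first unlock would drop the privilege too early.
A counter keeps the privilege raised until the last unlock, wherever
it happens. Below is a minimal standalone sketch of the scheme;
struct thread, map_lock and map_unlock are illustrative stand-ins for
the kernel's current_thread()->vm_privilege handling, not actual
gnumach code.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's per-thread state; the real
   counter is the `vm_privilege' member of struct thread (kern/thread.h). */
struct thread {
        unsigned int vm_privilege;
};

static struct thread self;      /* models current_thread() */

static void map_lock(void)      /* hypothetical, models vm_map_lock() */
{
        /* Raise privilege on every lock; the counter must not wrap. */
        self.vm_privilege++;
        assert(self.vm_privilege != 0);
}

static void map_unlock(void)    /* hypothetical, models vm_map_unlock() */
{
        /* Privilege drops only when the outermost lock is released. */
        self.vm_privilege--;
}

int main(void)
{
        map_lock();             /* outer map */
        map_lock();             /* nested lock of a second map */
        map_unlock();           /* still privileged... */
        printf("after inner unlock: %u\n", self.vm_privilege); /* 1 */
        map_unlock();           /* ...until the last unlock */
        printf("after outer unlock: %u\n", self.vm_privilege); /* 0 */
        return 0;
}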
* device/ds_routines.c (io_done_thread): Use integer values instead
of booleans when setting VM privilege.
* kern/thread.c (thread_init, thread_wire): Likewise.
* vm/vm_pageout.c (vm_pageout): Likewise.
* kern/thread.h (struct thread): Turn member `vm_privilege' into an
unsigned integer.
* vm/vm_map.c (vm_map_lock): New function, where VM privilege is
temporarily increased.
(vm_map_unlock): New function, where VM privilege is decreased.
(_vm_map_entry_create): Remove VM privilege workaround from this
function.
* vm/vm_map.h (vm_map_lock, vm_map_unlock): Turn into functions.
Diffstat (limited to 'vm')
 vm/vm_map.c     | 63
 vm/vm_map.h     |  8
 vm/vm_pageout.c |  2
 3 files changed, 43 insertions, 30 deletions
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f52e7c76..b1c1b4e0 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -222,29 +222,15 @@ vm_map_t vm_map_create(
 	return(result);
 }
 
-/*
- *	vm_map_entry_create:	[ internal use only ]
- *
- *	Allocates a VM map entry for insertion in the
- *	given map (or map copy).  No fields are filled.
- */
-#define vm_map_entry_create(map) \
-	    _vm_map_entry_create(&(map)->hdr)
-
-#define vm_map_copy_entry_create(copy) \
-	    _vm_map_entry_create(&(copy)->cpy_hdr)
-
-vm_map_entry_t _vm_map_entry_create(map_header)
-	const struct vm_map_header *map_header;
+void vm_map_lock(struct vm_map *map)
 {
-	vm_map_entry_t	entry;
-	boolean_t vm_privilege;
+	lock_write(&map->lock);
 
 	/*
-	 * XXX Map entry creation may occur while a map is locked,
+	 * XXX Memory allocation may occur while a map is locked,
 	 * for example when clipping entries. If the system is running
-	 * low on memory, allocating an entry may block until pages
-	 * are available. But if a map used by the default pager is
+	 * low on memory, allocating may block until pages are
+	 * available. But if a map used by the default pager is
 	 * kept locked, a deadlock occurs.
 	 *
 	 * This workaround temporarily elevates the current thread
@@ -252,19 +238,50 @@ vm_map_entry_t _vm_map_entry_create(map_header)
 	 * so regardless of the map for convenience, and because it's
 	 * currently impossible to predict which map the default pager
 	 * may depend on.
+	 *
+	 * This workaround isn't reliable, and only makes exhaustion
+	 * less likely. In particular pageout may cause lots of data
+	 * to be passed between the kernel and the pagers, often
+	 * in the form of large copy maps. Making the minimum
+	 * number of pages depend on the total number of pages
+	 * should make exhaustion even less likely.
 	 */
 
 	if (current_thread()) {
-		vm_privilege = current_thread()->vm_privilege;
-		current_thread()->vm_privilege = TRUE;
+		current_thread()->vm_privilege++;
+		assert(current_thread()->vm_privilege != 0);
 	}
 
-	entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
+	map->timestamp++;
+}
 
+void vm_map_unlock(struct vm_map *map)
+{
 	if (current_thread()) {
-		current_thread()->vm_privilege = vm_privilege;
+		current_thread()->vm_privilege--;
 	}
 
+	lock_write_done(&map->lock);
+}
+
+/*
+ *	vm_map_entry_create:	[ internal use only ]
+ *
+ *	Allocates a VM map entry for insertion in the
+ *	given map (or map copy).  No fields are filled.
+ */
+#define vm_map_entry_create(map) \
+	    _vm_map_entry_create(&(map)->hdr)
+
+#define vm_map_copy_entry_create(copy) \
+	    _vm_map_entry_create(&(copy)->cpy_hdr)
+
+vm_map_entry_t _vm_map_entry_create(map_header)
+	const struct vm_map_header *map_header;
+{
+	vm_map_entry_t	entry;
+
+	entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
 
 	if (entry == VM_MAP_ENTRY_NULL)
 		panic("vm_map_entry_create");
diff --git a/vm/vm_map.h b/vm/vm_map.h
index dad07139..537c36ef 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -352,13 +352,9 @@ MACRO_BEGIN					\
 	(map)->timestamp = 0;			\
 MACRO_END
 
-#define vm_map_lock(map)			\
-MACRO_BEGIN					\
-	lock_write(&(map)->lock);		\
-	(map)->timestamp++;			\
-MACRO_END
+void vm_map_lock(struct vm_map *map);
+void vm_map_unlock(struct vm_map *map);
 
-#define vm_map_unlock(map)	lock_write_done(&(map)->lock)
 #define vm_map_lock_read(map)	lock_read(&(map)->lock)
 #define vm_map_unlock_read(map)	lock_read_done(&(map)->lock)
 #define vm_map_lock_write_to_read(map) \
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index f420804b..e0bd9880 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -918,7 +918,7 @@ void vm_pageout(void)
 {
 	unsigned long free_after_reserve;
 
-	current_thread()->vm_privilege = TRUE;
+	current_thread()->vm_privilege = 1;
 
 	stack_privilege(current_thread());
 
 	thread_set_own_priority(0);
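
For context on what the raised privilege buys: the physical page
allocator keeps a reserve of free pages and normally refuses to
allocate below that threshold, so the pageout daemon and the default
pager can always make progress; a privileged thread is allowed to dip
into the reserve. The standalone sketch below models that check. The
counter names echo gnumach's vm_page_free_count and
vm_page_free_reserved, but the code is illustrative, not the actual
vm_resident.c implementation.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the per-thread privilege counter. */
struct thread {
        unsigned int vm_privilege;
};

static struct thread self;

static struct thread *current_thread(void)
{
        return &self;
}

static unsigned long vm_page_free_count = 10;   /* pretend memory is low */
static unsigned long vm_page_free_reserved = 50;

/* May the calling thread take a page from the free list right now? */
static bool may_grab_page(void)
{
        /* Unprivileged threads must leave the reserve untouched so the
           pageout daemon and the default pager can keep making progress;
           a thread that holds a map lock now has vm_privilege > 0. */
        if (vm_page_free_count < vm_page_free_reserved
            && current_thread()->vm_privilege == 0)
                return false;
        return true;
}

int main(void)
{
        printf("unprivileged: %d\n", may_grab_page());  /* 0 */
        current_thread()->vm_privilege++;   /* what vm_map_lock() now does */
        printf("privileged:   %d\n", may_grab_page());  /* 1 */
        return 0;
}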