Diffstat (limited to 'vm')
-rw-r--r--   vm/vm_map.c     | 63
-rw-r--r--   vm/vm_map.h     |  8
-rw-r--r--   vm/vm_pageout.c |  2
3 files changed, 43 insertions(+), 30 deletions(-)
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f52e7c76..b1c1b4e0 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -222,29 +222,15 @@ vm_map_t vm_map_create(
 	return(result);
 }
 
-/*
- *	vm_map_entry_create:	[ internal use only ]
- *
- *	Allocates a VM map entry for insertion in the
- *	given map (or map copy). No fields are filled.
- */
-#define vm_map_entry_create(map) \
-	    _vm_map_entry_create(&(map)->hdr)
-
-#define vm_map_copy_entry_create(copy) \
-	    _vm_map_entry_create(&(copy)->cpy_hdr)
-
-vm_map_entry_t _vm_map_entry_create(map_header)
-	const struct vm_map_header *map_header;
+void vm_map_lock(struct vm_map *map)
 {
-	vm_map_entry_t	entry;
-	boolean_t vm_privilege;
+	lock_write(&map->lock);
 
 	/*
-	 * XXX Map entry creation may occur while a map is locked,
+	 * XXX Memory allocation may occur while a map is locked,
 	 * for example when clipping entries. If the system is running
-	 * low on memory, allocating an entry may block until pages
-	 * are available. But if a map used by the default pager is
+	 * low on memory, allocating may block until pages are
+	 * available. But if a map used by the default pager is
 	 * kept locked, a deadlock occurs.
 	 *
 	 * This workaround temporarily elevates the current thread
@@ -252,19 +238,50 @@ vm_map_entry_t _vm_map_entry_create(map_header)
 	 * so regardless of the map for convenience, and because it's
 	 * currently impossible to predict which map the default pager
 	 * may depend on.
+	 *
+	 * This workaround isn't reliable, and only makes exhaustion
+	 * less likely. In particular pageout may cause lots of data
+	 * to be passed between the kernel and the pagers, often
+	 * in the form of large copy maps. Making the minimum
+	 * number of pages depend on the total number of pages
+	 * should make exhaustion even less likely.
 	 */
 
 	if (current_thread()) {
-		vm_privilege = current_thread()->vm_privilege;
-		current_thread()->vm_privilege = TRUE;
+		current_thread()->vm_privilege++;
+		assert(current_thread()->vm_privilege != 0);
 	}
 
-	entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
+	map->timestamp++;
+}
 
+void vm_map_unlock(struct vm_map *map)
+{
 	if (current_thread()) {
-		current_thread()->vm_privilege = vm_privilege;
+		current_thread()->vm_privilege--;
 	}
 
+	lock_write_done(&map->lock);
+}
+
+/*
+ *	vm_map_entry_create:	[ internal use only ]
+ *
+ *	Allocates a VM map entry for insertion in the
+ *	given map (or map copy). No fields are filled.
+ */
+#define vm_map_entry_create(map) \
+	    _vm_map_entry_create(&(map)->hdr)
+
+#define vm_map_copy_entry_create(copy) \
+	    _vm_map_entry_create(&(copy)->cpy_hdr)
+
+vm_map_entry_t _vm_map_entry_create(map_header)
+	const struct vm_map_header *map_header;
+{
+	vm_map_entry_t	entry;
+
+	entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
 	if (entry == VM_MAP_ENTRY_NULL)
 		panic("vm_map_entry_create");
 
diff --git a/vm/vm_map.h b/vm/vm_map.h
index dad07139..537c36ef 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -352,13 +352,9 @@ MACRO_BEGIN					\
 	(map)->timestamp = 0;			\
 MACRO_END
 
-#define vm_map_lock(map) \
-MACRO_BEGIN \
-	lock_write(&(map)->lock); \
-	(map)->timestamp++; \
-MACRO_END
+void vm_map_lock(struct vm_map *map);
+void vm_map_unlock(struct vm_map *map);
 
-#define vm_map_unlock(map)	lock_write_done(&(map)->lock)
 #define vm_map_lock_read(map)	lock_read(&(map)->lock)
 #define vm_map_unlock_read(map)	lock_read_done(&(map)->lock)
 #define vm_map_lock_write_to_read(map) \
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index f420804b..e0bd9880 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -918,7 +918,7 @@ void vm_pageout(void)
 {
 	unsigned long free_after_reserve;
 
-	current_thread()->vm_privilege = TRUE;
+	current_thread()->vm_privilege = 1;
 	stack_privilege(current_thread());
 	thread_set_own_priority(0);
 
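Because the lock primitives now adjust the privilege themselves, vm_privilege has to behave as a counter rather than a boolean: the pageout daemon keeps a permanent baseline of 1 (the vm_pageout.c hunk), and the balanced increment/decrement around each lock/unlock pair preserves that baseline, whereas a flag set on lock and cleared on unlock would wipe it out at the first unlock. Below is a minimal, self-contained sketch of that bookkeeping; the fake_* names and the bare global are stand-ins for the kernel's thread and lock machinery and are not part of the Mach sources.

/*
 * Illustrative sketch only -- not part of the patch.  The fake_*
 * names stand in for current_thread(), vm_map_lock() and
 * vm_map_unlock(); the point is the counter semantics.
 */
#include <assert.h>
#include <stdio.h>

struct fake_thread {
	unsigned int vm_privilege;	/* 0 = normal, > 0 = privileged */
};

static struct fake_thread cur;		/* stand-in for current_thread() */

static void fake_map_lock(void)
{
	/* Privilege is elevated for as long as any map lock is held. */
	cur.vm_privilege++;
	assert(cur.vm_privilege != 0);	/* overflow check, as in the patch */
}

static void fake_map_unlock(void)
{
	assert(cur.vm_privilege > 0);
	cur.vm_privilege--;
}

int main(void)
{
	/* Pageout-daemon style: a permanent baseline privilege of 1. */
	cur.vm_privilege = 1;

	fake_map_lock();		/* counter goes to 2 */
	fake_map_unlock();		/* back to 1: baseline preserved */

	/*
	 * A boolean cleared on unlock would have dropped the daemon's
	 * permanent privilege at this point; the counter restores it.
	 */
	printf("privilege after lock/unlock: %u\n", cur.vm_privilege);
	return 0;
}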