From 98d64d1a78172b1efc26cac36a367eec8496926f Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Wed, 9 Oct 2013 11:51:54 +0200
Subject: VM cache policy change

This patch lets the kernel unconditionally cache non-empty unreferenced
objects instead of using a fixed arbitrary limit. As the pageout daemon
evicts pages, it collects cached objects that have become empty. The
effective result is a graceful adjustment of the number of objects
related to memory management (virtual memory objects, their associated
ports, and potentially objects maintained in the external memory
managers). Physical memory can now be almost entirely filled up with
cached pages. In addition, these cached pages are not automatically
deactivated, since objects can quickly be referenced again.

There are problems with this patch, however. The first is that, on
machines with a large amount of physical memory (above 1 GiB, though it
also depends on usage patterns), scalability issues are exposed. For
example, file systems which don't throttle their writeback requests can
create thread storms, strongly reducing system responsiveness. Other
issues, such as linear scans of memory objects, also add visible CPU
overhead. The second is that, as most memory is used, the chances of
swapping deadlocks increase. Applications that map large objects and
quickly cause lots of page faults can still easily bring the system to
its knees.
---
 vm/vm_object.h | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'vm/vm_object.h')

diff --git a/vm/vm_object.h b/vm/vm_object.h
index adeff657..95798790 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -72,7 +72,7 @@ struct vm_object {
 	 */

 	int			ref_count;	/* Number of references */
-	int			resident_page_count;
+	unsigned long		resident_page_count;
 					/* number of resident pages */

 	struct vm_object	*copy;		/* Object that should receive
@@ -169,6 +169,7 @@ vm_object_t	kernel_object;		/* the single kernel object */

 extern void		vm_object_bootstrap(void);
 extern void		vm_object_init(void);
+extern void		vm_object_collect(vm_object_t);
 extern void		vm_object_terminate(vm_object_t);
 extern vm_object_t	vm_object_allocate(vm_size_t);
 extern void		vm_object_reference(vm_object_t);
@@ -280,6 +281,10 @@ extern void	vm_object_pager_wakeup(ipc_port_t	pager);
 /*
  *	Routines implemented as macros
  */

+#define vm_object_collectable(object)	\
+	(((object)->ref_count == 0)	\
+	 && ((object)->resident_page_count == 0))
+
 #define	vm_object_paging_begin(object) 	\
 	((object)->paging_in_progress++)
--
cgit v1.2.3
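For context, the policy in this first patch hinges on the new
vm_object_collectable() macro together with the vm_object_collect()
entry point: once page eviction empties an unreferenced cached object,
the object itself can be reaped on the spot. The sketch below only
illustrates that calling pattern; it is not the code actually added to
the pageout path, the helper name reclaim_one_page() is made up, and
locking is reduced to the object lock.

/*
 * Illustrative only: a pageout-style eviction step that reaps an
 * object once its last resident page is gone.  reclaim_one_page()
 * is a hypothetical helper, not a function from the tree.
 */
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static void reclaim_one_page(vm_page_t m)
{
	vm_object_t object = m->object;

	vm_object_lock(object);

	/* Evict the page; this drops the object's resident page count. */
	VM_PAGE_FREE(m);

	/*
	 * Under the new policy an unreferenced object stays cached as
	 * long as it still holds resident pages.  Once the last page is
	 * gone it becomes collectable, and reaping it here is what lets
	 * the number of cached objects shrink gracefully as the pageout
	 * daemon makes progress.
	 */
	if (vm_object_collectable(object))
		vm_object_collect(object);	/* terminates the object */
	else
		vm_object_unlock(object);
}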
From c774e89387a43d737abbdd99781a294c1cceebb2 Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Sun, 7 Feb 2016 14:08:24 +0100
Subject: Fix page cache accounting

* vm/vm_object.c (vm_object_bootstrap): Set template object `cached'
member to FALSE.
(vm_object_cache_add, vm_object_cache_remove): New functions.
(vm_object_collect, vm_object_deallocate, vm_object_lookup,
vm_object_lookup_name, vm_object_destroy): Use new cache management
functions.
(vm_object_terminate, vm_object_collapse): Make sure object isn't
cached.
* vm/vm_object.h (struct vm_object): New `cached' member.
---
 vm/vm_object.c | 70 +++++++++++++++++++++++++++++++++-------------------------
 vm/vm_object.h |  3 ++-
 2 files changed, 42 insertions(+), 31 deletions(-)

(limited to 'vm/vm_object.h')

diff --git a/vm/vm_object.c b/vm/vm_object.c
index 9a019f6e..526b6f33 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -300,6 +300,7 @@ void vm_object_bootstrap(void)
 	vm_object_template.paging_in_progress = 0;

 	vm_object_template.can_persist = FALSE;
+	vm_object_template.cached = FALSE;
 	vm_object_template.internal = TRUE;
 	vm_object_template.temporary = TRUE;
 	vm_object_template.alive = TRUE;
@@ -345,6 +346,33 @@ void vm_object_init(void)
 		IKOT_PAGING_NAME);
 }

+/*
+ *	Object cache management functions.
+ *
+ *	Both the cache and the object must be locked
+ *	before calling these functions.
+ */
+
+static void vm_object_cache_add(
+	vm_object_t	object)
+{
+	assert(!object->cached);
+	queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
+	vm_object_cached_count++;
+	vm_object_cached_pages_update(object->resident_page_count);
+	object->cached = TRUE;
+}
+
+static void vm_object_cache_remove(
+	vm_object_t	object)
+{
+	assert(object->cached);
+	queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
+	vm_object_cached_count--;
+	vm_object_cached_pages_update(-object->resident_page_count);
+	object->cached = FALSE;
+}
+
 void vm_object_collect(
 	register vm_object_t	object)
 {
@@ -368,7 +396,7 @@ void vm_object_collect(
 		return;
 	}

-	queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
+	vm_object_cache_remove(object);

 	vm_object_terminate(object);
 }
@@ -435,12 +463,8 @@ void vm_object_deallocate(
 		 *	it in the cache.
 		 */
 		if (object->can_persist && (object->resident_page_count > 0)) {
-			queue_enter(&vm_object_cached_list, object,
-				vm_object_t, cached_list);
-			vm_object_cached_count++;
-			vm_object_cached_pages_update(object->resident_page_count);
+			vm_object_cache_add(object);
 			vm_object_cache_unlock();
-
 			vm_object_unlock(object);
 			return;
 		}
@@ -601,6 +625,7 @@ void vm_object_terminate(
 	assert(object->ref_count == 0);
 	assert(object->paging_in_progress == 0);
+	assert(!object->cached);

 	/*
 	 *	Throw away port rights... note that they may
@@ -1803,12 +1828,8 @@ vm_object_t vm_object_lookup(

 		assert(object->alive);

-		if (object->ref_count == 0) {
-			queue_remove(&vm_object_cached_list, object,
-					vm_object_t, cached_list);
-			vm_object_cached_count--;
-			vm_object_cached_pages_update(-object->resident_page_count);
-		}
+		if (object->ref_count == 0)
+			vm_object_cache_remove(object);

 		object->ref_count++;
 		vm_object_unlock(object);
@@ -1835,12 +1856,8 @@ vm_object_t vm_object_lookup_name(

 		assert(object->alive);

-		if (object->ref_count == 0) {
-			queue_remove(&vm_object_cached_list, object,
-					vm_object_t, cached_list);
-			vm_object_cached_count--;
-			vm_object_cached_pages_update(-object->resident_page_count);
-		}
+		if (object->ref_count == 0)
+			vm_object_cache_remove(object);

 		object->ref_count++;
 		vm_object_unlock(object);
@@ -1872,12 +1889,8 @@ void vm_object_destroy(

 	object = (vm_object_t) pager->ip_kobject;
 	vm_object_lock(object);
-	if (object->ref_count == 0) {
-		queue_remove(&vm_object_cached_list, object,
-				vm_object_t, cached_list);
-		vm_object_cached_count--;
-		vm_object_cached_pages_update(-object->resident_page_count);
-	}
+	if (object->ref_count == 0)
+		vm_object_cache_remove(object);
 	object->ref_count++;

 	object->can_persist = FALSE;
@@ -2026,12 +2039,8 @@ restart:
 	if ((object != VM_OBJECT_NULL) && !must_init) {
 		vm_object_lock(object);

-		if (object->ref_count == 0) {
-			queue_remove(&vm_object_cached_list, object,
-					vm_object_t, cached_list);
-			vm_object_cached_count--;
-			vm_object_cached_pages_update(-object->resident_page_count);
-		}
+		if (object->ref_count == 0)
+			vm_object_cache_remove(object);

 		object->ref_count++;
 		vm_object_unlock(object);
@@ -2566,6 +2575,7 @@ void vm_object_collapse(
 			);

 			assert(backing_object->alive);
+			assert(!backing_object->cached);
 			backing_object->alive = FALSE;
 			vm_object_unlock(backing_object);
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 95798790..6b9f0bcf 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -148,8 +148,9 @@ struct vm_object {
 						 */
 	/* boolean_t */		use_shared_copy : 1,/* Use shared (i.e.,
 						     * delayed) copy on write */
-	/* boolean_t */		shadowed: 1;	/* Shadow may exist */
+	/* boolean_t */		shadowed: 1,	/* Shadow may exist */
+	/* boolean_t */		cached: 1;	/* Object is cached */

 	queue_chain_t		cached_list;	/* Attachment point for the list
 						 * of objects cached as a result
 						 * of their can_persist value
--
cgit v1.2.3
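To make the accounting fix concrete, here is a hypothetical
reactivation helper mirroring the pattern now used by
vm_object_lookup(), vm_object_lookup_name() and vm_object_destroy():
taking a cached object back into use goes through
vm_object_cache_remove(), so the `cached' bit, vm_object_cached_count
and the cached page total stay in sync. The helper name is illustrative
only, and since the cache helpers are static it would have to live
inside vm/vm_object.c.

/*
 * Illustrative only: reactivating a possibly cached object, as the
 * lookup paths do after this change.  reactivate_object() is a
 * hypothetical name, not a function from the tree.
 */
static void reactivate_object(vm_object_t object)
{
	/* Cache lock first, then the object lock. */
	vm_object_cache_lock();
	vm_object_lock(object);

	assert(object->alive);

	/*
	 * A cached object has ref_count == 0 and cached == TRUE.
	 * vm_object_cache_remove() clears the bit, decrements
	 * vm_object_cached_count and subtracts the object's resident
	 * pages from the cached page total, so the new assertions in
	 * vm_object_terminate() and vm_object_collapse() cannot fire
	 * later.
	 */
	if (object->ref_count == 0)
		vm_object_cache_remove(object);

	object->ref_count++;

	vm_object_unlock(object);
	vm_object_cache_unlock();
}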