author    Richard Braun <rbraun@sceen.net>    2016-09-20 23:44:23 +0200
committer Richard Braun <rbraun@sceen.net>    2016-09-21 00:21:42 +0200
commit    5d1258459ad618481a4f239e8ce020bdecda1d3f
tree      507285e032d20fc29237e5a415e9d76380130607
parent    783ad37f65384994dfa5387ab3847a8a4d77b90b
Rework pageout to handle multiple segments
As we're about to use a new HIGHMEM segment, potentially much larger
than the existing DMA and DIRECTMAP ones, it's now compulsory to make
the pageout daemon aware of those segments. And while we're at it,
let's fix some of the defects that have been plaguing pageout forever,
such as throttling, and pageout of internal versus external pages
(this commit notably introduces a hardcoded policy in which as many
external pages are selected before considering internal pages).

* kern/slab.c (kmem_pagefree_physmem): Update call to vm_page_release.
* vm/vm_page.c: Include <kern/counters.h> and <vm/vm_pageout.h>.
(VM_PAGE_SEG_THRESHOLD_MIN_NUM, VM_PAGE_SEG_THRESHOLD_MIN_DENOM,
VM_PAGE_SEG_THRESHOLD_MIN, VM_PAGE_SEG_THRESHOLD_LOW_NUM,
VM_PAGE_SEG_THRESHOLD_LOW_DENOM, VM_PAGE_SEG_THRESHOLD_LOW,
VM_PAGE_SEG_THRESHOLD_HIGH_NUM, VM_PAGE_SEG_THRESHOLD_HIGH_DENOM,
VM_PAGE_SEG_THRESHOLD_HIGH, VM_PAGE_SEG_MIN_PAGES,
VM_PAGE_HIGH_ACTIVE_PAGE_NUM, VM_PAGE_HIGH_ACTIVE_PAGE_DENOM): New
macros.
(struct vm_page_queue): New type.
(struct vm_page_seg): Add new members `min_free_pages',
`low_free_pages', `high_free_pages', `active_pages',
`nr_active_pages', `high_active_pages', `inactive_pages',
`nr_inactive_pages'.
(vm_page_alloc_paused): New variable.
(vm_page_pageable, vm_page_can_move, vm_page_remove_mappings): New
functions.
(vm_page_seg_alloc_from_buddy): Pause allocations and start the
pageout daemon as appropriate.
(vm_page_queue_init, vm_page_queue_push, vm_page_queue_remove,
vm_page_queue_first, vm_page_seg_get, vm_page_seg_index,
vm_page_seg_compute_pageout_thresholds): New functions.
(vm_page_seg_init): Initialize the new segment members.
(vm_page_seg_add_active_page, vm_page_seg_remove_active_page,
vm_page_seg_add_inactive_page, vm_page_seg_remove_inactive_page,
vm_page_seg_pull_active_page, vm_page_seg_pull_inactive_page,
vm_page_seg_pull_cache_page): New functions.
(vm_page_seg_min_page_available, vm_page_seg_page_available,
vm_page_seg_usable, vm_page_seg_double_lock,
vm_page_seg_double_unlock, vm_page_seg_balance_page,
vm_page_seg_balance, vm_page_seg_evict,
vm_page_seg_compute_high_active_page, vm_page_seg_refill_inactive,
vm_page_lookup_seg, vm_page_check): New functions.
(vm_page_alloc_pa): Handle allocation failure from VM privileged
thread.
(vm_page_info_all): Display additional segment properties.
(vm_page_wire, vm_page_unwire, vm_page_deactivate, vm_page_activate,
vm_page_wait): Move from vm/vm_resident.c and rewrite to use segments.
(vm_page_queues_remove, vm_page_check_usable, vm_page_may_balance,
vm_page_balance_once, vm_page_balance, vm_page_evict_once): New
functions.
(VM_PAGE_MAX_LAUNDRY, VM_PAGE_MAX_EVICTIONS): New macros.
(vm_page_evict, vm_page_refill_inactive): New functions.
* vm/vm_page.h: Include <kern/list.h>.
(struct vm_page): Remove member `pageq', reuse the `node' member
instead, move the `listq' and `next' members above `vm_page_header'.
(VM_PAGE_CHECK): Define as an alias to vm_page_check.
(vm_page_check): New function declaration.
(vm_page_queue_fictitious, vm_page_queue_active,
vm_page_queue_inactive, vm_page_free_target, vm_page_free_min,
vm_page_inactive_target, vm_page_free_reserved,
vm_page_free_wanted): Remove extern declarations.
(vm_page_external_pagedout): New extern declaration.
(vm_page_release): Update declaration.
(VM_PAGE_QUEUES_REMOVE): Define as an alias to vm_page_queues_remove.
(VM_PT_PMAP, VM_PT_KMEM, VM_PT_STACK): Remove macros.
(VM_PT_KERNEL): Update value.
(vm_page_queues_remove, vm_page_balance, vm_page_evict,
vm_page_refill_inactive): New function declarations.
* vm/vm_pageout.c (VM_PAGEOUT_BURST_MAX, VM_PAGEOUT_BURST_MIN,
VM_PAGEOUT_BURST_WAIT, VM_PAGEOUT_EMPTY_WAIT, VM_PAGEOUT_PAUSE_MAX,
VM_PAGE_INACTIVE_TARGET, VM_PAGE_FREE_TARGET, VM_PAGE_FREE_MIN,
VM_PAGE_FREE_RESERVED, VM_PAGEOUT_RESERVED_INTERNAL,
VM_PAGEOUT_RESERVED_REALLY): Remove macros.
(vm_pageout_reserved_internal, vm_pageout_reserved_really,
vm_pageout_burst_max, vm_pageout_burst_min, vm_pageout_burst_wait,
vm_pageout_empty_wait, vm_pageout_pause_count, vm_pageout_pause_max,
vm_pageout_active, vm_pageout_inactive, vm_pageout_inactive_nolock,
vm_pageout_inactive_busy, vm_pageout_inactive_absent,
vm_pageout_inactive_used, vm_pageout_inactive_clean,
vm_pageout_inactive_dirty, vm_pageout_inactive_double,
vm_pageout_inactive_cleaned_external): Remove variables.
(vm_pageout_requested, vm_pageout_continue): New variables.
(vm_pageout_setup): Wait for page allocation to succeed instead of
falling back to flush, update double paging protocol with caller,
add pageout throttling setup.
(vm_pageout_scan): Rewrite to use the new vm_page balancing, eviction
and inactive queue refill functions.
(vm_pageout_scan_continue, vm_pageout_continue): Remove functions.
(vm_pageout): Rewrite.
(vm_pageout_start, vm_pageout_resume): New functions.
* vm/vm_pageout.h (vm_pageout_continue, vm_pageout_scan_continue):
Remove function declarations.
(vm_pageout_start, vm_pageout_resume): New function declarations.
* vm/vm_resident.c: Include <kern/list.h>.
(vm_page_queue_fictitious): Define as a struct list.
(vm_page_free_wanted, vm_page_external_count, vm_page_free_avail,
vm_page_queue_active, vm_page_queue_inactive, vm_page_free_target,
vm_page_free_min, vm_page_inactive_target, vm_page_free_reserved):
Remove variables.
(vm_page_external_pagedout): New variable.
(vm_page_bootstrap): Don't initialize removed variables, update
initialization of vm_page_queue_fictitious.
(vm_page_replace): Call VM_PAGE_QUEUES_REMOVE where appropriate.
(vm_page_remove): Likewise.
(vm_page_grab_fictitious): Update to use list_xxx functions.
(vm_page_release_fictitious): Likewise.
(vm_page_grab): Remove pageout related code.
(vm_page_release): Add `laundry' and `external' parameters for
pageout throttling.
(vm_page_grab_contig): Remove pageout related code.
(vm_page_free_contig): Likewise.
(vm_page_free): Remove pageout related code, update call to
vm_page_release.
(vm_page_wait, vm_page_wire, vm_page_unwire, vm_page_deactivate,
vm_page_activate): Move to vm/vm_page.c.
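The hardcoded external-before-internal eviction policy described above
implies that each segment keeps the two classes of pages on separate
queues. The diff below only shows the vm_page.h side of the change, so
here is a minimal sketch of what the new struct vm_page_queue type in
vm/vm_page.c might look like; the member names are assumptions, not
taken from this page:

/*
 * Hypothetical sketch of the per-segment page queue type.
 * Separate lists let the eviction code drain external (file-backed)
 * pages before touching internal (anonymous) ones, as the commit
 * message describes. Member names are illustrative assumptions.
 */
struct vm_page_queue {
    struct list internal_pages;    /* pages of internal objects */
    struct list external_pages;    /* pages of external objects */
};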
Diffstat (limited to 'vm/vm_page.h')
-rw-r--r--  vm/vm_page.h | 114
1 file changed, 62 insertions(+), 52 deletions(-)
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 164ab6d4..eb684c1b 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -40,6 +40,7 @@
#include <vm/vm_object.h>
#include <vm/vm_types.h>
#include <kern/queue.h>
+#include <kern/list.h>
#include <kern/lock.h>
#include <kern/log2.h>
@@ -77,8 +78,7 @@
*/
struct vm_page {
- /* Members used in the vm_page module only */
- struct list node;
+ struct list node; /* page queues or free list (P) */
unsigned short type;
unsigned short seg_index;
unsigned short order;
@@ -90,15 +90,13 @@ struct vm_page {
*/
phys_addr_t phys_addr;
+ queue_chain_t listq; /* all pages in same object (O) */
+ struct vm_page *next; /* VP bucket link (O) */
+
/* We use an empty struct as the delimiter. */
struct {} vm_page_header;
#define VM_PAGE_HEADER_SIZE offsetof(struct vm_page, vm_page_header)
- queue_chain_t pageq; /* queue info for FIFO
- * queue or free list (P) */
- queue_chain_t listq; /* all pages in same object (O) */
- struct vm_page *next; /* VP bucket link (O) */
-
vm_object_t object; /* which object am I in (O,P) */
vm_offset_t offset; /* offset into that object (O,P) */
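For context on the empty-struct delimiter kept above: members placed
before `vm_page_header' are presumably exempt from whatever bulk reset
uses VM_PAGE_HEADER_SIZE, which is why this commit moves `listq' and
`next' above it. A hypothetical illustration of how such a delimiter
can be used; this helper is not part of the diff:

/*
 * Hypothetical helper, for illustration only: clear the members that
 * follow the vm_page_header delimiter in one step, preserving the
 * fields (node, type, seg_index, order, phys_addr, listq, next) that
 * precede it.
 */
static void vm_page_clear_mach_members(struct vm_page *page)
{
    memset((char *)page + VM_PAGE_HEADER_SIZE, 0,
           sizeof(*page) - VM_PAGE_HEADER_SIZE);
}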
@@ -136,7 +134,9 @@ struct vm_page {
* some useful check on a page structure.
*/
-#define VM_PAGE_CHECK(mem)
+#define VM_PAGE_CHECK(mem) vm_page_check(mem)
+
+void vm_page_check(const struct vm_page *page);
/*
* Each pageable resident page falls into one of three lists:
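With the hunk above, VM_PAGE_CHECK stops being a no-op and expands to a
call to the new vm_page_check function. Its body lives in vm/vm_page.c
and is not shown here; the sketch below is purely illustrative of the
kind of invariants such a check could assert, using only members and
globals visible elsewhere in this header:

/*
 * Illustrative sketch only; the real body is in vm/vm_page.c.
 * A page must never be on both paging queues at once, and a
 * fictitious page must carry the fictitious physical address.
 */
void vm_page_check(const struct vm_page *page)
{
    if (page->active && page->inactive)
        panic("vm_page: page marked both active and inactive");

    if (page->fictitious && page->phys_addr != vm_page_fictitious_addr)
        panic("vm_page: bad fictitious page address");
}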
@@ -155,13 +155,6 @@ struct vm_page {
*/
extern
-vm_page_t vm_page_queue_fictitious; /* fictitious free queue */
-extern
-queue_head_t vm_page_queue_active; /* active memory queue */
-extern
-queue_head_t vm_page_queue_inactive; /* inactive memory queue */
-
-extern
int vm_page_fictitious_count;/* How many fictitious pages are free? */
extern
int vm_page_active_count; /* How many pages are active? */
@@ -170,25 +163,15 @@ int vm_page_inactive_count; /* How many pages are inactive? */
extern
int vm_page_wire_count; /* How many pages are wired? */
extern
-int vm_page_free_target; /* How many do we want free? */
-extern
-int vm_page_free_min; /* When to wakeup pageout */
-extern
-int vm_page_inactive_target;/* How many do we want inactive? */
-extern
-int vm_page_free_reserved; /* How many pages reserved to do pageout */
-extern
int vm_page_laundry_count; /* How many pages being laundered? */
-
+extern
+int vm_page_external_pagedout; /* How many external pages being paged out? */
decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
page queues */
decl_simple_lock_data(extern,vm_page_queue_free_lock)
/* lock on free page queue */
-extern unsigned int vm_page_free_wanted;
- /* how many threads are waiting for memory */
-
extern phys_addr_t vm_page_fictitious_addr;
/* (fake) phys_addr of fictitious pages */
@@ -204,7 +187,7 @@ extern vm_page_t vm_page_grab_fictitious(void);
extern boolean_t vm_page_convert(vm_page_t *);
extern void vm_page_more_fictitious(void);
extern vm_page_t vm_page_grab(void);
-extern void vm_page_release(vm_page_t);
+extern void vm_page_release(vm_page_t, boolean_t, boolean_t);
extern phys_addr_t vm_page_grab_phys_addr(void);
extern vm_page_t vm_page_grab_contig(vm_size_t, unsigned int);
extern void vm_page_free_contig(vm_page_t, vm_size_t);
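Per the ChangeLog above, the two boolean_t parameters added to
vm_page_release are `laundry' and `external', used for pageout
throttling. A hypothetical call site, assuming that argument order:

/*
 * Hypothetical call site: release a page that just finished being
 * paged out (laundry) on behalf of an external, file-backed object,
 * so the throttling counters can be updated. The argument order is
 * an assumption based on the ChangeLog wording.
 */
vm_page_release(m, TRUE, TRUE);    /* laundry, external */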
@@ -294,22 +277,7 @@ extern unsigned int vm_page_info(
#define vm_page_lock_queues() simple_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
-#define VM_PAGE_QUEUES_REMOVE(mem) \
- MACRO_BEGIN \
- if (mem->active) { \
- queue_remove(&vm_page_queue_active, \
- mem, vm_page_t, pageq); \
- mem->active = FALSE; \
- vm_page_active_count--; \
- } \
- \
- if (mem->inactive) { \
- queue_remove(&vm_page_queue_inactive, \
- mem, vm_page_t, pageq); \
- mem->inactive = FALSE; \
- vm_page_inactive_count--; \
- } \
- MACRO_END
+#define VM_PAGE_QUEUES_REMOVE(mem) vm_page_queues_remove(mem)
/*
* Copyright (c) 2010-2014 Richard Braun.
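The removed macro body above transliterates almost directly into the
new function. The real vm_page_queues_remove in vm/vm_page.c works on
the per-segment queues introduced by this commit, but a sketch that
simply preserves the old behavior makes the correspondence clear; note
that the global queues and the `pageq' member it uses are the very
ones this commit removes:

/*
 * Sketch of vm_page_queues_remove, transliterated from the removed
 * VM_PAGE_QUEUES_REMOVE macro. The actual implementation operates on
 * per-segment active/inactive queues and the list-based `node'
 * member instead of the removed globals shown here.
 */
void vm_page_queues_remove(struct vm_page *mem)
{
    if (mem->active) {
        queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
        mem->active = FALSE;
        vm_page_active_count--;
    }

    if (mem->inactive) {
        queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
        mem->inactive = FALSE;
        vm_page_inactive_count--;
    }
}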
@@ -358,18 +326,11 @@ extern unsigned int vm_page_info(
/*
* Page usage types.
- *
- * Failing to allocate pmap pages will cause a kernel panic.
- * TODO Obviously, this needs to be addressed, e.g. with a reserved pool of
- * pages.
*/
#define VM_PT_FREE 0 /* Page unused */
#define VM_PT_RESERVED 1 /* Page reserved at boot time */
#define VM_PT_TABLE 2 /* Page is part of the page table */
-#define VM_PT_PMAP 3 /* Page stores pmap-specific data */
-#define VM_PT_KMEM 4 /* Page is part of a kmem slab */
-#define VM_PT_STACK 5 /* Type for generic kernel allocations */
-#define VM_PT_KERNEL 6 /* Type for generic kernel allocations */
+#define VM_PT_KERNEL 3 /* Type for generic kernel allocations */
static inline unsigned short
vm_page_type(const struct vm_page *page)
@@ -521,4 +482,53 @@ phys_addr_t vm_page_mem_size(void);
*/
unsigned long vm_page_mem_free(void);
+/*
+ * Remove the given page from any page queue it might be in.
+ */
+void vm_page_queues_remove(struct vm_page *page);
+
+/*
+ * Balance physical pages among segments.
+ *
+ * This function should be called first by the pageout daemon
+ * on memory pressure, since it may be unnecessary to perform any
+ * other operation, let alone shrink caches, if balancing is
+ * enough to make enough free pages.
+ *
+ * Return TRUE if balancing made enough free pages for unprivileged
+ * allocations to succeed, in which case pending allocations are resumed.
+ *
+ * This function acquires vm_page_queue_free_lock, which is held on return.
+ */
+boolean_t vm_page_balance(void);
+
+/*
+ * Evict physical pages.
+ *
+ * This function should be called by the pageout daemon after balancing
+ * the segments and shrinking kernel caches.
+ *
+ * Return TRUE if eviction made enough free pages for unprivileged
+ * allocations to succeed, in which case pending allocations are resumed.
+ *
+ * Otherwise, report whether the pageout daemon should wait (some pages
+ * have been paged out) or not (only clean pages have been released).
+ *
+ * This function acquires vm_page_queue_free_lock, which is held on return.
+ */
+boolean_t vm_page_evict(boolean_t *should_wait);
+
+/*
+ * Turn active pages into inactive ones for second-chance LRU
+ * approximation.
+ *
+ * This function should be called by the pageout daemon on memory pressure,
+ * i.e. right before evicting pages.
+ *
+ * XXX This is probably not the best strategy, compared to keeping the
+ * active/inactive ratio in check at all times, but this means less
+ * frequent refills.
+ */
+void vm_page_refill_inactive(void);
+
#endif /* _VM_VM_PAGE_H_ */
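Read together, the comments above prescribe a calling order for the
pageout daemon: balance segments first, then shrink kernel caches,
refill the inactive queues, and only then evict. A schematic loop
under those rules; wait_for_memory_pressure, shrink_kernel_caches and
wait_for_pageouts are hypothetical stand-ins, and note that both
vm_page_balance and vm_page_evict return with vm_page_queue_free_lock
held:

/*
 * Schematic pageout loop following the documented ordering. The
 * three helpers marked hypothetical are stand-ins, not part of this
 * commit.
 */
static void pageout_loop_sketch(void)
{
    boolean_t done, should_wait;

    for (;;) {
        wait_for_memory_pressure();    /* hypothetical: sleep until woken */

        /* 1. Balancing alone may produce enough free pages. */
        done = vm_page_balance();
        simple_unlock(&vm_page_queue_free_lock);

        if (done)
            continue;

        /* 2. Shrink kernel caches (hypothetical stand-in). */
        shrink_kernel_caches();

        /* 3. Second-chance LRU: move active pages to inactive queues. */
        vm_page_refill_inactive();

        /* 4. Evict pages; wait only if some were actually paged out. */
        done = vm_page_evict(&should_wait);
        simple_unlock(&vm_page_queue_free_lock);

        if (!done && should_wait)
            wait_for_pageouts();       /* hypothetical throttling wait */
    }
}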