diff options
Diffstat (limited to 'vm')
-rw-r--r-- | vm/vm_fault.c    |  4 ++--
-rw-r--r-- | vm/vm_map.c      |  2 +-
-rw-r--r-- | vm/vm_page.h     |  7 ++++++-
-rw-r--r-- | vm/vm_resident.c | 23 +++++++++++++++++-----
4 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/vm/vm_fault.c b/vm/vm_fault.c index 4d1d90a4..df7b2c41 100644 --- a/vm/vm_fault.c +++ b/vm/vm_fault.c @@ -423,7 +423,7 @@ vm_fault_return_t vm_fault_page( * need to allocate a real page. */ - real_m = vm_page_grab(); + real_m = vm_page_grab(VM_PAGE_HIGHMEM); if (real_m == VM_PAGE_NULL) { vm_fault_cleanup(object, first_m); return(VM_FAULT_MEMORY_SHORTAGE); @@ -810,7 +810,7 @@ vm_fault_return_t vm_fault_page( /* * Allocate a page for the copy */ - copy_m = vm_page_grab(); + copy_m = vm_page_grab(VM_PAGE_HIGHMEM); if (copy_m == VM_PAGE_NULL) { RELEASE_PAGE(m); vm_fault_cleanup(object, first_m); diff --git a/vm/vm_map.c b/vm/vm_map.c index f8f265c2..a687d365 100644 --- a/vm/vm_map.c +++ b/vm/vm_map.c @@ -2066,7 +2066,7 @@ vm_map_copy_steal_pages(vm_map_copy_t copy) * Page was not stolen, get a new * one and do the copy now. */ - while ((new_m = vm_page_grab()) == VM_PAGE_NULL) { + while ((new_m = vm_page_grab(VM_PAGE_HIGHMEM)) == VM_PAGE_NULL) { VM_PAGE_WAIT((void(*)()) 0); } diff --git a/vm/vm_page.h b/vm/vm_page.h index d9af188c..d457f9a2 100644 --- a/vm/vm_page.h +++ b/vm/vm_page.h @@ -155,6 +155,11 @@ void vm_page_check(const struct vm_page *page); * ordered, in LRU-like fashion. */ +#define VM_PAGE_DMA 0x01 +#define VM_PAGE_DMA32 0x02 +#define VM_PAGE_DIRECTMAP 0x04 +#define VM_PAGE_HIGHMEM 0x08 + extern int vm_page_fictitious_count;/* How many fictitious pages are free? 
*/ extern @@ -187,7 +192,7 @@ extern vm_page_t vm_page_lookup( extern vm_page_t vm_page_grab_fictitious(void); extern boolean_t vm_page_convert(vm_page_t *); extern void vm_page_more_fictitious(void); -extern vm_page_t vm_page_grab(void); +extern vm_page_t vm_page_grab(unsigned flags); extern void vm_page_release(vm_page_t, boolean_t, boolean_t); extern phys_addr_t vm_page_grab_phys_addr(void); extern vm_page_t vm_page_grab_contig(vm_size_t, unsigned int); diff --git a/vm/vm_resident.c b/vm/vm_resident.c index b5096e00..4af103d4 100644 --- a/vm/vm_resident.c +++ b/vm/vm_resident.c @@ -735,7 +735,7 @@ boolean_t vm_page_convert(struct vm_page **mp) assert(!fict_m->active); assert(!fict_m->inactive); - real_m = vm_page_grab(); + real_m = vm_page_grab(VM_PAGE_HIGHMEM); if (real_m == VM_PAGE_NULL) return FALSE; @@ -764,12 +764,25 @@ boolean_t vm_page_convert(struct vm_page **mp) * * Remove a page from the free list. * Returns VM_PAGE_NULL if the free list is too small. + * + * FLAGS specify which constraint should be enforced for the allocated + * addresses. */ -vm_page_t vm_page_grab(void) +vm_page_t vm_page_grab(unsigned flags) { + unsigned selector; vm_page_t mem; + if (flags & VM_PAGE_HIGHMEM) + selector = VM_PAGE_SEL_HIGHMEM; + else if (flags & VM_PAGE_DIRECTMAP) + selector = VM_PAGE_SEL_DIRECTMAP; + else if (flags & VM_PAGE_DMA32) + selector = VM_PAGE_SEL_DMA32; + else + selector = VM_PAGE_SEL_DMA; + simple_lock(&vm_page_queue_free_lock); /* @@ -781,7 +794,7 @@ vm_page_t vm_page_grab(void) * explicit VM calls. The strategy is then to let memory * pressure balance the physical segments with pageable pages. 
*/ - mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL); + mem = vm_page_alloc_pa(0, selector, VM_PT_KERNEL); if (mem == NULL) { simple_unlock(&vm_page_queue_free_lock); @@ -796,7 +809,7 @@ vm_page_t vm_page_grab(void) phys_addr_t vm_page_grab_phys_addr(void) { - vm_page_t p = vm_page_grab(); + vm_page_t p = vm_page_grab(VM_PAGE_DIRECTMAP); if (p == VM_PAGE_NULL) return -1; else @@ -924,7 +937,7 @@ vm_page_t vm_page_alloc( { vm_page_t mem; - mem = vm_page_grab(); + mem = vm_page_grab(VM_PAGE_HIGHMEM); if (mem == VM_PAGE_NULL) return VM_PAGE_NULL; |