commit 2125e974fe7d9773f3d598a064646dc757da3622
tree 833cc0dc7040d587db54cc52c530e3c83f5b8d21
parent 4dbaa718038fadc51d9b95d2383868a229d91457
author Samuel Thibault <samuel.thibault@ens-lyon.org> 2021-08-27 22:40:50 +0200
committer Samuel Thibault <samuel.thibault@ens-lyon.org> 2021-08-27 22:40:50 +0200
vm_page_grab: allow allocating in high memory
vm_page_grab systematically used the VM_PAGE_SEL_DIRECTMAP selector to
stay safe with existing code.

This adds a flags parameter so that callers of vm_page_grab can specify
their constraints.

Linux drivers need 32-bit DMA, and Xen drivers use kvtophys to clear
some data; the callers of kmem_pagealloc_physmem and
vm_page_grab_phys_addr also use kvtophys. All other allocations can go
to highmem.

This fixes the allocation jam in the directmap segment.
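
[Editor's illustration, not part of the commit.] A minimal caller in the
style of the Linux glue code might look like the sketch below. The
function name is hypothetical; vm_page_grab, VM_PAGE_WAIT and the
VM_PAGE_* flags are the ones used or introduced by this change.

	/* Hypothetical sketch: grab a page that a 32-bit DMA engine can
	   address, waiting for memory if necessary, in the same style as
	   vm_map_copy_steal_pages. */
	static vm_page_t
	example_grab_dma32_page(void)
	{
		vm_page_t m;

		/* VM_PAGE_DMA32 keeps the page below 4 GiB.  Passing no
		   flag now conservatively selects the DMA segment, and
		   VM_PAGE_HIGHMEM would allow any segment. */
		while ((m = vm_page_grab(VM_PAGE_DMA32)) == VM_PAGE_NULL)
			VM_PAGE_WAIT((void (*)()) 0);

		return m;
	}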
* vm/vm_page.h (VM_PAGE_DMA, VM_PAGE_DMA32, VM_PAGE_DIRECTMAP,
VM_PAGE_HIGHMEM): New macros.
(vm_page_grab): Add flags parameter.
* vm/vm_resident.c (vm_page_grab): Choose allocation selector according
to flags parameter.
(vm_page_convert, vm_page_alloc): Pass VM_PAGE_HIGHMEM to vm_page_grab.
(vm_page_grab_phys_addr): Pass VM_PAGE_DIRECTMAP to vm_page_grab.
* vm/vm_fault.c (vm_fault_page): Pass VM_PAGE_HIGHMEM to vm_page_grab.
* vm/vm_map.c (vm_map_copy_steal_pages): Pass VM_PAGE_HIGHMEM to vm_page_grab.
* kern/slab.c (kmem_pagealloc_physmem): Pass VM_PAGE_DIRECTMAP to vm_page_grab.
* i386/intel/pmap.c (pmap_page_table_page_alloc): Pass VM_PAGE_DIRECTMAP to
vm_page_grab.
* xen/block.c (device_read): Pass VM_PAGE_DIRECTMAP to vm_page_grab.
* linux/dev/glue/block.c (alloc_buffer): Pass VM_PAGE_DMA32 to vm_page_grab.
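
[Editor's illustration, not part of the commit.] For the direct-map
callers listed above, the constraint is that only pages in the
direct-mapped segment have a permanent kernel virtual address. A hedged
sketch of that pattern follows; the function name is hypothetical, and
phystokv and vm_page_to_pa are assumed to be the existing gnumach
helpers.

	/* Hypothetical sketch: allocate a page and obtain its kernel
	   virtual address through the direct map. */
	static void *
	example_grab_mapped_page(void)
	{
		vm_page_t m;

		/* kvtophys/phystokv only work on direct-mapped pages,
		   hence VM_PAGE_DIRECTMAP rather than VM_PAGE_HIGHMEM. */
		m = vm_page_grab(VM_PAGE_DIRECTMAP);
		if (m == VM_PAGE_NULL)
			return NULL;

		return (void *) phystokv(vm_page_to_pa(m));
	}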
Diffstat (limited to 'vm')
 vm/vm_fault.c    |  4 ++--
 vm/vm_map.c      |  2 +-
 vm/vm_page.h     |  7 ++++++-
 vm/vm_resident.c | 23 ++++++++++++++++++-----
 4 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 4d1d90a4..df7b2c41 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -423,7 +423,7 @@ vm_fault_return_t vm_fault_page(
			 *	need to allocate a real page.
			 */

-			real_m = vm_page_grab();
+			real_m = vm_page_grab(VM_PAGE_HIGHMEM);
			if (real_m == VM_PAGE_NULL) {
				vm_fault_cleanup(object, first_m);
				return(VM_FAULT_MEMORY_SHORTAGE);
@@ -810,7 +810,7 @@ vm_fault_return_t vm_fault_page(
			/*
			 *	Allocate a page for the copy
			 */
-			copy_m = vm_page_grab();
+			copy_m = vm_page_grab(VM_PAGE_HIGHMEM);
			if (copy_m == VM_PAGE_NULL) {
				RELEASE_PAGE(m);
				vm_fault_cleanup(object, first_m);
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f8f265c2..a687d365 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -2066,7 +2066,7 @@ vm_map_copy_steal_pages(vm_map_copy_t copy)
			 * Page was not stolen, get a new
			 * one and do the copy now.
			 */
-			while ((new_m = vm_page_grab()) == VM_PAGE_NULL) {
+			while ((new_m = vm_page_grab(VM_PAGE_HIGHMEM)) == VM_PAGE_NULL) {
				VM_PAGE_WAIT((void(*)()) 0);
			}

diff --git a/vm/vm_page.h b/vm/vm_page.h
index d9af188c..d457f9a2 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -155,6 +155,11 @@ void vm_page_check(const struct vm_page *page);
 *	ordered, in LRU-like fashion.
 */

+#define VM_PAGE_DMA		0x01
+#define VM_PAGE_DMA32		0x02
+#define VM_PAGE_DIRECTMAP	0x04
+#define VM_PAGE_HIGHMEM		0x08
+
 extern
 int	vm_page_fictitious_count;/* How many fictitious pages are free? */
 extern
@@ -187,7 +192,7 @@ extern vm_page_t vm_page_lookup(
 extern vm_page_t	vm_page_grab_fictitious(void);
 extern boolean_t	vm_page_convert(vm_page_t *);
 extern void		vm_page_more_fictitious(void);
-extern vm_page_t	vm_page_grab(void);
+extern vm_page_t	vm_page_grab(unsigned flags);
 extern void		vm_page_release(vm_page_t, boolean_t, boolean_t);
 extern phys_addr_t	vm_page_grab_phys_addr(void);
 extern vm_page_t	vm_page_grab_contig(vm_size_t, unsigned int);
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index b5096e00..4af103d4 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -735,7 +735,7 @@ boolean_t vm_page_convert(struct vm_page **mp)
	assert(!fict_m->active);
	assert(!fict_m->inactive);

-	real_m = vm_page_grab();
+	real_m = vm_page_grab(VM_PAGE_HIGHMEM);
	if (real_m == VM_PAGE_NULL)
		return FALSE;

@@ -764,12 +764,25 @@ boolean_t vm_page_convert(struct vm_page **mp)
 *
 *	Remove a page from the free list.
 *	Returns VM_PAGE_NULL if the free list is too small.
+ *
+ *	FLAGS specify which constraint should be enforced for the allocated
+ *	addresses.
 */

-vm_page_t vm_page_grab(void)
+vm_page_t vm_page_grab(unsigned flags)
 {
+	unsigned selector;
	vm_page_t	mem;

+	if (flags & VM_PAGE_HIGHMEM)
+		selector = VM_PAGE_SEL_HIGHMEM;
+	else if (flags & VM_PAGE_DIRECTMAP)
+		selector = VM_PAGE_SEL_DIRECTMAP;
+	else if (flags & VM_PAGE_DMA32)
+		selector = VM_PAGE_SEL_DMA32;
+	else
+		selector = VM_PAGE_SEL_DMA;
+
	simple_lock(&vm_page_queue_free_lock);

	/*
@@ -781,7 +794,7 @@ vm_page_t vm_page_grab(void)
	 * explicit VM calls.  The strategy is then to let memory
	 * pressure balance the physical segments with pageable pages.
	 */
-	mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
+	mem = vm_page_alloc_pa(0, selector, VM_PT_KERNEL);

	if (mem == NULL) {
		simple_unlock(&vm_page_queue_free_lock);
@@ -796,7 +809,7 @@ vm_page_t vm_page_grab(void)

 phys_addr_t vm_page_grab_phys_addr(void)
 {
-	vm_page_t p = vm_page_grab();
+	vm_page_t p = vm_page_grab(VM_PAGE_DIRECTMAP);
	if (p == VM_PAGE_NULL)
		return -1;
	else
@@ -924,7 +937,7 @@ vm_page_t vm_page_alloc(
 {
	vm_page_t	mem;

-	mem = vm_page_grab();
+	mem = vm_page_grab(VM_PAGE_HIGHMEM);
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;
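
[Editor's note on the design.] The selector tests in vm_page_grab go
from least to most constrained: VM_PAGE_HIGHMEM takes precedence over
VM_PAGE_DIRECTMAP, which takes precedence over VM_PAGE_DMA32, and a
caller passing no flag conservatively gets VM_PAGE_SEL_DMA. The flag
thus names the highest physical segment the caller can tolerate;
vm_page_alloc_pa can presumably still fall back to lower segments when
the preferred one is exhausted.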