| author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2021-08-27 22:40:50 +0200 |
|---|---|---|
| committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2021-08-27 22:40:50 +0200 |
| commit | 2125e974fe7d9773f3d598a064646dc757da3622 (patch) | |
| tree | 833cc0dc7040d587db54cc52c530e3c83f5b8d21 /vm/vm_resident.c | |
| parent | 4dbaa718038fadc51d9b95d2383868a229d91457 (diff) | |
vm_page_grab: allow allocating in high memory
vm_page_grab had systematically been using the VM_PAGE_SEL_DIRECTMAP selector
to play it safe with existing code.

This adds a flags parameter so that callers of vm_page_grab can specify their
constraints.

Linux drivers need 32-bit DMA, and Xen drivers use kvtophys to clear some
data; callers of kmem_pagealloc_physmem and vm_page_grab_phys_addr also use
kvtophys. All other allocations can go to highmem.

This fixes the allocation jam in the directmap segment.
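As an illustration of the new constraint flags (not part of this commit), here is a minimal sketch of a caller with the Linux-driver requirement described above. The helper name alloc_dma32_buffer_page is hypothetical; vm_page_grab, VM_PAGE_DMA32, VM_PAGE_NULL, vm_page_to_pa and phys_addr_t are existing gnumach interfaces.

```c
#include <vm/vm_page.h>

/* Hypothetical helper: grab one page that a 32-bit DMA engine can
 * address, and return its physical address (0 if the free list is
 * too small). */
static phys_addr_t
alloc_dma32_buffer_page(void)
{
	/* VM_PAGE_DMA32 keeps the page below 4 GiB; asking for
	 * VM_PAGE_HIGHMEM here could hand back memory the DMA
	 * engine cannot reach. */
	vm_page_t m = vm_page_grab(VM_PAGE_DMA32);

	if (m == VM_PAGE_NULL)
		return 0;

	return vm_page_to_pa(m);
}
```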
* vm/vm_page.h (VM_PAGE_DMA, VM_PAGE_DMA32, VM_PAGE_DIRECTMAP,
VM_PAGE_HIGHMEM): New macros.
(vm_page_grab): Add flags parameter.
* vm/vm_resident.c (vm_page_grab): Choose allocation selector according
to flags parameter.
(vm_page_convert, vm_page_alloc): Pass VM_PAGE_HIGHMEM to vm_page_grab.
(vm_page_grab_phys_addr): Pass VM_PAGE_DIRECTMAP to vm_page_grab.
* vm/vm_fault.c (vm_fault_page): Pass VM_PAGE_HIGHMEM to vm_page_grab.
* vm/vm_map.c (vm_map_copy_steal_pages): Pass VM_PAGE_HIGHMEM to vm_page_grab.
* kern/slab.c (kmem_pagealloc_physmem): Pass VM_PAGE_DIRECTMAP to vm_page_grab.
* i386/intel/pmap.c (pmap_page_table_page_alloc): Pass VM_PAGE_DIRECTMAP to
vm_page_grab.
* xen/block.c (device_read): Pass VM_PAGE_DIRECTMAP to vm_page_grab.
* linux/dev/glue/block.c (alloc_buffer): Pass VM_PAGE_DMA32 to vm_page_grab.
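The ChangeLog above introduces four new constraint macros and the flags parameter in vm/vm_page.h, but that hunk is not visible here because the diff view is limited to vm/vm_resident.c. A minimal sketch of what the header side plausibly looks like, with bit values assumed purely for illustration:

```c
/* Allocation constraints for vm_page_grab(); the names and the
 * prototype follow the ChangeLog, the bit values are assumed. */
#define VM_PAGE_DMA		0x01	/* constrain to the DMA segment */
#define VM_PAGE_DMA32		0x02	/* constrain to 32-bit DMA-able memory */
#define VM_PAGE_DIRECTMAP	0x04	/* constrain to the direct-mapped segment */
#define VM_PAGE_HIGHMEM		0x08	/* no constraint, highmem allowed */

extern vm_page_t vm_page_grab(unsigned flags);
```

As the vm_resident.c hunks below show, vm_page_grab picks the widest selector the flags allow: HIGHMEM, then DIRECTMAP, then DMA32, falling back to the DMA segment when no flag is set.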
Diffstat (limited to 'vm/vm_resident.c')
-rw-r--r-- | vm/vm_resident.c | 23 |
1 file changed, 18 insertions(+), 5 deletions(-)
```diff
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index b5096e00..4af103d4 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -735,7 +735,7 @@ boolean_t vm_page_convert(struct vm_page **mp)
 	assert(!fict_m->active);
 	assert(!fict_m->inactive);
 
-	real_m = vm_page_grab();
+	real_m = vm_page_grab(VM_PAGE_HIGHMEM);
 	if (real_m == VM_PAGE_NULL)
 		return FALSE;
 
@@ -764,12 +764,25 @@ boolean_t vm_page_convert(struct vm_page **mp)
  *
  *	Remove a page from the free list.
  *	Returns VM_PAGE_NULL if the free list is too small.
+ *
+ *	FLAGS specify which constraint should be enforced for the allocated
+ *	addresses.
  */
 
-vm_page_t vm_page_grab(void)
+vm_page_t vm_page_grab(unsigned flags)
 {
+	unsigned selector;
 	vm_page_t	mem;
 
+	if (flags & VM_PAGE_HIGHMEM)
+		selector = VM_PAGE_SEL_HIGHMEM;
+	else if (flags & VM_PAGE_DIRECTMAP)
+		selector = VM_PAGE_SEL_DIRECTMAP;
+	else if (flags & VM_PAGE_DMA32)
+		selector = VM_PAGE_SEL_DMA32;
+	else
+		selector = VM_PAGE_SEL_DMA;
+
 	simple_lock(&vm_page_queue_free_lock);
 
 	/*
@@ -781,7 +794,7 @@ vm_page_t vm_page_grab(void)
 	 * explicit VM calls. The strategy is then to let memory
 	 * pressure balance the physical segments with pageable pages.
 	 */
-	mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
+	mem = vm_page_alloc_pa(0, selector, VM_PT_KERNEL);
 
 	if (mem == NULL) {
 		simple_unlock(&vm_page_queue_free_lock);
@@ -796,7 +809,7 @@ vm_page_t vm_page_grab(void)
 
 phys_addr_t vm_page_grab_phys_addr(void)
 {
-	vm_page_t p = vm_page_grab();
+	vm_page_t p = vm_page_grab(VM_PAGE_DIRECTMAP);
 	if (p == VM_PAGE_NULL)
 		return -1;
 	else
@@ -924,7 +937,7 @@ vm_page_t vm_page_alloc(
 {
 	vm_page_t	mem;
 
-	mem = vm_page_grab();
+	mem = vm_page_grab(VM_PAGE_HIGHMEM);
 	if (mem == VM_PAGE_NULL)
 		return VM_PAGE_NULL;
 
```
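For the direct-map callers listed in the ChangeLog (xen/block.c, kern/slab.c, and vm_page_grab_phys_addr above), the point of VM_PAGE_DIRECTMAP is that the kernel itself must be able to touch the page through its direct mapping, e.g. via kvtophys. A hedged sketch of that pattern follows; the helper name is hypothetical, and the machine-dependent header providing phystokv/kvtophys is assumed to be included.

```c
#include <vm/vm_page.h>

/* Hypothetical helper: grab a page the kernel can access through the
 * direct mapping and return its kernel virtual address. */
static void *
grab_directmap_page_kva(void)
{
	vm_page_t m = vm_page_grab(VM_PAGE_DIRECTMAP);

	if (m == VM_PAGE_NULL)
		return NULL;

	/* Only a direct-mapped page has a kernel virtual address that
	 * phystokv() can produce and kvtophys() can invert; a page
	 * grabbed with VM_PAGE_HIGHMEM might not. */
	return (void *) phystokv(vm_page_to_pa(m));
}
```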