Diffstat (limited to 'vm/vm_resident.c')
-rw-r--r-- | vm/vm_resident.c | 663 |
1 file changed, 212 insertions, 451 deletions
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 66ab51f0..fa7a337b 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -27,7 +27,7 @@
  * the rights to redistribute these changes.
  */
 /*
- *	File:	vm/vm_page.c
+ *	File:	vm/vm_resident.c
  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
  *
  *	Resident memory management module.
@@ -65,14 +65,14 @@
 /*
- *	Associated with eacn page of user-allocatable memory is a
+ *	Associated with each page of user-allocatable memory is a
  *	page structure.
  */

 /*
  *	These variables record the values returned by vm_page_bootstrap,
  *	for debugging purposes.  The implementation of pmap_steal_memory
- *	and pmap_startup here also uses them internally.
+ *	here also uses them internally.
  */

 vm_offset_t virtual_space_start;
@@ -95,29 +95,18 @@
 vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
 unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
 unsigned int	vm_page_hash_mask;		/* Mask for hash function */

-/*
- *	Resident page structures are initialized from
- *	a template (see vm_page_alloc).
- *
- *	When adding a new field to the virtual memory
- *	object structure, be sure to add initialization
- *	(see vm_page_bootstrap).
- */
-struct vm_page	vm_page_template;
-
-/*
- *	Resident pages that represent real memory
- *	are allocated from a free list.
- */
-vm_page_t	vm_page_queue_free;
 vm_page_t	vm_page_queue_fictitious;
 decl_simple_lock_data(,vm_page_queue_free_lock)
 unsigned int	vm_page_free_wanted;
-int		vm_page_free_count;
 int		vm_page_fictitious_count;
 int		vm_page_external_count;

-unsigned int	vm_page_free_count_minimum;	/* debugging */
+/*
+ *	This variable isn't directly used. It's merely a placeholder for the
+ *	address used to synchronize threads waiting for pages to become
+ *	available. The real value is returned by vm_page_mem_free().
+ */
+unsigned int	vm_page_free_avail;

 /*
  *	Occasionally, the virtual memory system uses
@@ -192,48 +181,15 @@ void vm_page_bootstrap(
 	vm_offset_t *startp,
 	vm_offset_t *endp)
 {
-	register vm_page_t m;
 	int i;

 	/*
-	 *	Initialize the vm_page template.
-	 */
-
-	m = &vm_page_template;
-	m->object = VM_OBJECT_NULL;	/* reset later */
-	m->offset = 0;			/* reset later */
-	m->wire_count = 0;
-
-	m->inactive = FALSE;
-	m->active = FALSE;
-	m->laundry = FALSE;
-	m->free = FALSE;
-	m->external = FALSE;
-
-	m->busy = TRUE;
-	m->wanted = FALSE;
-	m->tabled = FALSE;
-	m->fictitious = FALSE;
-	m->private = FALSE;
-	m->absent = FALSE;
-	m->error = FALSE;
-	m->dirty = FALSE;
-	m->precious = FALSE;
-	m->reference = FALSE;
-
-	m->phys_addr = 0;		/* reset later */
-
-	m->page_lock = VM_PROT_NONE;
-	m->unlock_request = VM_PROT_NONE;
-
-	/*
 	 *	Initialize the page queues.
 	 */

 	simple_lock_init(&vm_page_queue_free_lock);
 	simple_lock_init(&vm_page_queue_lock);

-	vm_page_queue_free = VM_PAGE_NULL;
 	vm_page_queue_fictitious = VM_PAGE_NULL;
 	queue_init(&vm_page_queue_active);
 	queue_init(&vm_page_queue_inactive);
@@ -241,12 +197,6 @@ void vm_page_bootstrap(
 	vm_page_free_wanted = 0;

 	/*
-	 *	Steal memory for the kernel map entries.
-	 */
-
-	kentry_data = pmap_steal_memory(kentry_data_size);
-
-	/*
 	 *	Allocate (and initialize) the virtual-to-physical
 	 *	table hash buckets.
 	 *
@@ -274,35 +224,25 @@ void vm_page_bootstrap(
 			sizeof(vm_page_bucket_t));

 	for (i = 0; i < vm_page_bucket_count; i++) {
-		register vm_page_bucket_t *bucket = &vm_page_buckets[i];
+		vm_page_bucket_t *bucket = &vm_page_buckets[i];

 		bucket->pages = VM_PAGE_NULL;
 		simple_lock_init(&bucket->lock);
 	}

-	/*
-	 *	Machine-dependent code allocates the resident page table.
-	 *	It uses vm_page_init to initialize the page frames.
-	 *	The code also returns to us the virtual space available
-	 *	to the kernel.  We don't trust the pmap module
-	 *	to get the alignment right.
-	 */
+	vm_page_setup();

-	pmap_startup(&virtual_space_start, &virtual_space_end);
 	virtual_space_start = round_page(virtual_space_start);
 	virtual_space_end = trunc_page(virtual_space_end);

 	*startp = virtual_space_start;
 	*endp = virtual_space_end;
-
-	/*	printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);*/
-	vm_page_free_count_minimum = vm_page_free_count;
 }

 #ifndef	MACHINE_PAGES
 /*
- *	We implement pmap_steal_memory and pmap_startup with the help
- *	of two simpler functions, pmap_virtual_space and pmap_next_page.
+ *	We implement pmap_steal_memory with the help
+ *	of two simpler functions, pmap_virtual_space and vm_page_bootalloc.
  */

 vm_offset_t pmap_steal_memory(
@@ -310,11 +250,7 @@ vm_offset_t pmap_steal_memory(
 {
 	vm_offset_t addr, vaddr, paddr;

-	/*
-	 *	We round the size to an integer multiple.
-	 */
-
-	size = (size + 3) &~ 3;
+	size = round_page(size);

 	/*
 	 *	If this is the first call to pmap_steal_memory,
@@ -347,8 +283,7 @@ vm_offset_t pmap_steal_memory(
 	for (vaddr = round_page(addr);
 	     vaddr < addr + size;
 	     vaddr += PAGE_SIZE) {
-		if (!pmap_next_page(&paddr))
-			panic("pmap_steal_memory");
+		paddr = vm_page_bootalloc(PAGE_SIZE);

 		/*
 		 *	XXX Logically, these mappings should be wired,
@@ -361,64 +296,6 @@ vm_offset_t pmap_steal_memory(
 	return addr;
 }
-
-void pmap_startup(
-	vm_offset_t *startp,
-	vm_offset_t *endp)
-{
-	unsigned int i, npages, pages_initialized;
-	vm_page_t pages;
-	vm_offset_t paddr;
-
-	/*
-	 *	We calculate how many page frames we will have
-	 *	and then allocate the page structures in one chunk.
-	 */
-
-	npages = ((PAGE_SIZE * pmap_free_pages() +
-		   (round_page(virtual_space_start) - virtual_space_start)) /
-		  (PAGE_SIZE + sizeof *pages));
-
-	pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
-
-	/*
-	 *	Initialize the page frames.
-	 */
-
-	for (i = 0, pages_initialized = 0; i < npages; i++) {
-		if (!pmap_next_page(&paddr))
-			break;
-
-		vm_page_init(&pages[i], paddr);
-		pages_initialized++;
-	}
-	i = 0;
-	while (pmap_next_page(&paddr))
-		i++;
-	if (i)
-		printf("%u memory page(s) left away\n", i);
-
-	/*
-	 *	Release pages in reverse order so that physical pages
-	 *	initially get allocated in ascending addresses. This keeps
-	 *	the devices (which must address physical memory) happy if
-	 *	they require several consecutive pages.
-	 */
-
-	for (i = pages_initialized; i > 0; i--) {
-		vm_page_release(&pages[i - 1], FALSE);
-	}
-
-	/*
-	 *	We have to re-align virtual_space_start,
-	 *	because pmap_steal_memory has been using it.
-	 */
-
-	virtual_space_start = round_page(virtual_space_start);
-
-	*startp = virtual_space_start;
-	*endp = virtual_space_end;
-}
 #endif	/* MACHINE_PAGES */

 /*
@@ -430,35 +307,7 @@ void pmap_startup(
 void		vm_page_module_init(void)
 {
 	kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
-			NULL, NULL, NULL, 0);
-}
-
-/*
- *	Routine:	vm_page_create
- *	Purpose:
- *		After the VM system is up, machine-dependent code
- *		may stumble across more physical memory.  For example,
- *		memory that it was reserving for a frame buffer.
- *		vm_page_create turns this memory into available pages.
-	 */
-
-void vm_page_create(
-	vm_offset_t	start,
-	vm_offset_t	end)
-{
-	vm_offset_t paddr;
-	vm_page_t m;
-
-	for (paddr = round_page(start);
-	     paddr < trunc_page(end);
-	     paddr += PAGE_SIZE) {
-		m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
-		if (m == VM_PAGE_NULL)
-			panic("vm_page_create");
-
-		vm_page_init(m, paddr);
-		vm_page_release(m, FALSE);
-	}
+			NULL, 0);
 }

 /*
@@ -483,11 +332,11 @@ void vm_page_create(
  */

 void vm_page_insert(
-	register vm_page_t	mem,
-	register vm_object_t	object,
-	register vm_offset_t	offset)
+	vm_page_t	mem,
+	vm_object_t	object,
+	vm_offset_t	offset)
 {
-	register vm_page_bucket_t *bucket;
+	vm_page_bucket_t *bucket;

 	VM_PAGE_CHECK(mem);

@@ -555,11 +404,11 @@ void vm_page_insert(
  */

 void vm_page_replace(
-	register vm_page_t	mem,
-	register vm_object_t	object,
-	register vm_offset_t	offset)
+	vm_page_t	mem,
+	vm_object_t	object,
+	vm_offset_t	offset)
 {
-	register vm_page_bucket_t *bucket;
+	vm_page_bucket_t *bucket;

 	VM_PAGE_CHECK(mem);

@@ -582,7 +431,7 @@ void vm_page_replace(
 	simple_lock(&bucket->lock);
 	if (bucket->pages) {
 		vm_page_t *mp = &bucket->pages;
-		register vm_page_t m = *mp;
+		vm_page_t m = *mp;

 		do {
 			if (m->object == object && m->offset == offset) {
 				/*
@@ -646,10 +495,10 @@ void vm_page_replace(
  */

 void vm_page_remove(
-	register vm_page_t	mem)
+	vm_page_t	mem)
 {
-	register vm_page_bucket_t	*bucket;
-	register vm_page_t		this;
+	vm_page_bucket_t	*bucket;
+	vm_page_t		this;

 	assert(mem->tabled);
 	VM_PAGE_CHECK(mem);

@@ -665,7 +514,7 @@ void vm_page_remove(
 		bucket->pages = mem->next;
 	} else {
-		register vm_page_t	*prev;
+		vm_page_t	*prev;

 		for (prev = &this->next;
 		     (this = *prev) != mem;
@@ -704,11 +553,11 @@ void vm_page_remove(
  */

 vm_page_t vm_page_lookup(
-	register vm_object_t	object,
-	register vm_offset_t	offset)
+	vm_object_t	object,
+	vm_offset_t	offset)
 {
-	register vm_page_t		mem;
-	register vm_page_bucket_t	*bucket;
+	vm_page_t		mem;
+	vm_page_bucket_t	*bucket;

 	/*
 	 *	Search the hash table for this object/offset pair
@@ -735,9 +584,9 @@ vm_page_t vm_page_lookup(
  *	The object must be locked.
  */
 void vm_page_rename(
-	register vm_page_t	mem,
-	register vm_object_t	new_object,
-	vm_offset_t		new_offset)
+	vm_page_t	mem,
+	vm_object_t	new_object,
+	vm_offset_t	new_offset)
 {
 	/*
 	 *	Changes to mem->object require the page lock because
@@ -750,6 +599,33 @@ void vm_page_rename(
 	vm_page_unlock_queues();
 }

+static void vm_page_init_template(vm_page_t m)
+{
+	m->object = VM_OBJECT_NULL;	/* reset later */
+	m->offset = 0;			/* reset later */
+	m->wire_count = 0;
+
+	m->inactive = FALSE;
+	m->active = FALSE;
+	m->laundry = FALSE;
+	m->free = FALSE;
+	m->external = FALSE;
+
+	m->busy = TRUE;
+	m->wanted = FALSE;
+	m->tabled = FALSE;
+	m->fictitious = FALSE;
+	m->private = FALSE;
+	m->absent = FALSE;
+	m->error = FALSE;
+	m->dirty = FALSE;
+	m->precious = FALSE;
+	m->reference = FALSE;
+
+	m->page_lock = VM_PROT_NONE;
+	m->unlock_request = VM_PROT_NONE;
+}
+
 /*
  *	vm_page_init:
  *
@@ -758,11 +634,9 @@ void vm_page_rename(
  *	so that it can be given to vm_page_release or vm_page_insert.
  */
 void vm_page_init(
-	vm_page_t	mem,
-	vm_offset_t	phys_addr)
+	vm_page_t	mem)
 {
-	*mem = vm_page_template;
-	mem->phys_addr = phys_addr;
+	vm_page_init_template(mem);
 }

 /*
@@ -774,7 +648,7 @@ void vm_page_init(

 vm_page_t vm_page_grab_fictitious(void)
 {
-	register vm_page_t m;
+	vm_page_t m;

 	simple_lock(&vm_page_queue_free_lock);
 	m = vm_page_queue_fictitious;
@@ -794,8 +668,8 @@ vm_page_t vm_page_grab_fictitious(void)
  *	Release a fictitious page to the free list.
  */

-void vm_page_release_fictitious(
-	register vm_page_t m)
+static void vm_page_release_fictitious(
+	vm_page_t m)
 {
 	simple_lock(&vm_page_queue_free_lock);
 	if (m->free)
@@ -818,7 +692,7 @@ int	vm_page_fictitious_quantum = 5;

 void vm_page_more_fictitious(void)
 {
-	register vm_page_t m;
+	vm_page_t m;
 	int i;

 	for (i = 0; i < vm_page_fictitious_quantum; i++) {
@@ -826,7 +700,8 @@ void vm_page_more_fictitious(void)
 		if (m == VM_PAGE_NULL)
 			panic("vm_page_more_fictitious");

-		vm_page_init(m, vm_page_fictitious_addr);
+		vm_page_init(m);
+		m->phys_addr = vm_page_fictitious_addr;
 		m->fictitious = TRUE;
 		vm_page_release_fictitious(m);
 	}
@@ -836,25 +711,46 @@ void vm_page_more_fictitious(void)
  *	vm_page_convert:
  *
  *	Attempt to convert a fictitious page into a real page.
+ *
+ *	The object referenced by *MP must be locked.
  */

 boolean_t vm_page_convert(
-	register vm_page_t m,
+	struct vm_page **mp,
 	boolean_t external)
 {
-	register vm_page_t real_m;
+	struct vm_page *real_m, *fict_m;
+	vm_object_t object;
+	vm_offset_t offset;
+
+	fict_m = *mp;
+
+	assert(fict_m->fictitious);
+	assert(fict_m->phys_addr == vm_page_fictitious_addr);
+	assert(!fict_m->active);
+	assert(!fict_m->inactive);

 	real_m = vm_page_grab(external);
 	if (real_m == VM_PAGE_NULL)
 		return FALSE;

-	m->phys_addr = real_m->phys_addr;
-	m->fictitious = FALSE;
+	object = fict_m->object;
+	offset = fict_m->offset;
+	vm_page_remove(fict_m);

-	real_m->phys_addr = vm_page_fictitious_addr;
-	real_m->fictitious = TRUE;
+	memcpy(&real_m->vm_page_header,
+	       &fict_m->vm_page_header,
+	       sizeof(*fict_m) - VM_PAGE_HEADER_SIZE);
+	real_m->fictitious = FALSE;

-	vm_page_release_fictitious(real_m);
+	vm_page_insert(real_m, object, offset);
+
+	assert(real_m->phys_addr != vm_page_fictitious_addr);
+	assert(fict_m->fictitious);
+	assert(fict_m->phys_addr == vm_page_fictitious_addr);
+
+	vm_page_release_fictitious(fict_m);
+	*mp = real_m;

 	return TRUE;
 }
@@ -868,7 +764,7 @@ boolean_t vm_page_convert(
 vm_page_t vm_page_grab(
 	boolean_t external)
 {
-	register vm_page_t	mem;
+	vm_page_t	mem;

 	simple_lock(&vm_page_queue_free_lock);

@@ -878,7 +774,7 @@ vm_page_t vm_page_grab(
 	 *	for externally-managed pages.
 	 */

-	if (((vm_page_free_count < vm_page_free_reserved)
+	if (((vm_page_mem_free() < vm_page_free_reserved)
 	     || (external
 		 && (vm_page_external_count > vm_page_external_limit)))
 	    && !current_thread()->vm_privilege) {
@@ -886,15 +782,16 @@ vm_page_t vm_page_grab(
 		return VM_PAGE_NULL;
 	}

-	if (vm_page_queue_free == VM_PAGE_NULL)
-		panic("vm_page_grab");
+	mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
+
+	if (mem == NULL) {
+		simple_unlock(&vm_page_queue_free_lock);
+		return NULL;
+	}

-	if (--vm_page_free_count < vm_page_free_count_minimum)
-		vm_page_free_count_minimum = vm_page_free_count;
 	if (external)
 		vm_page_external_count++;
-	mem = vm_page_queue_free;
-	vm_page_queue_free = (vm_page_t) mem->pageq.next;
+	mem->free = FALSE;
 	mem->extcounted = mem->external = external;
 	simple_unlock(&vm_page_queue_free_lock);

@@ -910,15 +807,15 @@ vm_page_t vm_page_grab(
 	 *	it doesn't really matter.
 	 */

-	if ((vm_page_free_count < vm_page_free_min) ||
-	    ((vm_page_free_count < vm_page_free_target) &&
+	if ((vm_page_mem_free() < vm_page_free_min) ||
+	    ((vm_page_mem_free() < vm_page_free_target) &&
 	     (vm_page_inactive_count < vm_page_inactive_target)))
 		thread_wakeup((event_t) &vm_page_free_wanted);

 	return mem;
 }

-vm_offset_t vm_page_grab_phys_addr()
+vm_offset_t vm_page_grab_phys_addr(void)
 {
 	vm_page_t p = vm_page_grab(FALSE);
 	if (p == VM_PAGE_NULL)
@@ -928,208 +825,92 @@
 }

 /*
- *	vm_page_grab_contiguous_pages:
- *
- *	Take N pages off the free list, the pages should
- *	cover a contiguous range of physical addresses.
- *	[Used by device drivers to cope with DMA limitations]
+ *	vm_page_release:
  *
- *	Returns the page descriptors in ascending order, or
- *	Returns KERN_RESOURCE_SHORTAGE if it could not.
+ *	Return a page to the free list.
  */

-/* Biggest phys page number for the pages we handle in VM */
-
-vm_size_t	vm_page_big_pagenum = 0;	/* Set this before call! */
-
-kern_return_t
-vm_page_grab_contiguous_pages(
-	int		npages,
-	vm_page_t	pages[],
-	natural_t	*bits,
-	boolean_t	external)
+static void vm_page_release(
+	vm_page_t	mem,
+	boolean_t	external)
 {
-	register int	first_set;
-	int		size, alloc_size;
-	kern_return_t	ret;
-	vm_page_t	mem, *prevmemp;
-
-#ifndef	NBBY
-#define	NBBY	8	/* size in bits of sizeof()`s unity */
-#endif
-
-#define	NBPEL	(sizeof(natural_t)*NBBY)
-
-	size = (vm_page_big_pagenum + NBPEL - 1)
-		& ~(NBPEL - 1);				/* in bits */
-
-	size = size / NBBY;				/* in bytes */
-
-	/*
-	 * If we are called before the VM system is fully functional
-	 * the invoker must provide us with the work space. [one bit
-	 * per page starting at phys 0 and up to vm_page_big_pagenum]
-	 */
-	if (bits == 0) {
-		alloc_size = round_page(size);
-		if (kmem_alloc_wired(kernel_map,
-				     (vm_offset_t *)&bits,
-				     alloc_size)
-		    != KERN_SUCCESS)
-			return KERN_RESOURCE_SHORTAGE;
-	} else
-		alloc_size = 0;
-
-	memset(bits, 0, size);
-
-	/*
-	 * A very large granularity call, its rare so that is ok
-	 */
 	simple_lock(&vm_page_queue_free_lock);
+	if (mem->free)
+		panic("vm_page_release");
+	mem->free = TRUE;
+	vm_page_free_pa(mem, 0);
+	if (external)
+		vm_page_external_count--;

 	/*
-	 * Do not dip into the reserved pool.
-	 */
-
-	if ((vm_page_free_count < vm_page_free_reserved)
-	    || (vm_page_external_count >= vm_page_external_limit)) {
-		printf_once("no more room for vm_page_grab_contiguous_pages");
-		simple_unlock(&vm_page_queue_free_lock);
-		return KERN_RESOURCE_SHORTAGE;
-	}
-
-	/*
-	 * First pass through, build a big bit-array of
-	 * the pages that are free. It is not going to
-	 * be too large anyways, in 4k we can fit info
-	 * for 32k pages.
+	 *	Check if we should wake up someone waiting for page.
+	 *	But don't bother waking them unless they can allocate.
+	 *
+	 *	We wakeup only one thread, to prevent starvation.
+	 *	Because the scheduling system handles wait queues FIFO,
+	 *	if we wakeup all waiting threads, one greedy thread
+	 *	can starve multiple niceguy threads.  When the threads
+	 *	all wakeup, the greedy threads runs first, grabs the page,
+	 *	and waits for another page.  It will be the first to run
+	 *	when the next page is freed.
+	 *
+	 *	However, there is a slight danger here.
+	 *	The thread we wake might not use the free page.
+	 *	Then the other threads could wait indefinitely
+	 *	while the page goes unused.  To forestall this,
+	 *	the pageout daemon will keep making free pages
+	 *	as long as vm_page_free_wanted is non-zero.
 	 */

-	mem = vm_page_queue_free;
-	while (mem) {
-		register int word_index, bit_index;
-
-		bit_index = (mem->phys_addr >> PAGE_SHIFT);
-		word_index = bit_index / NBPEL;
-		bit_index = bit_index - (word_index * NBPEL);
-		bits[word_index] |= 1 << bit_index;
-		mem = (vm_page_t) mem->pageq.next;
+	if ((vm_page_free_wanted > 0) &&
+	    (vm_page_mem_free() >= vm_page_free_reserved)) {
+		vm_page_free_wanted--;
+		thread_wakeup_one((event_t) &vm_page_free_avail);
 	}

-	/*
-	 * Second loop. Scan the bit array for NPAGES
-	 * contiguous bits. That gives us, if any,
-	 * the range of pages we will be grabbing off
-	 * the free list.
-	 */
-	{
-		register int	bits_so_far = 0, i;
+	simple_unlock(&vm_page_queue_free_lock);
+}

-		first_set = 0;
+/*
+ * vm_page_grab_contig:
+ *
+ * Remove a block of contiguous pages from the free list.
+ * Returns VM_PAGE_NULL if the request fails.
+ */

-		for (i = 0; i < size; i += sizeof(natural_t)) {
+vm_page_t vm_page_grab_contig(
+	vm_size_t size,
+	unsigned int selector)
+{
+	unsigned int i, order, nr_pages;
+	vm_page_t mem;

-			register natural_t	v = bits[i / sizeof(natural_t)];
-			register int		bitpos;
+	order = vm_page_order(size);
+	nr_pages = 1 << order;

-			/*
-			 * Bitscan this one word
-			 */
-			if (v) {
-				/*
-				 * keep counting them beans ?
-				 */
-				bitpos = 0;
+	simple_lock(&vm_page_queue_free_lock);

-				if (bits_so_far) {
-count_ones:
-					while (v & 1) {
-						bitpos++;
-						/*
-						 * got enough beans ?
-						 */
-						if (++bits_so_far == npages)
-							goto found_em;
-						v >>= 1;
-					}
-					/* if we are being lucky, roll again */
-					if (bitpos == NBPEL)
-						continue;
-				}
+	/*
+	 * Only let privileged threads (involved in pageout)
+	 * dip into the reserved pool or exceed the limit
	 * for externally-managed pages.
+	 */

-				/*
-				 * search for beans here
-				 */
-				bits_so_far = 0;
-				while ((bitpos < NBPEL) && ((v & 1) == 0)) {
-					bitpos++;
-					v >>= 1;
-				}
-				if (v & 1) {
-					first_set = (i * NBBY) + bitpos;
-					goto count_ones;
-				}
-			}
-			/*
-			 * No luck
-			 */
-			bits_so_far = 0;
-		}
-	}
+	if (((vm_page_mem_free() - nr_pages) <= vm_page_free_reserved)
+	    && !current_thread()->vm_privilege) {
+		simple_unlock(&vm_page_queue_free_lock);
+		return VM_PAGE_NULL;
 	}

-	/*
-	 * We could not find enough contiguous pages.
-	 */
-	simple_unlock(&vm_page_queue_free_lock);
+	/* TODO Allow caller to pass type */
+	mem = vm_page_alloc_pa(order, selector, VM_PT_KERNEL);

-	printf_once("no contiguous room for vm_page_grab_contiguous_pages");
-	ret = KERN_RESOURCE_SHORTAGE;
-	goto out;
+	if (mem == NULL)
+		panic("vm_page_grab_contig");

-	/*
-	 * Final pass. Now we know which pages we want.
-	 * Scan the list until we find them all, grab
-	 * pages as we go. FIRST_SET tells us where
-	 * in the bit-array our pages start.
-	 */
-found_em:
-	vm_page_free_count -= npages;
-	if (vm_page_free_count < vm_page_free_count_minimum)
-		vm_page_free_count_minimum = vm_page_free_count;
-	if (external)
-		vm_page_external_count += npages;
-	{
-		register vm_offset_t	first_phys, last_phys;
-
-		/* cache values for compare */
-		first_phys = first_set << PAGE_SHIFT;
-		last_phys = first_phys + (npages << PAGE_SHIFT);/* not included */
-
-		/* running pointers */
-		mem = vm_page_queue_free;
-		prevmemp = &vm_page_queue_free;
-
-		while (mem) {
-
-			register vm_offset_t	addr;
-
-			addr = mem->phys_addr;
-
-			if ((addr >= first_phys) &&
-			    (addr < last_phys)) {
-				*prevmemp = (vm_page_t) mem->pageq.next;
-				pages[(addr - first_phys) >> PAGE_SHIFT] = mem;
-				mem->free = FALSE;
-				mem->extcounted = mem->external = external;
-				/*
-				 * Got them all ?
-				 */
-				if (--npages == 0) break;
-			} else
-				prevmemp = (vm_page_t *) &mem->pageq.next;
-
-			mem = (vm_page_t) mem->pageq.next;
-		}
+	for (i = 0; i < nr_pages; i++) {
+		mem[i].free = FALSE;
+		mem[i].extcounted = mem[i].external = 0;
 	}

 	simple_unlock(&vm_page_queue_free_lock);

@@ -1145,63 +926,42 @@
 	 *	it doesn't really matter.
 	 */

-	if ((vm_page_free_count < vm_page_free_min) ||
-	    ((vm_page_free_count < vm_page_free_target) &&
+	if ((vm_page_mem_free() < vm_page_free_min) ||
+	    ((vm_page_mem_free() < vm_page_free_target) &&
 	     (vm_page_inactive_count < vm_page_inactive_target)))
-		thread_wakeup(&vm_page_free_wanted);
-
-	ret = KERN_SUCCESS;
-out:
-	if (alloc_size)
-		kmem_free(kernel_map, (vm_offset_t) bits, alloc_size);
+		thread_wakeup((event_t) &vm_page_free_wanted);

-	return ret;
+	return mem;
 }

 /*
- *	vm_page_release:
+ *	vm_page_free_contig:
  *
- *	Return a page to the free list.
+ *	Return a block of contiguous pages to the free list.
  */

-void vm_page_release(
-	register vm_page_t	mem,
-	boolean_t	external)
+void vm_page_free_contig(vm_page_t mem, vm_size_t size)
 {
+	unsigned int i, order, nr_pages;
+
+	order = vm_page_order(size);
+	nr_pages = 1 << order;
+
 	simple_lock(&vm_page_queue_free_lock);
-	if (mem->free)
-		panic("vm_page_release");
-	mem->free = TRUE;
-	mem->pageq.next = (queue_entry_t) vm_page_queue_free;
-	vm_page_queue_free = mem;
-	vm_page_free_count++;
-	if (external)
-		vm_page_external_count--;

-	/*
-	 *	Check if we should wake up someone waiting for page.
-	 *	But don't bother waking them unless they can allocate.
-	 *
-	 *	We wakeup only one thread, to prevent starvation.
-	 *	Because the scheduling system handles wait queues FIFO,
-	 *	if we wakeup all waiting threads, one greedy thread
-	 *	can starve multiple niceguy threads.  When the threads
-	 *	all wakeup, the greedy threads runs first, grabs the page,
-	 *	and waits for another page.  It will be the first to run
-	 *	when the next page is freed.
-	 *
-	 *	However, there is a slight danger here.
-	 *	The thread we wake might not use the free page.
-	 *	Then the other threads could wait indefinitely
-	 *	while the page goes unused.  To forestall this,
-	 *	the pageout daemon will keep making free pages
-	 *	as long as vm_page_free_wanted is non-zero.
-	 */
+	for (i = 0; i < nr_pages; i++) {
+		if (mem[i].free)
+			panic("vm_page_free_contig");
+
+		mem[i].free = TRUE;
+	}
+
+	vm_page_free_pa(mem, order);

 	if ((vm_page_free_wanted > 0) &&
-	    (vm_page_free_count >= vm_page_free_reserved)) {
+	    (vm_page_mem_free() >= vm_page_free_reserved)) {
 		vm_page_free_wanted--;
-		thread_wakeup_one((event_t) &vm_page_free_count);
+		thread_wakeup_one((event_t) &vm_page_free_avail);
 	}

 	simple_unlock(&vm_page_queue_free_lock);
@@ -1227,11 +987,11 @@ void vm_page_wait(
 	 */

 	simple_lock(&vm_page_queue_free_lock);
-	if ((vm_page_free_count < vm_page_free_target)
+	if ((vm_page_mem_free() < vm_page_free_target)
 	    || (vm_page_external_count > vm_page_external_limit)) {
 		if (vm_page_free_wanted++ == 0)
 			thread_wakeup((event_t)&vm_page_free_wanted);
-		assert_wait((event_t)&vm_page_free_count, FALSE);
+		assert_wait((event_t)&vm_page_free_avail, FALSE);
 		simple_unlock(&vm_page_queue_free_lock);
 		if (continuation != 0) {
 			counter(c_vm_page_wait_block_user++);
@@ -1257,7 +1017,7 @@ vm_page_t vm_page_alloc(
 	vm_object_t	object,
 	vm_offset_t	offset)
 {
-	register vm_page_t	mem;
+	vm_page_t	mem;

 	mem = vm_page_grab(!object->internal);
 	if (mem == VM_PAGE_NULL)
@@ -1279,7 +1039,7 @@ vm_page_t vm_page_alloc(
 *	Object and page queues must be locked prior to entry.
  */
 void vm_page_free(
-	register vm_page_t	mem)
+	vm_page_t	mem)
 {
 	if (mem->free)
 		panic("vm_page_free");
@@ -1310,12 +1070,13 @@ void vm_page_free(
 	 */

 	if (mem->private || mem->fictitious) {
-		vm_page_init(mem, vm_page_fictitious_addr);
+		vm_page_init(mem);
+		mem->phys_addr = vm_page_fictitious_addr;
 		mem->fictitious = TRUE;
 		vm_page_release_fictitious(mem);
 	} else {
 		int external = mem->external && mem->extcounted;
-		vm_page_init(mem, mem->phys_addr);
+		vm_page_init(mem);
 		vm_page_release(mem, external);
 	}
 }
@@ -1330,7 +1091,7 @@ void vm_page_free(
 *	The page's object and the page queues must be locked.
 */
 void vm_page_wire(
-	register vm_page_t	mem)
+	vm_page_t	mem)
 {
 	VM_PAGE_CHECK(mem);

@@ -1351,7 +1112,7 @@ void vm_page_wire(
 *	The page's object and the page queues must be locked.
 */
 void vm_page_unwire(
-	register vm_page_t	mem)
+	vm_page_t	mem)
 {
 	VM_PAGE_CHECK(mem);

@@ -1374,7 +1135,7 @@ void vm_page_unwire(
 *	The page queues must be locked.
 */
 void vm_page_deactivate(
-	register vm_page_t	m)
+	vm_page_t	m)
 {
 	VM_PAGE_CHECK(m);

@@ -1408,7 +1169,7 @@ void vm_page_deactivate(
 */

 void vm_page_activate(
-	register vm_page_t	m)
+	vm_page_t	m)
 {
 	VM_PAGE_CHECK(m);

@@ -1505,10 +1266,10 @@ vm_page_info(
 *	Routine:	vm_page_print		[exported]
 */
 void		vm_page_print(p)
-	vm_page_t	p;
+	const vm_page_t	p;
 {
 	iprintf("Page 0x%X: object 0x%X,", (vm_offset_t) p, (vm_offset_t) p->object);
-	printf(" offset 0x%X", (vm_offset_t) p->offset);
+	printf(" offset 0x%X", p->offset);
 	printf("wire_count %d,", p->wire_count);
 	printf(" %s",
 	       (p->active ? "active" : (p->inactive ? "inactive" : "loose")));
@@ -1533,7 +1294,7 @@ void vm_page_print(p)
 	printf("%s,",
 	       (p->tabled ? "" : "not_tabled"));
 	printf("phys_addr = 0x%X, lock = 0x%X, unlock_request = 0x%X\n",
-	       (vm_offset_t) p->phys_addr,
+	       p->phys_addr,
 	       (vm_offset_t) p->page_lock,
 	       (vm_offset_t) p->unlock_request);
 }
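The virtual-to-physical hash table that vm_page_bootstrap() sets up is untouched by this commit, but it is central to vm_page_insert/lookup/remove above. The presence of vm_page_hash_mask (rather than a modulo by vm_page_bucket_count) suggests the bucket count is rounded up to a power of two. The standalone sketch below illustrates only that masking idea; page_hash() is an invented name, and the kernel's actual vm_page_hash() may mix the bits differently.

    /*
     * Sketch: folding an object/offset pair into a bucket index when
     * the bucket count is a power of two.  Demo code, not Mach code.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned int vm_page_hash_mask;  /* bucket count - 1 */

    static unsigned int
    page_hash(uintptr_t object, uintptr_t offset)  /* hypothetical */
    {
        /* Mix the object pointer with the page number of the offset,
         * then reduce modulo the power-of-two bucket count. */
        return (unsigned int) ((object + (offset >> PAGE_SHIFT))
                               & vm_page_hash_mask);
    }

    int main(void)
    {
        vm_page_hash_mask = 128 - 1;  /* e.g. 128 buckets */
        printf("bucket = %u\n",
               page_hash((uintptr_t) 0xc0ffee00u, 5ul << PAGE_SHIFT));
        return 0;
    }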
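The reworked vm_page_convert() transplants a page's VM identity with a single memcpy that starts at vm_page_header and runs for sizeof(*fict_m) - VM_PAGE_HEADER_SIZE bytes. The point of the offset is that the real page keeps its own allocator-owned state, in particular its phys_addr, which the asserts immediately verify. The toy model below rests on one assumption: that VM_PAGE_HEADER_SIZE is the offset of the first field owned by the VM layer rather than by the physical allocator. All struct and field names here are invented for the demo.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_page {
        /* allocator-private header: stays with the physical page */
        struct toy_page *next_free;
        unsigned short   order;
        unsigned long    phys_addr;
        /* VM state: moves with the logical page */
        void            *object;
        unsigned long    offset;
        int              busy;
    };

    #define TOY_PAGE_HEADER_SIZE offsetof(struct toy_page, object)

    static void
    convert_copy(struct toy_page *real, const struct toy_page *fict)
    {
        /* Copy only the VM part; REAL keeps its own header. */
        memcpy((char *) real + TOY_PAGE_HEADER_SIZE,
               (const char *) fict + TOY_PAGE_HEADER_SIZE,
               sizeof(*real) - TOY_PAGE_HEADER_SIZE);
    }

    int main(void)
    {
        struct toy_page fict = { 0, 0, ~0ul, (void *) 0x1000, 0x2000, 1 };
        struct toy_page real = { 0, 0, 0x123000, 0, 0, 0 };

        convert_copy(&real, &fict);
        /* real keeps its physical address but now carries the mapping */
        printf("phys=%#lx object=%p offset=%#lx\n",
               real.phys_addr, real.object, real.offset);
        return 0;
    }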
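The wakeup-one policy that vm_page_release() and vm_page_free_contig() inherit from the old code pairs with vm_page_wait(): waiters sleep on the vm_page_free_avail address and each freed page wakes at most one of them, precisely to keep one greedy thread from starving the rest. A user-space analogue with POSIX threads, where pthread_cond_signal stands in for thread_wakeup_one() and everything is demo-only, makes the re-check-after-wakeup requirement explicit:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  page_avail = PTHREAD_COND_INITIALIZER;
    static int free_pages;

    static void release_page(void)
    {
        pthread_mutex_lock(&lock);
        free_pages++;
        /* Wake exactly one waiter (cf. thread_wakeup_one), not all. */
        pthread_cond_signal(&page_avail);
        pthread_mutex_unlock(&lock);
    }

    static void wait_for_page(void)
    {
        pthread_mutex_lock(&lock);
        while (free_pages == 0)       /* a wakeup is a hint: re-check */
            pthread_cond_wait(&page_avail, &lock);
        free_pages--;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        release_page();
        wait_for_page();
        printf("got a page\n");
        return 0;
    }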
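Finally, vm_page_grab_contig() and vm_page_free_contig() both derive a buddy-allocator order from a byte size via vm_page_order(), whose definition lies outside this diff. The only contract visible here is that 1 << order pages must cover the request. A stand-in satisfying that contract (assumed, demo-only; order_for_size is an invented name):

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE  4096u   /* assumed for the demo */
    #define PAGE_SHIFT 12u

    static unsigned int
    order_for_size(size_t size)   /* hypothetical vm_page_order() */
    {
        size_t nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int order = 0;

        /* Smallest order such that 1 << order pages cover the size. */
        while (((size_t) 1 << order) < nr_pages)
            order++;
        return order;
    }

    int main(void)
    {
        assert(order_for_size(1) == 0);              /* 1 page  */
        assert(order_for_size(PAGE_SIZE) == 0);      /* 1 page  */
        assert(order_for_size(2 * PAGE_SIZE) == 1);  /* 2 pages */
        assert(order_for_size(3 * PAGE_SIZE) == 2);  /* rounds up to 4 */
        return 0;
    }

Note the cost this rounding implies: a 3-page request consumes a 4-page block, which is why callers that know their sizes tend to ask for power-of-two amounts.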