Diffstat (limited to 'vm/vm_map.c')
-rw-r--r--	vm/vm_map.c	422
1 file changed, 0 insertions, 422 deletions
diff --git a/vm/vm_map.c b/vm/vm_map.c
index c060196c..fcd62659 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -34,8 +34,6 @@
  *	Virtual memory mapping module.
  */
 
-#include <norma_ipc.h>
-
 #include <mach/kern_return.h>
 #include <mach/port.h>
 #include <mach/vm_attributes.h>
@@ -1657,67 +1655,6 @@ kern_return_t vm_map_delete(map, start, end)
 		entry = first_entry->vme_next;
 	else {
 		entry = first_entry;
-#if	NORMA_IPC_xxx
-	/*
-	 * XXX Had to disable this code because:
-
-	_vm_map_delete(c0804b78,c2198000,c219a000,0,c219a000)+df
-	[vm/vm_map.c:2007]
-	_vm_map_remove(c0804b78,c2198000,c219a000,c0817834,
-	c081786c)+42 [vm/vm_map.c:2094]
-	_kmem_io_map_deallocate(c0804b78,c2198000,2000,c0817834,
-	c081786c)+43 [vm/vm_kern.c:818]
-	_device_write_dealloc(c081786c)+117 [device/ds_routines.c:814]
-	_ds_write_done(c081786c,0)+2e [device/ds_routines.c:848]
-	_io_done_thread_continue(c08150c0,c21d4e14,c21d4e30,c08150c0,
-	c080c114)+14 [device/ds_routines.c:1350]
-
-	 */
-		if (start > entry->vme_start
-		    && end == entry->vme_end
-		    && ! entry->wired_count	/* XXX ??? */
-		    && ! entry->is_shared
-		    && ! entry->projected_on
-		    && ! entry->is_sub_map) {
-			extern vm_object_t	kernel_object;
-			register vm_object_t	object = entry->object.vm_object;
-
-			/*
-			 * The region to be deleted lives at the end
-			 * of this entry, and thus all we have to do is
-			 * truncate the entry.
-			 *
-			 * This special case is necessary if we want
-			 * coalescing to do us any good.
-			 *
-			 * XXX Do we have to adjust object size?
-			 */
-			if (object == kernel_object) {
-				vm_object_lock(object);
-				vm_object_page_remove(object,
-					entry->offset + start,
-					entry->offset +
-					(end - start));
-				vm_object_unlock(object);
-			} else if (entry->is_shared) {
-				vm_object_pmap_remove(object,
-					entry->offset + start,
-					entry->offset +
-					(end - start));
-			} else {
-				pmap_remove(map->pmap, start, end);
-			}
-			object->size -= (end - start);	/* XXX */
-
-			entry->vme_end = start;
-			map->size -= (end - start);
-
-			if (map->wait_for_space) {
-				thread_wakeup((event_t) map);
-			}
-			return KERN_SUCCESS;
-		}
-#endif	/* NORMA_IPC */
 		vm_map_clip_start(map, entry, start);
 
 		/*
@@ -2109,11 +2046,7 @@ kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
 	 *	support page lists LATER.
 	 */
 
-#if	NORMA_IPC
-	vm_map_convert_from_page_list(copy);
-#else
 	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
-#endif
 
 	/*
 	 *	Currently this routine only handles page-aligned
@@ -4887,358 +4820,3 @@ void vm_map_copy_print(copy)
 	indent -=2;
 }
 #endif	/* MACH_KDB */
-
-#if	NORMA_IPC
-/*
- *	This should one day be eliminated;
- *	we should always construct the right flavor of copy object
- *	the first time.  Troublesome areas include vm_read, where vm_map_copyin
- *	is called without knowing whom the copy object is for.
- *	There are also situations where we do want a lazy data structure
- *	even if we are sending to a remote port...
- */
-
-/*
- *	Convert a copy to a page list.  The copy argument is in/out
- *	because we probably have to allocate a new vm_map_copy structure.
- *	We take responsibility for discarding the old structure and
- *	use a continuation to do so.  Postponing this discard ensures
- *	that the objects containing the pages we've marked busy will stick
- *	around.
- */
-kern_return_t
-vm_map_convert_to_page_list(caller_copy)
-	vm_map_copy_t	*caller_copy;
-{
-	vm_map_entry_t	entry, next_entry;
-	vm_offset_t	va;
-	vm_offset_t	offset;
-	vm_object_t	object;
-	kern_return_t	result;
-	vm_map_copy_t	copy, new_copy;
-	int		i, num_pages = 0;
-
-	zone_t		entry_zone;
-
-	copy = *caller_copy;
-
-	/*
-	 * We may not have to do anything,
-	 * or may not be able to do anything.
-	 */
-	if (copy == VM_MAP_COPY_NULL || copy->type == VM_MAP_COPY_PAGE_LIST) {
-		return KERN_SUCCESS;
-	}
-	if (copy->type == VM_MAP_COPY_OBJECT) {
-		return vm_map_convert_to_page_list_from_object(caller_copy);
-	}
-	if (copy->type != VM_MAP_COPY_ENTRY_LIST) {
-		panic("vm_map_convert_to_page_list: copy type %d!\n",
-		      copy->type);
-	}
-
-	/*
-	 * Allocate the new copy.  Set its continuation to
-	 * discard the old one.
-	 */
-	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
-	new_copy->type = VM_MAP_COPY_PAGE_LIST;
-	new_copy->cpy_npages = 0;
-	new_copy->offset = copy->offset;
-	new_copy->size = copy->size;
-	new_copy->cpy_cont = vm_map_copy_discard_cont;
-	new_copy->cpy_cont_args = (char *) copy;
-
-	/*
-	 * Iterate over entries.
-	 */
-	for (entry = vm_map_copy_first_entry(copy);
-	     entry != vm_map_copy_to_entry(copy);
-	     entry = entry->vme_next) {
-
-		object = entry->object.vm_object;
-		offset = entry->offset;
-		/*
-		 * Iterate over pages.
-		 */
-		for (va = entry->vme_start;
-		     va < entry->vme_end;
-		     va += PAGE_SIZE, offset += PAGE_SIZE) {
-
-			vm_page_t	m;
-
-			if (new_copy->cpy_npages == VM_MAP_COPY_PAGE_LIST_MAX) {
-				/*
-				 * What a mess.  We need a continuation
-				 * to do the page list, but also one
-				 * to discard the old copy.  The right
-				 * thing to do is probably to copy
-				 * out the old copy into the kernel
-				 * map (or some temporary task holding
-				 * map if we're paranoid about large
-				 * copies), and then copyin the page
-				 * list that we really wanted with
-				 * src_destroy.  LATER.
-				 */
-				panic("vm_map_convert_to_page_list: num\n");
-			}
-
-			/*
-			 * Try to find the page of data.
-			 */
-			vm_object_lock(object);
-			vm_object_paging_begin(object);
-			if (((m = vm_page_lookup(object, offset)) !=
-			    VM_PAGE_NULL) && !m->busy && !m->fictitious &&
-			    !m->absent && !m->error) {
-
-				/*
-				 * This is the page.  Mark it busy
-				 * and keep the paging reference on
-				 * the object whilst we do our thing.
-				 */
-				m->busy = TRUE;
-
-				/*
-				 * Also write-protect the page, so
-				 * that the map`s owner cannot change
-				 * the data.  The busy bit will prevent
-				 * faults on the page from succeeding
-				 * until the copy is released; after
-				 * that, the page can be re-entered
-				 * as writable, since we didn`t alter
-				 * the map entry.  This scheme is a
-				 * cheap copy-on-write.
-				 *
-				 * Don`t forget the protection and
-				 * the page_lock value!
-				 */
-
-				pmap_page_protect(m->phys_addr,
-						  entry->protection
-						  & ~m->page_lock
-						  & ~VM_PROT_WRITE);
-
-			}
-			else {
-				vm_prot_t	result_prot;
-				vm_page_t	top_page;
-				kern_return_t	kr;
-
-retry:
-				result_prot = VM_PROT_READ;
-
-				kr = vm_fault_page(object, offset,
-						   VM_PROT_READ, FALSE, FALSE,
-						   &result_prot, &m, &top_page,
-						   FALSE, (void (*)()) 0);
-				if (kr == VM_FAULT_MEMORY_SHORTAGE) {
-					VM_PAGE_WAIT((void (*)()) 0);
-					vm_object_lock(object);
-					vm_object_paging_begin(object);
-					goto retry;
-				}
-				if (kr != VM_FAULT_SUCCESS) {
-					/* XXX what about data_error? */
-					vm_object_lock(object);
-					vm_object_paging_begin(object);
-					goto retry;
-				}
-				if (top_page != VM_PAGE_NULL) {
-					vm_object_lock(object);
-					VM_PAGE_FREE(top_page);
-					vm_object_paging_end(object);
-					vm_object_unlock(object);
-				}
-			}
-			assert(m);
-			m->busy = TRUE;
-			new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
-			vm_object_unlock(object);
-		}
-	}
-
-	*caller_copy = new_copy;
-	return KERN_SUCCESS;
-}
-
-kern_return_t
-vm_map_convert_to_page_list_from_object(caller_copy)
-	vm_map_copy_t	*caller_copy;
-{
-	vm_object_t	object;
-	vm_offset_t	offset;
-	vm_map_copy_t	copy, new_copy;
-
-	copy = *caller_copy;
-	assert(copy->type == VM_MAP_COPY_OBJECT);
-	object = copy->cpy_object;
-	assert(object->size == round_page(object->size));
-
-	/*
-	 * Allocate the new copy.  Set its continuation to
-	 * discard the old one.
-	 */
-	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
-	new_copy->type = VM_MAP_COPY_PAGE_LIST;
-	new_copy->cpy_npages = 0;
-	new_copy->offset = copy->offset;
-	new_copy->size = copy->size;
-	new_copy->cpy_cont = vm_map_copy_discard_cont;
-	new_copy->cpy_cont_args = (char *) copy;
-
-	/*
-	 * XXX	memory_object_lock_request can probably bust this
-	 * XXX	See continuation comment in previous routine for solution.
-	 */
-	assert(object->size <= VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE);
-
-	for (offset = 0; offset < object->size; offset += PAGE_SIZE) {
-		vm_page_t m;
-
-		/*
-		 * Try to find the page of data.
-		 */
-		vm_object_lock(object);
-		vm_object_paging_begin(object);
-		m = vm_page_lookup(object, offset);
-		if ((m != VM_PAGE_NULL) && !m->busy && !m->fictitious &&
-		    !m->absent && !m->error) {
-
-			/*
-			 * This is the page.  Mark it busy
-			 * and keep the paging reference on
-			 * the object whilst we do our thing.
-			 */
-			m->busy = TRUE;
-		}
-		else {
-			vm_prot_t	result_prot;
-			vm_page_t	top_page;
-			kern_return_t	kr;
-
-retry:
-			result_prot = VM_PROT_READ;
-
-			kr = vm_fault_page(object, offset,
-					   VM_PROT_READ, FALSE, FALSE,
-					   &result_prot, &m, &top_page,
-					   FALSE, (void (*)()) 0);
-			if (kr == VM_FAULT_MEMORY_SHORTAGE) {
-				VM_PAGE_WAIT((void (*)()) 0);
-				vm_object_lock(object);
-				vm_object_paging_begin(object);
-				goto retry;
-			}
-			if (kr != VM_FAULT_SUCCESS) {
-				/* XXX what about data_error? */
-				vm_object_lock(object);
-				vm_object_paging_begin(object);
-				goto retry;
-			}
-
-			if (top_page != VM_PAGE_NULL) {
-				vm_object_lock(object);
-				VM_PAGE_FREE(top_page);
-				vm_object_paging_end(object);
-				vm_object_unlock(object);
-			}
-		}
-		assert(m);
-		m->busy = TRUE;
-		new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
-		vm_object_unlock(object);
-	}
-
-	*caller_copy = new_copy;
-	return (KERN_SUCCESS);
-}
-
-kern_return_t
-vm_map_convert_from_page_list(copy)
-	vm_map_copy_t	copy;
-{
-	vm_object_t	object;
-	int		i;
-	vm_map_entry_t	new_entry;
-	vm_page_t	*page_list;
-
-	/*
-	 * Check type of copy object.
-	 */
-	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
-		return KERN_SUCCESS;
-	}
-	if (copy->type == VM_MAP_COPY_OBJECT) {
-		printf("vm_map_convert_from_page_list: COPY_OBJECT?");
-		return KERN_SUCCESS;
-	}
-	if (copy->type != VM_MAP_COPY_PAGE_LIST) {
-		panic("vm_map_convert_from_page_list 0x%x %d",
-		      copy,
-		      copy->type);
-	}
-
-	/*
-	 * Make sure the pages are loose.  This may be
-	 * a "Can't Happen", but just to be safe ...
-	 */
-	page_list = &copy->cpy_page_list[0];
-	if ((*page_list)->tabled)
-		vm_map_copy_steal_pages(copy);
-
-	/*
-	 * Create object, and stuff pages into it.
-	 */
-	object = vm_object_allocate(copy->cpy_npages);
-	for (i = 0; i < copy->cpy_npages; i++) {
-		register vm_page_t m = *page_list++;
-		vm_page_insert(m, object, i * PAGE_SIZE);
-		m->busy = FALSE;
-		m->dirty = TRUE;
-		vm_page_activate(m);
-	}
-
-	/*
-	 * XXX	If this page list contained a continuation, then
-	 * XXX	we're screwed.  The right thing to do is probably do
-	 * XXX	the copyout, and then copyin the entry list we really
-	 * XXX	wanted.
-	 */
-	if (vm_map_copy_has_cont(copy))
-		panic("convert_from_page_list: continuation");
-
-	/*
-	 * Change type of copy object
-	 */
-	vm_map_copy_first_entry(copy) =
-		vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
-	copy->type = VM_MAP_COPY_ENTRY_LIST;
-	copy->cpy_hdr.nentries = 0;
-	copy->cpy_hdr.entries_pageable = TRUE;
-
-	/*
-	 * Allocate and initialize an entry for object
-	 */
-	new_entry = vm_map_copy_entry_create(copy);
-	new_entry->vme_start = trunc_page(copy->offset);
-	new_entry->vme_end = round_page(copy->offset + copy->size);
-	new_entry->object.vm_object = object;
-	new_entry->offset = 0;
-	new_entry->is_shared = FALSE;
-	new_entry->is_sub_map = FALSE;
-	new_entry->needs_copy = FALSE;
-	new_entry->protection = VM_PROT_DEFAULT;
-	new_entry->max_protection = VM_PROT_ALL;
-	new_entry->inheritance = VM_INHERIT_DEFAULT;
-	new_entry->wired_count = 0;
-	new_entry->user_wired_count = 0;
-	new_entry->projected_on = 0;
-
-	/*
-	 * Insert entry into copy object, and return.
-	 */
-	vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), new_entry);
-	return(KERN_SUCCESS);
-}
-#endif	/* NORMA_IPC */
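The removed conversion routines all follow one pattern: each page is marked busy and write-protected (the "cheap copy-on-write" described in the removed comments), and the old copy structure is not freed immediately but handed to a continuation (cpy_cont = vm_map_copy_discard_cont), so the objects owning the busy pages stay alive until the in-flight page list is released. The sketch below models only that ownership pattern in self-contained C; every type and name in it (struct copy, discard_cont, convert_to_page_list, PAGE_LIST_MAX) is an illustrative stand-in, not the real Mach VM interface.

/*
 * Toy model (not kernel code) of the continuation-based discard
 * used by the removed vm_map_convert_to_page_list(): build a new
 * page-list copy and defer freeing the old copy until the page
 * list is released.  All names here are hypothetical stand-ins.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_LIST_MAX 64

struct page {
	int busy;            /* pinned while the copy is in flight */
	int write_protected; /* cheap copy-on-write: owner faults on write */
};

struct copy;
typedef void (*copy_cont_t)(struct copy *);

struct copy {
	struct page *pages[PAGE_LIST_MAX];
	int npages;
	copy_cont_t cont;       /* run when this copy is discarded */
	struct copy *cont_args; /* the old copy, kept alive until then */
};

/* Continuation: discard the old copy once the page list is released. */
static void discard_cont(struct copy *old)
{
	printf("discarding old copy (%d pages)\n", old->npages);
	free(old);
}

static struct copy *convert_to_page_list(struct copy *old,
					 struct page *backing, int n)
{
	struct copy *new_copy = calloc(1, sizeof *new_copy);
	assert(new_copy != NULL && n <= PAGE_LIST_MAX);

	for (int i = 0; i < n; i++) {
		/* Busy the page and write-protect it: the owner can
		 * neither page it out nor change the data while the
		 * page list is in transit. */
		backing[i].busy = 1;
		backing[i].write_protected = 1;
		new_copy->pages[new_copy->npages++] = &backing[i];
	}
	/* Postpone freeing the old structure via the continuation. */
	new_copy->cont = discard_cont;
	new_copy->cont_args = old;
	return new_copy;
}

int main(void)
{
	struct page backing[4] = {{0}};
	struct copy *old = calloc(1, sizeof *old);
	struct copy *page_list = convert_to_page_list(old, backing, 4);

	/* Releasing the page list runs the continuation, freeing 'old'. */
	page_list->cont(page_list->cont_args);
	free(page_list);
	return 0;
}

The point of routing the discard through the continuation, rather than freeing the old entry-list copy eagerly, is ordering: the old copy owns the objects whose pages were just marked busy, so it must outlive the page list that references those pages.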