-rw-r--r--  i386/i386/db_interface.c |  4 ++--
-rw-r--r--  i386/i386/ldt.c          |  8 ++++----
-rw-r--r--  i386/i386/pcb.c          |  6 +++---
-rw-r--r--  i386/i386/vm_param.h     |  6 +++++-
-rw-r--r--  i386/intel/pmap.c        | 18 +++++++++---------
-rw-r--r--  kern/task.c              |  4 ++--
6 files changed, 25 insertions, 21 deletions
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
index 3a331490..5a4ace9f 100644
--- a/i386/i386/db_interface.c
+++ b/i386/i386/db_interface.c
@@ -119,8 +119,8 @@ kern_return_t db_set_debug_state(
 	int i;
 
 	for (i = 0; i <= 3; i++)
-	    if (state->dr[i] < VM_MIN_ADDRESS
-		|| state->dr[i] >= VM_MAX_ADDRESS)
+	    if (state->dr[i] < VM_MIN_USER_ADDRESS
+		|| state->dr[i] >= VM_MAX_USER_ADDRESS)
 		return KERN_INVALID_ARGUMENT;
 
 	pcb->ims.ids = *state;
diff --git a/i386/i386/ldt.c b/i386/i386/ldt.c
index 3f9ac8ff..70fa24e2 100644
--- a/i386/i386/ldt.c
+++ b/i386/i386/ldt.c
@@ -64,13 +64,13 @@ ldt_fill(struct real_descriptor *myldt, struct real_descriptor *mygdt)
 		      (vm_offset_t)&syscall, KERNEL_CS,
 		      ACC_PL_U|ACC_CALL_GATE, 0);
 	fill_ldt_descriptor(myldt, USER_CS,
-			    VM_MIN_ADDRESS,
-			    VM_MAX_ADDRESS-VM_MIN_ADDRESS-4096,
+			    VM_MIN_USER_ADDRESS,
+			    VM_MAX_USER_ADDRESS-VM_MIN_USER_ADDRESS-4096,
 			    /* XXX LINEAR_... */
 			    ACC_PL_U|ACC_CODE_R, SZ_32);
 	fill_ldt_descriptor(myldt, USER_DS,
-			    VM_MIN_ADDRESS,
-			    VM_MAX_ADDRESS-VM_MIN_ADDRESS-4096,
+			    VM_MIN_USER_ADDRESS,
+			    VM_MAX_USER_ADDRESS-VM_MIN_USER_ADDRESS-4096,
 			    ACC_PL_U|ACC_DATA_W, SZ_32);
 
 	/* Activate the LDT. */
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index 924ed08b..3ae9e095 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -622,10 +622,10 @@ kern_return_t thread_setstatus(
 	    int_table = state->int_table;
 	    int_count = state->int_count;
 
-	    if (int_table >= VM_MAX_ADDRESS ||
+	    if (int_table >= VM_MAX_USER_ADDRESS ||
 		int_table +
 			int_count * sizeof(struct v86_interrupt_table)
-		    > VM_MAX_ADDRESS)
+		    > VM_MAX_USER_ADDRESS)
 		return KERN_INVALID_ARGUMENT;
 
 	    thread->pcb->ims.v86s.int_table = int_table;
@@ -834,7 +834,7 @@ thread_set_syscall_return(
 vm_offset_t
 user_stack_low(vm_size_t stack_size)
 {
-	return (VM_MAX_ADDRESS - stack_size);
+	return (VM_MAX_USER_ADDRESS - stack_size);
 }
 
 /*
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index 314fdb35..5e7f149a 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -31,6 +31,10 @@
 #include <xen/public/xen.h>
 #endif
 
+/* To avoid ambiguity in kernel code, make the name explicit */
+#define VM_MIN_USER_ADDRESS VM_MIN_ADDRESS
+#define VM_MAX_USER_ADDRESS VM_MAX_ADDRESS
+
 /* The kernel address space is usually 1GB, usually starting at virtual address 0. */
 /* This can be changed freely to separate kernel addresses from user addresses
  * for better trace support in kdb; the _START symbol has to be offset by the
@@ -77,7 +81,7 @@
 #else
 /* On x86, the kernel virtual address space is actually located
    at high linear addresses. */
-#define LINEAR_MIN_KERNEL_ADDRESS	(VM_MAX_ADDRESS)
+#define LINEAR_MIN_KERNEL_ADDRESS	(VM_MAX_USER_ADDRESS)
 #define LINEAR_MAX_KERNEL_ADDRESS	(0xffffffffUL)
 #endif
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 1f1d8d9b..698b95db 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -1342,7 +1342,7 @@ pmap_t pmap_create(vm_size_t size)
 			);
 	}
 #ifdef __x86_64__
-	// TODO alloc only PDPTE for the user range VM_MIN_ADDRESS, VM_MAX_ADDRESS
+	// TODO alloc only PDPTE for the user range VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS
 	// and keep the same for kernel range, in l4 table we have different entries
 	p->l4base = (pt_entry_t *) kmem_cache_alloc(&l4_cache);
 	if (p->l4base == NULL)
@@ -1350,7 +1350,7 @@ pmap_t pmap_create(vm_size_t size)
 	memset(p->l4base, 0, INTEL_PGBYTES);
 	WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
 		  pa_to_pte(kvtophys((vm_offset_t) pdp_kernel)) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_USER);
-#if lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_ADDRESS)
+#if lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_USER_ADDRESS)
 	// TODO kernel vm and user vm are not in the same l4 entry, so add the user one
 #endif
 #ifdef MACH_PV_PAGETABLES
@@ -1362,7 +1362,7 @@ pmap_t pmap_create(vm_size_t size)
 		memset(p->user_pdpbase, 0, INTEL_PGBYTES);
 		{
 			int i;
-			for (i = 0; i < lin2pdpnum(VM_MAX_ADDRESS); i++)
+			for (i = 0; i < lin2pdpnum(VM_MAX_USER_ADDRESS); i++)
 				WRITE_PTE(&p->user_pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) page_dir[i])) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
 		}
 		// FIXME: use kmem_cache_alloc instead
@@ -1440,7 +1440,7 @@ void pmap_destroy(pmap_t p)
 #ifdef __x86_64__
 #ifdef USER32
 	/* In this case we know we have one PDP for user space */
-	pt_entry_t *pdp = (pt_entry_t *) ptetokv(p->l4base[lin2l4num(VM_MIN_ADDRESS)]);
+	pt_entry_t *pdp = (pt_entry_t *) ptetokv(p->l4base[lin2l4num(VM_MIN_USER_ADDRESS)]);
 #else
 #error "TODO do 64-bit userspace need more that 512G?"
 #endif /* USER32 */
@@ -1502,8 +1502,8 @@ void pmap_destroy(pmap_t p)
 #endif	/* MACH_PV_PAGETABLES */
 
 #ifdef __x86_64__
-	kmem_cache_free(&pdpt_cache, (vm_offset_t) pmap_ptp(p, VM_MIN_ADDRESS));
-#if lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_ADDRESS)
+	kmem_cache_free(&pdpt_cache, (vm_offset_t) pmap_ptp(p, VM_MIN_USER_ADDRESS));
+#if lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_USER_ADDRESS)
 	// TODO kernel vm and user vm are not in the same l4 entry
 #endif
 	kmem_cache_free(&l4_cache, (vm_offset_t) p->l4base);
@@ -2449,7 +2449,7 @@ void pmap_collect(pmap_t p)
 #ifdef __x86_64__
 #ifdef USER32
 	/* In this case we know we have one PDP for user space */
-	pdp = (pt_entry_t *) ptetokv(p->l4base[lin2l4num(VM_MIN_ADDRESS)]);
+	pdp = (pt_entry_t *) ptetokv(p->l4base[lin2l4num(VM_MIN_USER_ADDRESS)]);
 #else
 #error "TODO do 64-bit userspace need more that 512G?"
 #endif /* USER32 */
@@ -2556,7 +2556,7 @@ void pmap_collect(pmap_t p)
 #if PAE
 	}
 #endif
-	PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+	PMAP_UPDATE_TLBS(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
 
 	PMAP_READ_UNLOCK(p, spl);
 	return;
@@ -2967,7 +2967,7 @@ void signal_cpus(
 	     *	indicate overflow.
 	     */
 	    update_list_p->item[UPDATE_LIST_SIZE-1].pmap  = kernel_pmap;
-	    update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
+	    update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_USER_ADDRESS;
 	    update_list_p->item[UPDATE_LIST_SIZE-1].end   = VM_MAX_KERNEL_ADDRESS;
 	}
 	else {
diff --git a/kern/task.c b/kern/task.c
index 6bd50983..be385a1b 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -122,8 +122,8 @@ task_create_kernel(
 		new_task->map = VM_MAP_NULL;
 	else {
 		new_task->map = vm_map_create(new_pmap,
-					      round_page(VM_MIN_ADDRESS),
-					      trunc_page(VM_MAX_ADDRESS));
+					      round_page(VM_MIN_USER_ADDRESS),
+					      trunc_page(VM_MAX_USER_ADDRESS));
 		if (new_task->map == VM_MAP_NULL)
 			pmap_destroy(new_pmap);
 	}
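For context, the vm_param.h hunk only introduces aliases: VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS expand to the existing VM_MIN_ADDRESS and VM_MAX_ADDRESS, so every other hunk in this commit is a pure rename of the user-space bounds checks, with no behavioral change. The sketch below is a minimal, self-contained illustration of that pattern, not code from the tree: the bound values (0x00010000UL and 0xC0000000UL) are placeholders chosen for the example, and check_user_range() is a hypothetical helper modeled on the db_set_debug_state() check above.

/* Placeholder bounds for illustration only; the real values come from
 * i386/i386/vm_param.h and depend on the build configuration. */
#include <stdio.h>
#include <stdint.h>

#define VM_MIN_ADDRESS 0x00010000UL
#define VM_MAX_ADDRESS 0xC0000000UL

/* The aliases added by the commit: same values, explicit "user" name,
 * so call sites read unambiguously as user-space range checks. */
#define VM_MIN_USER_ADDRESS VM_MIN_ADDRESS
#define VM_MAX_USER_ADDRESS VM_MAX_ADDRESS

/* Hypothetical helper modeled on the db_set_debug_state() bounds check:
 * accept an address only if it lies inside the user range. */
static int check_user_range(uintptr_t addr)
{
	if (addr < VM_MIN_USER_ADDRESS
	    || addr >= VM_MAX_USER_ADDRESS)
		return -1;	/* the kernel returns KERN_INVALID_ARGUMENT here */
	return 0;
}

int main(void)
{
	/* Inside the user range: prints 0. */
	printf("0x10000000 -> %d\n", check_user_range(0x10000000UL));
	/* Above VM_MAX_USER_ADDRESS (kernel side): prints -1. */
	printf("0xD0000000 -> %d\n", check_user_range(0xD0000000UL));
	return 0;
}

Because the new names are plain macro aliases, existing code that still uses VM_MIN_ADDRESS/VM_MAX_ADDRESS keeps compiling unchanged; the rename can therefore be applied incrementally, as this commit does.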