diff options
Diffstat (limited to 'kern/thread.c')
-rw-r--r-- | kern/thread.c | 206 |
1 file changed, 116 insertions, 90 deletions
diff --git a/kern/thread.c b/kern/thread.c index 79f526a2..ce44ed14 100644 --- a/kern/thread.c +++ b/kern/thread.c @@ -57,9 +57,11 @@ #include <kern/slab.h> #include <kern/mach_clock.h> #include <vm/vm_kern.h> +#include <vm/vm_user.h> #include <ipc/ipc_kmsg.h> #include <ipc/ipc_port.h> #include <ipc/mach_msg.h> +#include <ipc/mach_port.h> #include <machine/machspl.h> /* for splsched */ #include <machine/pcb.h> #include <machine/thread.h> /* for MACHINE_STACK */ @@ -68,19 +70,15 @@ thread_t active_threads[NCPUS]; vm_offset_t active_stacks[NCPUS]; struct kmem_cache thread_cache; +struct kmem_cache thread_stack_cache; queue_head_t reaper_queue; decl_simple_lock_data(, reaper_lock) -extern void pcb_module_init(void); - /* private */ struct thread thread_template; #if MACH_DEBUG -void stack_init(vm_offset_t stack); /* forward */ -void stack_finalize(vm_offset_t stack); /* forward */ - #define STACK_MARKER 0xdeadbeefU boolean_t stack_check_usage = FALSE; decl_simple_lock_data(, stack_usage_lock) @@ -127,10 +125,6 @@ vm_offset_t stack_free_list; /* splsched only */ unsigned int stack_free_count = 0; /* splsched only */ unsigned int stack_free_limit = 1; /* patchable */ -unsigned int stack_alloc_hits = 0; /* debugging */ -unsigned int stack_alloc_misses = 0; /* debugging */ -unsigned int stack_alloc_max = 0; /* debugging */ - /* * The next field is at the base of the stack, * so the low end is left unsullied. @@ -149,7 +143,7 @@ boolean_t stack_alloc_try( thread_t thread, void (*resume)(thread_t)) { - register vm_offset_t stack; + vm_offset_t stack; stack_lock(); stack = stack_free_list; @@ -163,10 +157,10 @@ boolean_t stack_alloc_try( if (stack != 0) { stack_attach(thread, stack, resume); - stack_alloc_hits++; + counter(c_stack_alloc_hits++); return TRUE; } else { - stack_alloc_misses++; + counter(c_stack_alloc_misses++); return FALSE; } } @@ -178,7 +172,7 @@ boolean_t stack_alloc_try( * May block. 
*/ -void stack_alloc( +kern_return_t stack_alloc( thread_t thread, void (*resume)(thread_t)) { @@ -202,22 +196,15 @@ void stack_alloc( (void) splx(s); if (stack == 0) { - /* - * Kernel stacks should be naturally aligned, - * so that it is easy to find the starting/ending - * addresses of a stack given an address in the middle. - */ - - if (kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE) - != KERN_SUCCESS) - panic("stack_alloc"); - + stack = kmem_cache_alloc(&thread_stack_cache); + assert(stack != 0); #if MACH_DEBUG stack_init(stack); #endif /* MACH_DEBUG */ } stack_attach(thread, stack, resume); + return KERN_SUCCESS; } /* @@ -230,7 +217,7 @@ void stack_alloc( void stack_free( thread_t thread) { - register vm_offset_t stack; + vm_offset_t stack; stack = stack_detach(thread); @@ -238,8 +225,11 @@ void stack_free( stack_lock(); stack_next(stack) = stack_free_list; stack_free_list = stack; - if (++stack_free_count > stack_alloc_max) - stack_alloc_max = stack_free_count; + stack_free_count += 1; +#if MACH_COUNTERS + if (stack_free_count > c_stack_alloc_max) + c_stack_alloc_max = stack_free_count; +#endif /* MACH_COUNTERS */ stack_unlock(); } } @@ -253,7 +243,7 @@ void stack_free( void stack_collect(void) { - register vm_offset_t stack; + vm_offset_t stack; spl_t s; s = splsched(); @@ -268,7 +258,7 @@ void stack_collect(void) #if MACH_DEBUG stack_finalize(stack); #endif /* MACH_DEBUG */ - kmem_free(kmem_map, stack, KERNEL_STACK_SIZE); + kmem_cache_free(&thread_stack_cache, stack); s = splsched(); stack_lock(); @@ -285,7 +275,7 @@ void stack_collect(void) */ void stack_privilege( - register thread_t thread) + thread_t thread) { /* * This implementation only works for the current thread. 
@@ -301,7 +291,15 @@ void stack_privilege( void thread_init(void) { kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0, - NULL, NULL, NULL, 0); + NULL, 0); + /* + * Kernel stacks should be naturally aligned, + * so that it is easy to find the starting/ending + * addresses of a stack given an address in the middle. + */ + kmem_cache_init(&thread_stack_cache, "thread_stack", + KERNEL_STACK_SIZE, KERNEL_STACK_SIZE, + NULL, 0); /* * Fill in a template thread for fast initialization. @@ -398,11 +396,11 @@ void thread_init(void) } kern_return_t thread_create( - register task_t parent_task, + task_t parent_task, thread_t *child_thread) /* OUT */ { - register thread_t new_thread; - register processor_set_t pset; + thread_t new_thread; + processor_set_t pset; if (parent_task == TASK_NULL) return KERN_INVALID_ARGUMENT; @@ -433,7 +431,7 @@ kern_return_t thread_create( * Create a pcb. The kernel stack is created later, * when the thread is swapped-in. */ - pcb_init(new_thread); + pcb_init(parent_task, new_thread); ipc_thread_init(new_thread); @@ -575,11 +573,11 @@ kern_return_t thread_create( unsigned int thread_deallocate_stack = 0; void thread_deallocate( - register thread_t thread) + thread_t thread) { spl_t s; - register task_t task; - register processor_set_t pset; + task_t task; + processor_set_t pset; time_value_t user_time, system_time; @@ -710,7 +708,7 @@ void thread_deallocate( } void thread_reference( - register thread_t thread) + thread_t thread) { spl_t s; @@ -744,10 +742,10 @@ void thread_reference( * since it needs a kernel stack to execute.) 
*/ kern_return_t thread_terminate( - register thread_t thread) + thread_t thread) { - register thread_t cur_thread = current_thread(); - register task_t cur_task; + thread_t cur_thread = current_thread(); + task_t cur_task; spl_t s; if (thread == THREAD_NULL) @@ -850,6 +848,28 @@ kern_return_t thread_terminate( return KERN_SUCCESS; } +kern_return_t thread_terminate_release( + thread_t thread, + task_t task, + mach_port_t thread_name, + mach_port_t reply_port, + vm_offset_t address, + vm_size_t size) +{ + if (task == NULL) + return KERN_INVALID_ARGUMENT; + + mach_port_deallocate(task->itk_space, thread_name); + + if (reply_port != MACH_PORT_NULL) + mach_port_destroy(task->itk_space, reply_port); + + if ((address != 0) || (size != 0)) + vm_deallocate(task->map, address, size); + + return thread_terminate(thread); +} + /* * thread_force_terminate: * @@ -859,7 +879,7 @@ kern_return_t thread_terminate( */ void thread_force_terminate( - register thread_t thread) + thread_t thread) { boolean_t deallocate_here; spl_t s; @@ -901,11 +921,11 @@ thread_force_terminate( * */ kern_return_t thread_halt( - register thread_t thread, + thread_t thread, boolean_t must_halt) { - register thread_t cur_thread = current_thread(); - register kern_return_t ret; + thread_t cur_thread = current_thread(); + kern_return_t ret; spl_t s; if (thread == cur_thread) @@ -949,7 +969,7 @@ kern_return_t thread_halt( * operation can never cause a deadlock.) 
*/ if (cur_thread->ast & AST_HALT) { - thread_wakeup_with_result((event_t)&cur_thread->wake_active, + thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(cur_thread), THREAD_INTERRUPTED); thread_unlock(thread); thread_unlock(cur_thread); @@ -987,7 +1007,7 @@ kern_return_t thread_halt( */ while ((thread->ast & AST_HALT) && (!(thread->state & TH_HALTED))) { thread->wake_active = TRUE; - thread_sleep((event_t) &thread->wake_active, + thread_sleep(TH_EV_WAKE_ACTIVE(thread), simple_lock_addr(thread->lock), TRUE); if (thread->state & TH_HALTED) { @@ -1026,7 +1046,7 @@ kern_return_t thread_halt( s = splsched(); thread_lock(thread); thread_ast_clear(thread, AST_HALT); - thread_wakeup_with_result((event_t)&thread->wake_active, + thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(thread), THREAD_INTERRUPTED); thread_unlock(thread); (void) splx(s); @@ -1104,7 +1124,7 @@ kern_return_t thread_halt( } } -void walking_zombie(void) +void __attribute__((noreturn)) walking_zombie(void) { panic("the zombie walks!"); } @@ -1113,9 +1133,9 @@ void walking_zombie(void) * Thread calls this routine on exit from the kernel when it * notices a halt request. */ -void thread_halt_self(void) +void thread_halt_self(continuation_t continuation) { - register thread_t thread = current_thread(); + thread_t thread = current_thread(); spl_t s; if (thread->ast & AST_TERMINATE) { @@ -1130,7 +1150,7 @@ void thread_halt_self(void) s = splsched(); simple_lock(&reaper_lock); - enqueue_tail(&reaper_queue, (queue_entry_t) thread); + enqueue_tail(&reaper_queue, &(thread->links)); simple_unlock(&reaper_lock); thread_lock(thread); @@ -1154,7 +1174,7 @@ void thread_halt_self(void) thread_unlock(thread); splx(s); counter(c_thread_halt_self_block++); - thread_block(thread_exception_return); + thread_block(continuation); /* * thread_release resets TH_HALTED. */ @@ -1169,7 +1189,7 @@ void thread_halt_self(void) * suspends is maintained. 
*/ void thread_hold( - register thread_t thread) + thread_t thread) { spl_t s; @@ -1191,11 +1211,11 @@ void thread_hold( */ kern_return_t thread_dowait( - register thread_t thread, + thread_t thread, boolean_t must_halt) { - register boolean_t need_wakeup; - register kern_return_t ret = KERN_SUCCESS; + boolean_t need_wakeup; + kern_return_t ret = KERN_SUCCESS; spl_t s; if (thread == current_thread()) @@ -1265,7 +1285,7 @@ thread_dowait( * Check for failure if interrupted. */ thread->wake_active = TRUE; - thread_sleep((event_t) &thread->wake_active, + thread_sleep(TH_EV_WAKE_ACTIVE(thread), simple_lock_addr(thread->lock), TRUE); thread_lock(thread); if ((current_thread()->wait_result != THREAD_AWAKENED) && @@ -1289,13 +1309,13 @@ thread_dowait( (void) splx(s); if (need_wakeup) - thread_wakeup((event_t) &thread->wake_active); + thread_wakeup(TH_EV_WAKE_ACTIVE(thread)); return ret; } void thread_release( - register thread_t thread) + thread_t thread) { spl_t s; @@ -1314,9 +1334,9 @@ void thread_release( } kern_return_t thread_suspend( - register thread_t thread) + thread_t thread) { - register boolean_t hold; + boolean_t hold; spl_t spl; if (thread == THREAD_NULL) @@ -1327,9 +1347,9 @@ kern_return_t thread_suspend( thread_lock(thread); /* Wait for thread to get interruptible */ while (thread->state & TH_UNINT) { - assert_wait(&thread->state, TRUE); + assert_wait(TH_EV_STATE(thread), TRUE); thread_unlock(thread); - thread_block(NULL); + thread_block(thread_no_continuation); thread_lock(thread); } if (thread->user_stop_count++ == 0) { @@ -1360,9 +1380,9 @@ kern_return_t thread_suspend( kern_return_t thread_resume( - register thread_t thread) + thread_t thread) { - register kern_return_t ret; + kern_return_t ret; spl_t s; if (thread == THREAD_NULL) @@ -1398,7 +1418,7 @@ kern_return_t thread_resume( * Return thread's machine-dependent state. 
*/ kern_return_t thread_get_state( - register thread_t thread, + thread_t thread, int flavor, thread_state_t old_state, /* pointer to OUT array */ natural_t *old_state_count) /*IN/OUT*/ @@ -1422,7 +1442,7 @@ kern_return_t thread_get_state( * Change thread's machine-dependent state. */ kern_return_t thread_set_state( - register thread_t thread, + thread_t thread, int flavor, thread_state_t new_state, natural_t new_state_count) @@ -1443,7 +1463,7 @@ kern_return_t thread_set_state( } kern_return_t thread_info( - register thread_t thread, + thread_t thread, int flavor, thread_info_t thread_info_out, /* pointer to OUT array */ natural_t *thread_info_count) /*IN/OUT*/ @@ -1455,7 +1475,7 @@ kern_return_t thread_info( return KERN_INVALID_ARGUMENT; if (flavor == THREAD_BASIC_INFO) { - register thread_basic_info_t basic_info; + thread_basic_info_t basic_info; /* Allow *thread_info_count to be one smaller than the usual amount, because creation_time is a new member @@ -1484,7 +1504,8 @@ kern_return_t thread_info( &basic_info->system_time); basic_info->base_priority = thread->priority; basic_info->cur_priority = thread->sched_pri; - basic_info->creation_time = thread->creation_time; + read_time_stamp(&thread->creation_time, + &basic_info->creation_time); /* * To calculate cpu_usage, first correct for timer rate, @@ -1541,7 +1562,7 @@ kern_return_t thread_info( return KERN_SUCCESS; } else if (flavor == THREAD_SCHED_INFO) { - register thread_sched_info_t sched_info; + thread_sched_info_t sched_info; if (*thread_info_count < THREAD_SCHED_INFO_COUNT) { return KERN_INVALID_ARGUMENT; @@ -1583,7 +1604,7 @@ kern_return_t thread_info( } kern_return_t thread_abort( - register thread_t thread) + thread_t thread) { if (thread == THREAD_NULL || thread == current_thread()) { return KERN_INVALID_ARGUMENT; @@ -1648,9 +1669,13 @@ thread_t kernel_thread( continuation_t start, void * arg) { + kern_return_t kr; thread_t thread; - (void) thread_create(task, &thread); + kr = thread_create(task, 
&thread); + if (kr != KERN_SUCCESS) + return THREAD_NULL; + /* release "extra" ref that thread_create gave us */ thread_deallocate(thread); thread_start(thread, start); @@ -1674,10 +1699,10 @@ thread_t kernel_thread( * This kernel thread runs forever looking for threads to destroy * (when they request that they be destroyed, of course). */ -void reaper_thread_continue(void) +void __attribute__((noreturn)) reaper_thread_continue(void) { for (;;) { - register thread_t thread; + thread_t thread; spl_t s; s = splsched(); @@ -1792,12 +1817,12 @@ thread_unfreeze( void thread_doassign( - register thread_t thread, - register processor_set_t new_pset, + thread_t thread, + processor_set_t new_pset, boolean_t release_freeze) { - register processor_set_t pset; - register boolean_t old_empty, new_empty; + processor_set_t pset; + boolean_t old_empty, new_empty; boolean_t recompute_pri = FALSE; spl_t s; @@ -1955,6 +1980,9 @@ kern_return_t thread_get_assignment( thread_t thread, processor_set_t *pset) { + if (thread == THREAD_NULL) + return KERN_INVALID_ARGUMENT; + *pset = thread->processor_set; pset_reference(*pset); return KERN_SUCCESS; @@ -2100,8 +2128,8 @@ thread_policy( int data) { #if MACH_FIXPRI - register kern_return_t ret = KERN_SUCCESS; - register int temp; + kern_return_t ret = KERN_SUCCESS; + int temp; spl_t s; #endif /* MACH_FIXPRI */ @@ -2217,7 +2245,6 @@ thread_wire( void thread_collect_scan(void) { -#if 0 register thread_t thread, prev_thread; processor_set_t pset, prev_pset; @@ -2270,7 +2297,6 @@ void thread_collect_scan(void) thread_deallocate(prev_thread); if (prev_pset != PROCESSOR_SET_NULL) pset_deallocate(prev_pset); -#endif /* 0 */ } boolean_t thread_collect_allowed = TRUE; @@ -2304,7 +2330,7 @@ void consider_thread_collect(void) #if MACH_DEBUG vm_size_t stack_usage( - register vm_offset_t stack) + vm_offset_t stack) { int i; @@ -2321,7 +2347,7 @@ vm_size_t stack_usage( */ void stack_init( - register vm_offset_t stack) + vm_offset_t stack) { if 
(stack_check_usage) { int i; @@ -2337,7 +2363,7 @@ void stack_init( */ void stack_finalize( - register vm_offset_t stack) + vm_offset_t stack) { if (stack_check_usage) { vm_size_t used = stack_usage(stack); @@ -2430,8 +2456,8 @@ kern_return_t processor_set_stack_usage( vm_size_t maxusage; vm_offset_t maxstack; - register thread_t *threads; - register thread_t tmp_thread; + thread_t *threads; + thread_t tmp_thread; unsigned int actual; /* this many things */ unsigned int i; @@ -2549,7 +2575,7 @@ kern_return_t processor_set_stack_usage( void thread_stats(void) { - register thread_t thread; + thread_t thread; int total = 0, rpcreply = 0; queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) { |