path: root/libthreads/cprocs.c
Diffstat (limited to 'libthreads/cprocs.c')
-rw-r--r--	libthreads/cprocs.c	325
1 file changed, 163 insertions, 162 deletions
diff --git a/libthreads/cprocs.c b/libthreads/cprocs.c
index 71a6a3ad..7f63fc0d 100644
--- a/libthreads/cprocs.c
+++ b/libthreads/cprocs.c
@@ -1,120 +1,132 @@
-/*
+/*
* Mach Operating System
- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * Copyright (c) 1993-1989 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* HISTORY
+ * 26-Oct-94 Johannes Helander (jvh) Helsinki University of Technology
+ * Set the wait_type field.
+ *
* $Log: cprocs.c,v $
- * Revision 1.7 1995/09/22 17:51:10 roland
- * Include hurd/threadvar.h.
+ * Revision 1.16 2002/05/27 02:50:10 roland
+ * 2002-05-26 Roland McGrath <roland@frob.com>
+ *
+ * Changes merged from CMU MK83a version:
+ * * cthreads.h, options.h: Various cleanups.
+ * * call.c, cthread_data.c, sync.c, mig_support.c: Likewise.
+ * * i386/cthreads.h, i386/thread.c, i386/lock.s: Likewise.
+ * * cthread_internals.h: Add decls for internal functions.
+ * (struct cproc): Use vm_offset_t for stack_base and stack_size members.
+ * Use natural_t for context member.
+ * * cprocs.c: Use prototypes for all defns.
+ * * cthreads.c: Likewise.
+ * (cthread_exit): Cast any_t to integer_t before int.
*
- * Revision 1.6 1995/08/30 15:57:47 mib
- * Repair typos.
+ * Revision 2.18 93/03/09 10:59:10 danner
+ * Lint.
+ * [93/03/06 af]
*
- * Revision 1.5 1995/08/30 15:50:53 mib
- * (cond_signal): If this condition has implications, see if one of them
- * needs to be signalled when we have no waiters.
- * (cond_broadcast): Signal the implications list too.
- * (condition_implies, condition_unimplies): New functions.
+ * Revision 2.17 93/01/19 08:55:44 danner
+ * Added missing spin_lock_t type from cproc_list_lock decl.
+ * [92/12/30 af]
*
- * Revision 1.4 1995/04/04 21:04:29 roland
- * (mutex_lock_solid, mutex_unlock_solid): Renamed to __*.
- * (_cthread_mutex_lock_routine, _cthread_mutex_unlock_routine): Variables
- * removed.
*
- * Revision 1.3 1994/05/19 04:55:30 roland
- * entered into RCS
+ * Revision 2.16 93/01/14 18:04:46 danner
+ * Convert file to ANSI C.
+ * [92/12/18 pds]
+ * 64bit cleanup.
+ * [92/12/10 21:08:32 af]
*
* Revision 2.15 92/03/06 14:09:31 rpd
* Replaced swtch_pri with yield.
* [92/03/06 rpd]
- *
+ *
* Revision 2.14 91/08/28 11:19:16 jsb
* Fixed the loop in cproc_fork_child that frees cprocs.
* [91/08/23 rpd]
- *
+ *
* Revision 2.13 91/07/31 18:33:04 dbg
* Fix some more bad types. Ints are NOT pointers.
- *
+ *
* Fix argument type mismatch in cproc_create.
* [91/07/30 17:32:59 dbg]
- *
+ *
* Revision 2.12 91/05/14 17:56:11 mrt
* Correcting copyright
- *
+ *
* Revision 2.11 91/02/14 14:19:26 mrt
* Added new Mach copyright
* [91/02/13 12:40:50 mrt]
- *
+ *
* Revision 2.10 90/11/05 14:36:41 rpd
* Added cproc_fork_{prepare,parent,child}.
* [90/11/02 rwd]
- *
+ *
* Fix for positive stack growth.
* [90/11/01 rwd]
- *
+ *
* Add spin_lock_t.
* [90/10/31 rwd]
- *
+ *
* Revision 2.9 90/10/12 13:07:12 rpd
* Fix type
* [90/10/10 15:09:59 rwd]
- *
+ *
* Comment code.
* [90/10/02 rwd]
- *
+ *
* Revision 2.8 90/09/09 14:34:44 rpd
* Remove special mutex. Remove thread_calls and debug_mutex
* [90/08/24 rwd]
* Fix up old call to cthread_msg_busy to new format.
* [90/08/22 rwd]
- *
+ *
* Revision 2.7 90/08/06 15:09:17 rwd
* Fixed arguments to cthread_mach_msg.
* [90/06/26 rwd]
* Add additional STATISTICS.
* [90/06/07 rwd]
- *
+ *
* Attempt to reduce number of times a cthread is released to to a
* msg_receive by adding min/max instead of single number to
* cthread_msg calls.
* [90/06/06 rwd]
- *
+ *
* Revision 2.6 90/06/02 15:13:36 rpd
* Converted to new IPC.
* [90/03/20 20:46:16 rpd]
- *
+ *
* Revision 2.5 90/05/29 18:40:11 rwd
* Don't incr special field until the mutex grab is successful.
* [90/05/09 rwd]
- *
+ *
* Revision 2.4 90/03/14 21:12:02 rwd
* Added WAIT_DEBUG code for deadlock debugging.
* [90/03/01 rwd]
* Insert cprocs in cproc_list as allocated.
* [90/03/01 10:20:16 rwd]
- *
+ *
* Revision 2.3 90/01/19 14:36:57 rwd
* Make cthread_msg_busy only release new thread if this is still
* busy. Ie don't release two on back to back calls.
@@ -126,7 +138,7 @@
* Change cproc_self pointer to top of stack. Now need to change
* the stack of the first thread.
* [89/12/12 rwd]
- *
+ *
* Revision 2.2 89/12/08 19:53:13 rwd
* Added CPROC_CONDWAIT state to deal with lock held
* across mutex_unlock problem.
@@ -134,7 +146,7 @@
* Changed mutexes to not hand off. MUTEX_EXTRA conditional is
* now obsolete.
* [89/11/27 rwd]
- *
+ *
* Add MUTEX_EXTRA code for extra kernel threads to serve special
* mutexes in time of need.
* [89/11/25 rwd]
@@ -144,15 +156,15 @@
* macro which tries the spin_lock before making a subroutine call.
* Mutex_unlock is now a macro with mutex_unlock_solid for worst case.
* [89/11/13 rwd]
- *
+ *
* Rewrite most to merge coroutine and thread implementation.
* New routines are cthread_set_kernel_limit, cthread_kernel_limit,
* cthread_wire, cthread_unwire, and cthread_receive.
* [89/10/23 rwd]
- *
+ *
* Revision 2.1 89/08/03 17:07:10 rwd
* Created.
- *
+ *
* 11-Apr-89 David Golub (dbg) at Carnegie-Mellon University
* Made condition_yield loop break if swtch_pri returns TRUE (in
* case we fix it).
@@ -208,7 +220,7 @@
* to eliminate dependency on cproc layout.
*/
/*
- * File: cprocs.c
+ * File: cprocs.c
* Author: Eric Cooper, Carnegie Mellon University
* Date: Aug, 1987
*
@@ -221,15 +233,7 @@
#include "cthread_internals.h"
#include <mach/message.h>
#include <hurd/threadvar.h> /* GNU */
-
-/*
- * C Threads imports:
- */
-extern void alloc_stack();
-extern void cproc_switch(); /* cproc context switch */
-extern void cproc_start_wait(); /* cproc idle thread */
-extern vm_offset_t cproc_stack_base(); /* return start of stack */
-extern vm_offset_t stack_init();
+#include <assert.h>
/*
* Port_entry's are used by cthread_mach_msg to store information
@@ -280,10 +284,10 @@ int cthread_no_mutex = 0; /* total number times woken to get
mutex and couldn't */
private spin_lock_t mutex_count_lock = SPIN_LOCK_INITIALIZER;
/* lock for above */
-#endif STATISTICS
+#endif /* STATISTICS */
cproc_t cproc_list = NO_CPROC; /* list of all cprocs */
-private cproc_list_lock = SPIN_LOCK_INITIALIZER;
+private spin_lock_t cproc_list_lock = SPIN_LOCK_INITIALIZER;
/* lock for above */
private int cprocs_started = FALSE; /* initialized? */
private struct cthread_queue ready = QUEUE_INITIALIZER;
@@ -311,8 +315,8 @@ private mach_msg_header_t wakeup_msg; /* prebuilt message used by idle
* Return current value for max kernel threads
* Note: 0 means no limit
*/
-
-cthread_kernel_limit()
+int
+cthread_kernel_limit(void)
{
return cthread_max_kernel_threads;
}
@@ -323,8 +327,8 @@ cthread_kernel_limit()
* over maximum.
*/
-cthread_set_kernel_limit(n)
- int n;
+void
+cthread_set_kernel_limit(int n)
{
cthread_max_kernel_threads = n;
}
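
For reference, a minimal usage sketch of the two limit routines above; it is not part of the patch, the value 8 is arbitrary, and 0 would mean "no limit" per the comment on cthread_kernel_limit.

	cthread_set_kernel_limit (8);		/* illustrative cap on kernel threads */
	assert (cthread_kernel_limit () == 8);	/* <assert.h> is included above */
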
@@ -333,47 +337,28 @@ cthread_set_kernel_limit(n)
* Wire a cthread to its current kernel thread
*/
-void cthread_wire()
+void
+cthread_wire(void)
{
register cproc_t p = cproc_self();
kern_return_t r;
- /*
- * A wired thread has a port associated with it for all
- * of its wait/block cases. We also prebuild a wakeup
- * message.
- */
-
- if (p->wired == MACH_PORT_NULL) {
- MACH_CALL(mach_port_allocate(mach_task_self(),
- MACH_PORT_RIGHT_RECEIVE,
- &p->wired), r);
- MACH_CALL(mach_port_insert_right(mach_task_self(),
- p->wired, p->wired,
- MACH_MSG_TYPE_MAKE_SEND), r);
- p->msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
- p->msg.msgh_size = 0; /* initialized in call */
- p->msg.msgh_remote_port = p->wired;
- p->msg.msgh_local_port = MACH_PORT_NULL;
- p->msg.msgh_kind = MACH_MSGH_KIND_NORMAL;
- p->msg.msgh_id = 0;
-#ifdef STATISTICS
- spin_lock(&wired_lock);
- cthread_wired++;
- spin_unlock(&wired_lock);
-#endif STATISTICS
- }
+ /* In GNU, we wire all threads on creation (in cproc_alloc). */
+ assert (p->wired != MACH_PORT_NULL);
}
/*
* Unwire a cthread. Deallocate its wait port.
*/
-void cthread_unwire()
+void
+cthread_unwire(void)
{
register cproc_t p = cproc_self();
- kern_return_t r;
+ /* This is bad juju in GNU, where all cthreads must be wired. */
+ abort();
+#if 0
if (p->wired != MACH_PORT_NULL) {
MACH_CALL(mach_port_mod_refs(mach_task_self(), p->wired,
MACH_PORT_RIGHT_SEND, -1), r);
@@ -384,14 +369,16 @@ void cthread_unwire()
spin_lock(&wired_lock);
cthread_wired--;
spin_unlock(&wired_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
}
+#endif
}
private cproc_t
-cproc_alloc()
+cproc_alloc(void)
{
register cproc_t p = (cproc_t) malloc(sizeof(struct cproc));
+ kern_return_t r;
p->incarnation = NO_CTHREAD;
#if 0
@@ -400,9 +387,31 @@ cproc_alloc()
#endif
spin_lock_init(&p->lock);
- p->wired = MACH_PORT_NULL;
p->state = CPROC_RUNNING;
p->busy = 0;
+
+ /*
+ * In GNU, every cthread must be wired. So we just
+ * initialize P->wired on creation.
+ *
+ * A wired thread has a port associated with it for all
+ * of its wait/block cases. We also prebuild a wakeup
+ * message.
+ */
+
+ MACH_CALL(mach_port_allocate(mach_task_self(),
+ MACH_PORT_RIGHT_RECEIVE,
+ &p->wired), r);
+ MACH_CALL(mach_port_insert_right(mach_task_self(),
+ p->wired, p->wired,
+ MACH_MSG_TYPE_MAKE_SEND), r);
+ p->msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+ p->msg.msgh_size = 0; /* initialized in call */
+ p->msg.msgh_remote_port = p->wired;
+ p->msg.msgh_local_port = MACH_PORT_NULL;
+ p->msg.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ p->msg.msgh_id = 0;
+
spin_lock(&cproc_list_lock);
p->list = cproc_list;
cproc_list = p;
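
For context, a minimal sketch of how the wakeup message prebuilt here is meant to be used: when a wired cproc must be made runnable, the waker sends that one message to the cproc's wait port (compare the "Just send message to wired cproc" comment further down).  The helper name is hypothetical; the mach_msg options and error handling are illustrative only.

	private void
	example_wakeup_wired (cproc_t p)	/* hypothetical helper */
	{
		kern_return_t r;

		p->msg.msgh_size = sizeof p->msg;	/* "initialized in call" */
		r = mach_msg (&p->msg, MACH_SEND_MSG,
			      sizeof p->msg, 0, MACH_PORT_NULL,
			      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		if (r != MACH_MSG_SUCCESS)
			mach_error ("mach_msg", r);
	}
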
@@ -416,7 +425,7 @@ cproc_alloc()
*/
vm_offset_t
-cproc_init()
+cproc_init(void)
{
kern_return_t r;
@@ -453,9 +462,7 @@ cproc_init()
* synching on its lock. Just send message to wired cproc.
*/
-private int cproc_ready(p, preq)
- register cproc_t p;
- register int preq;
+private boolean_t cproc_ready(register cproc_t p, register int preq)
{
register cproc_t s=cproc_self();
kern_return_t r;
@@ -469,7 +476,7 @@ private int cproc_ready(p, preq)
mach_error("mach_msg", r);
exit(1);
}
-#endif CHECK_STATUS
+#endif /* CHECK_STATUS */
return TRUE;
}
spin_lock(&p->lock); /* is it ready to be queued? It
@@ -497,7 +504,7 @@ private int cproc_ready(p, preq)
}
#ifdef STATISTICS
cthread_ready++;
-#endif STATISTICS
+#endif /* STATISTICS */
ready_count++;
if ((s->state & CPROC_CONDWAIT) && !(s->wired)) {
@@ -523,7 +530,7 @@ private int cproc_ready(p, preq)
mach_error("mach_msg", r);
exit(1);
}
-#endif CHECK_STATUS
+#endif /* CHECK_STATUS */
return TRUE;
}
spin_unlock(&ready_lock);
@@ -536,8 +543,7 @@ private int cproc_ready(p, preq)
*/
void
-cproc_waiting(p)
- register cproc_t p;
+cproc_waiting(cproc_t p)
{
mach_msg_header_t msg;
register cproc_t new;
@@ -548,7 +554,7 @@ cproc_waiting(p)
cthread_waiting++;
cthread_waiters++;
spin_unlock(&ready_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
for (;;) {
MACH_CALL(mach_msg(&msg, MACH_RCV_MSG,
0, sizeof msg, wait_port,
@@ -560,14 +566,14 @@ cproc_waiting(p)
ready_count++;
#ifdef STATISTICS
cthread_none++;
-#endif STATISTICS
+#endif /* STATISTICS */
spin_unlock(&ready_lock);
- }
+ }
#ifdef STATISTICS
cthread_ready--;
cthread_running++;
cthread_waiting--;
-#endif STATISTICS
+#endif /* STATISTICS */
spin_unlock(&ready_lock);
spin_lock(&new->lock);
new->state = CPROC_RUNNING;
@@ -584,8 +590,8 @@ cproc_waiting(p)
*
*/
-cproc_t
-cproc_waiter()
+private cproc_t
+cproc_waiter(void)
{
register cproc_t waiter;
@@ -599,7 +605,7 @@ cproc_waiter()
spin_lock(&waiters_lock);
cthread_wait_stacks++;
spin_unlock(&waiters_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
waiter = cproc_alloc();
MACH_CALL(vm_allocate(mach_task_self(), &base,
cthread_wait_stack_size, TRUE), r);
@@ -617,7 +623,8 @@ cproc_waiter()
* You must hold cproc_self()->lock when called.
*/
-cproc_block()
+void
+cproc_block(void)
{
extern unsigned int __hurd_threadvar_max; /* GNU */
register cproc_t waiter, new, p = cproc_self();
@@ -638,13 +645,13 @@ cproc_block()
spin_lock(&ready_lock);
#ifdef STATISTICS
cthread_blocked++;
-#endif STATISTICS
+#endif /* STATISTICS */
cthread_queue_deq(&ready, cproc_t, new);
if (new) {
#ifdef STATISTICS
cthread_ready--;
cthread_switches++;
-#endif STATISTICS
+#endif /* STATISTICS */
ready_count--;
spin_unlock(&ready_lock);
spin_lock(&p->lock);
@@ -655,7 +662,7 @@ cproc_block()
cthread_wakeup++;
cthread_switches--;
spin_unlock(&ready_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
cproc_ready(new, 1); /* requeue at head were it was */
} else {
p->state = CPROC_BLOCKED;
@@ -668,7 +675,7 @@ cproc_block()
wait_count++;
#ifdef STATISTICS
cthread_running--;
-#endif STATISTICS
+#endif /* STATISTICS */
spin_unlock(&ready_lock);
waiter = cproc_waiter();
spin_lock(&p->lock);
@@ -679,7 +686,7 @@ cproc_block()
#ifdef STATISTICS
cthread_running++;
cthread_wakeup++;
-#endif STATISTICS
+#endif /* STATISTICS */
spin_unlock(&ready_lock);
spin_lock(&waiters_lock);
cthread_queue_preq(&waiters, waiter);
@@ -689,7 +696,7 @@ cproc_block()
spin_lock(&waiter->lock); /* in case still switching */
spin_unlock(&waiter->lock);
cproc_start_wait
- (&p->context, waiter,
+ (&p->context, waiter,
cproc_stack_base(waiter,
sizeof(ur_cthread_t *) +
/* Account for GNU per-thread
@@ -705,7 +712,7 @@ cproc_block()
* Implement C threads using MACH threads.
*/
cproc_t
-cproc_create()
+cproc_create(void)
{
register cproc_t child = cproc_alloc();
register kern_return_t r;
@@ -718,16 +725,17 @@ cproc_create()
spin_lock(&n_kern_lock);
if (cthread_max_kernel_threads == 0 ||
cthread_kernel_threads < cthread_max_kernel_threads) {
+ tcbhead_t *tcb = _dl_allocate_tls(NULL);
cthread_kernel_threads++;
spin_unlock(&n_kern_lock);
MACH_CALL(thread_create(mach_task_self(), &n), r);
- cproc_setup(child, n, cthread_body); /* machine dependent */
+ cproc_setup(child, n, tcb, cthread_body); /* machine dependent */
MACH_CALL(thread_resume(n), r);
#ifdef STATISTICS
spin_lock(&ready_lock);
cthread_running++;
spin_unlock(&ready_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
} else {
vm_offset_t stack;
spin_unlock(&n_kern_lock);
@@ -746,18 +754,16 @@ cproc_create()
variables. */
__hurd_threadvar_max *
sizeof (long int));
- cproc_prepare(child, &child->context, stack);
+ cproc_prepare(child, &child->context, stack, &cthread_body);
/* Set up the cproc_self ptr at the base of CHILD's stack. */
- ur_cthread_ptr(stack) = child;
+ ur_cthread_ptr(stack) = (ur_cthread_t) child;
cproc_ready(child,0);
}
return child;
}
void
-condition_wait(c, m)
- register condition_t c;
- mutex_t m;
+condition_wait(condition_t c, mutex_t m)
{
register cproc_t p = cproc_self();
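
As a restatement of the stack accounting used in the cproc_stack_base() and cproc_prepare() calls in the preceding hunks (nothing new is computed here): each cproc stack reserves room at its base for the cproc self pointer plus one long slot per GNU per-thread variable.

	/* Space reserved at the base of each cproc stack.  */
	vm_offset_t reserved = sizeof (ur_cthread_t *)
			       + __hurd_threadvar_max * sizeof (long int);
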
@@ -768,7 +774,7 @@ condition_wait(c, m)
spin_unlock(&c->lock);
#ifdef WAIT_DEBUG
p->waiting_for = (char *)c;
-#endif WAIT_DEBUG
+#endif /* WAIT_DEBUG */
mutex_unlock(m);
@@ -783,7 +789,7 @@ condition_wait(c, m)
#ifdef WAIT_DEBUG
p->waiting_for = (char *)0;
-#endif WAIT_DEBUG
+#endif /* WAIT_DEBUG */
/*
* Re-acquire the mutex and return.
@@ -798,7 +804,7 @@ void
condition_implies (condition_t implicator, condition_t implicatand)
{
struct cond_imp *imp;
-
+
imp = malloc (sizeof (struct cond_imp));
imp->implicatand = implicatand;
imp->next = implicator->implications;
@@ -811,7 +817,7 @@ void
condition_unimplies (condition_t implicator, condition_t implicatand)
{
struct cond_imp **impp;
-
+
for (impp = &implicator->implications; *impp; impp = &(*impp)->next)
{
if ((*impp)->implicatand == implicatand)
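
A usage sketch for the implication routines above, not part of the patch: after linking the two conditions, a cond_signal on the first that finds no waiter of its own will try to signal the second instead (see cond_signal below), and condition_unimplies removes the link again.  The helper and condition names are hypothetical.

	private void
	example_link_conditions (condition_t c_specific, condition_t c_any)
	{
		condition_implies (c_specific, c_any);
		/* ... signals on C_SPECIFIC now fall through to C_ANY
		   whenever C_SPECIFIC has no waiters ... */
		condition_unimplies (c_specific, c_any);
	}
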
@@ -827,8 +833,7 @@ condition_unimplies (condition_t implicator, condition_t implicatand)
/* Signal one waiter on C. If there were no waiters at all, return
0, else return 1. */
int
-cond_signal(c)
- register condition_t c;
+cond_signal(condition_t c)
{
register cproc_t p;
struct cond_imp *imp;
@@ -849,8 +854,7 @@ cond_signal(c)
}
void
-cond_broadcast(c)
- register condition_t c;
+cond_broadcast(condition_t c)
{
register cproc_t p;
struct cthread_queue blocked_queue;
@@ -881,7 +885,7 @@ cond_broadcast(c)
}
void
-cthread_yield()
+cthread_yield(void)
{
register cproc_t new, p = cproc_self();
@@ -892,7 +896,7 @@ cthread_yield()
spin_lock(&ready_lock);
#ifdef STATISTICS
cthread_yields++;
-#endif STATISTICS
+#endif /* STATISTICS */
cthread_queue_deq(&ready, cproc_t, new);
if (new) {
cthread_queue_enq(&ready, p);
@@ -923,7 +927,7 @@ __mutex_lock_solid(void *ptr)
#ifdef WAIT_DEBUG
p->waiting_for = (char *)m;
-#endif WAIT_DEBUG
+#endif /* WAIT_DEBUG */
while (1) {
spin_lock(&m->lock);
if (cthread_queue_head(&m->queue, cproc_t) == NO_CPROC) {
@@ -937,7 +941,7 @@ __mutex_lock_solid(void *ptr)
spin_unlock(&m->lock);
#ifdef WAIT_DEBUG
p->waiting_for = (char *)0;
-#endif WAIT_DEBUG
+#endif /* WAIT_DEBUG */
return;
} else {
if (!queued) cthread_queue_enq(&m->queue, p);
@@ -947,14 +951,14 @@ __mutex_lock_solid(void *ptr)
if (spin_try_lock(&m->held)) {
#ifdef WAIT_DEBUG
p->waiting_for = (char *)0;
-#endif WAIT_DEBUG
+#endif /* WAIT_DEBUG */
return;
}
#ifdef STATISTICS
spin_lock(&mutex_count_lock);
cthread_no_mutex++;
spin_unlock(&mutex_count_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
}
}
}
@@ -990,8 +994,8 @@ __mutex_unlock_solid(void *ptr)
* call to occur as often as is possible.
*/
-private port_entry_t get_port_entry(port, min, max)
- mach_port_t port;
+private port_entry_t
+get_port_entry(mach_port_t port, int min, int max)
{
register port_entry_t i;
@@ -1014,8 +1018,8 @@ private port_entry_t get_port_entry(port, min, max)
return i;
}
-cthread_msg_busy(port, min, max)
- mach_port_t port;
+void
+cthread_msg_busy(mach_port_t port, int min, int max)
{
register port_entry_t port_entry;
register cproc_t new, p = cproc_self();
@@ -1036,7 +1040,7 @@ cthread_msg_busy(port, min, max)
spin_lock(&port_lock);
cthread_rnone++;
spin_unlock(&port_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
}
} else {
port_entry->held--;
@@ -1046,8 +1050,8 @@ cthread_msg_busy(port, min, max)
}
-cthread_msg_active(port, min, max)
-mach_port_t port;
+void
+cthread_msg_active(mach_port_t port, int min, int max)
{
register cproc_t p = cproc_self();
register port_entry_t port_entry;
@@ -1058,24 +1062,18 @@ mach_port_t port;
spin_lock(&port_entry->lock);
if (port_entry->held < port_entry->max) {
port_entry->held++;
- p->busy = (int)port_entry;
+ p->busy = port_entry;
}
spin_unlock(&port_entry->lock);
}
}
mach_msg_return_t
-cthread_mach_msg(header, option,
- send_size, rcv_size, rcv_name,
- timeout, notify, min, max)
- register mach_msg_header_t *header;
- register mach_msg_option_t option;
- mach_msg_size_t send_size;
- mach_msg_size_t rcv_size;
- register mach_port_t rcv_name;
- mach_msg_timeout_t timeout;
- mach_port_t notify;
- int min, max;
+cthread_mach_msg(register mach_msg_header_t *header,
+ register mach_msg_option_t option, mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size, register mach_port_t rcv_name,
+ mach_msg_timeout_t timeout, mach_port_t notify, int min,
+ int max)
{
register port_entry_t port_entry;
register cproc_t p = cproc_self();
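
A hedged usage sketch for cthread_mach_msg as prototyped above, not part of the patch: receive on a port while asking the package to keep between MIN and MAX threads serving that port, per the min/max description in the log at the top of the file.  The helper name, buffer size, and the 2/4 values are illustrative.

	private void
	example_receive (mach_port_t rcv_name)	/* hypothetical */
	{
		struct {
			mach_msg_header_t head;
			char body[64];		/* illustrative buffer */
		} msg;
		mach_msg_return_t mr;

		mr = cthread_mach_msg (&msg.head, MACH_RCV_MSG,
				       0, sizeof msg, rcv_name,
				       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL,
				       2, 4);
		if (mr != MACH_MSG_SUCCESS)
			mach_error ("cthread_mach_msg", mr);
	}
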
@@ -1105,7 +1103,7 @@ cthread_mach_msg(header, option,
spin_unlock(&port_entry->lock);
#ifdef WAIT_DEBUG
p->waiting_for = (char *)port_entry;
-#endif WAIT_DEBUG
+#endif /* WAIT_DEBUG */
cproc_block();
} else {
port_entry->held++;
@@ -1120,8 +1118,8 @@ cthread_mach_msg(header, option,
}
#ifdef WAIT_DEBUG
p->waiting_for = (char *)0;
-#endif WAIT_DEBUG
- p->busy = (int)port_entry;
+#endif /* WAIT_DEBUG */
+ p->busy = port_entry;
if ((option & MACH_SEND_MSG) && !sent) {
r = mach_msg(header, option,
send_size, rcv_size, rcv_name,
@@ -1134,7 +1132,8 @@ cthread_mach_msg(header, option,
return r;
}
-cproc_fork_prepare()
+void
+cproc_fork_prepare(void)
{
register cproc_t p = cproc_self();
@@ -1143,7 +1142,8 @@ cproc_fork_prepare()
spin_lock(&cproc_list_lock);
}
-cproc_fork_parent()
+void
+cproc_fork_parent(void)
{
register cproc_t p = cproc_self();
@@ -1152,7 +1152,8 @@ cproc_fork_parent()
vm_inherit(mach_task_self(),p->stack_base, p->stack_size, VM_INHERIT_NONE);
}
-cproc_fork_child()
+void
+cproc_fork_child(void)
{
register cproc_t l,p = cproc_self();
cproc_t m;
@@ -1180,7 +1181,7 @@ cproc_fork_child()
cthread_switches = 0;
cthread_no_mutex = 0;
spin_lock_init(&mutex_count_lock);
-#endif STATISTICS
+#endif /* STATISTICS */
for(l=cproc_list;l!=NO_CPROC;l=m) {
m=l->next;