about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Samuel Thibault <samuel.thibault@ens-lyon.org>  2023-02-08 20:17:38 +0100
committer Samuel Thibault <samuel.thibault@ens-lyon.org>  2023-02-08 20:18:16 +0100
commit   92257f56a32f16795402cdbeb76a8b4f56ea2369 (patch)
tree     fa520391b49f4bac7ad77ee065d68e8dcab2d8be
parent   a9c54f2c7f93127517b725952e67697bd82a7eeb (diff)
download gnumach-92257f56a32f16795402cdbeb76a8b4f56ea2369.tar.gz
download gnumach-92257f56a32f16795402cdbeb76a8b4f56ea2369.tar.bz2
download gnumach-92257f56a32f16795402cdbeb76a8b4f56ea2369.zip
slock: Fix initialization of statically-allocated slocks
(this is actually a no-op for i386)
-rw-r--r-- ddb/db_mp.c          | 2
-rw-r--r-- device/dev_lookup.c  | 3
-rw-r--r-- device/dev_pager.c   | 6
-rw-r--r-- device/ds_routines.c | 2
-rw-r--r-- device/kmsg.c        | 2
-rw-r--r-- device/net_io.c      | 8
-rw-r--r-- i386/i386/lock.h     | 3
-rw-r--r-- i386/intel/pmap.c    | 4
-rw-r--r-- ipc/ipc_port.c       | 4
-rw-r--r-- kern/debug.c         | 8
-rw-r--r-- kern/lock.h          | 3
-rw-r--r-- kern/lock_mon.c      | 4
-rw-r--r-- kern/mach_clock.c    | 2
-rw-r--r-- kern/machine.c       | 2
-rw-r--r-- kern/processor.c     | 2
-rw-r--r-- kern/sched_prim.c    | 2
-rw-r--r-- kern/thread.c        | 6
-rw-r--r-- kern/thread_swap.c   | 2
-rw-r--r-- kern/xpr.c           | 2
-rw-r--r-- vm/memory_object.c   | 2
-rw-r--r-- vm/vm_object.c       | 4
-rw-r--r-- vm/vm_resident.c     | 4
-rw-r--r-- xen/console.c        | 4
-rw-r--r-- xen/grant.c          | 2
-rw-r--r-- xen/store.c          | 2
25 files changed, 41 insertions, 44 deletions
diff --git a/ddb/db_mp.c b/ddb/db_mp.c
index f4e5fa3e..9c7edfdb 100644
--- a/ddb/db_mp.c
+++ b/ddb/db_mp.c
@@ -48,7 +48,7 @@
* multiprocessors.
*/
-decl_simple_lock_data(,db_lock) /* lock to enter debugger */
+def_simple_lock_data(static,db_lock) /* lock to enter debugger */
volatile int db_cpu = -1; /* CPU currently in debugger */
/* -1 if none */
int db_active[NCPUS] = { 0 }; /* count recursive entries
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 994452cd..c9c39f88 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -60,8 +60,7 @@ queue_head_t dev_number_hash_table[NDEVHASH];
* Lock for device-number to device lookup.
* Must be held before device-ref_count lock.
*/
-decl_simple_lock_data(,
- dev_number_lock)
+def_simple_lock_data(static, dev_number_lock)
struct kmem_cache dev_hdr_cache;
diff --git a/device/dev_pager.c b/device/dev_pager.c
index d9d22489..1cd74064 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -167,8 +167,7 @@ typedef struct dev_pager_entry *dev_pager_entry_t;
*/
queue_head_t dev_pager_hashtable[DEV_HASH_COUNT];
struct kmem_cache dev_pager_hash_cache;
-decl_simple_lock_data(,
- dev_pager_hash_lock)
+def_simple_lock_data(static, dev_pager_hash_lock)
struct dev_device_entry {
queue_chain_t links;
@@ -184,8 +183,7 @@ typedef struct dev_device_entry *dev_device_entry_t;
*/
queue_head_t dev_device_hashtable[DEV_HASH_COUNT];
struct kmem_cache dev_device_hash_cache;
-decl_simple_lock_data(,
- dev_device_hash_lock)
+def_simple_lock_data(static, dev_device_hash_lock)
#define dev_hash(name_port) \
(((vm_offset_t)(name_port) & 0xffffff) % DEV_HASH_COUNT)
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 6d629f69..07ab7b30 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -1508,7 +1508,7 @@ ds_no_senders(mach_no_senders_notification_t *notification)
}
queue_head_t io_done_list;
-decl_simple_lock_data(, io_done_list_lock)
+def_simple_lock_data(static, io_done_list_lock)
#define splio splsched /* XXX must block ALL io devices */
diff --git a/device/kmsg.c b/device/kmsg.c
index e49eb3d3..4885d7b3 100644
--- a/device/kmsg.c
+++ b/device/kmsg.c
@@ -44,7 +44,7 @@ static queue_head_t kmsg_read_queue;
/* Used for exclusive access to the device */
static boolean_t kmsg_in_use;
/* Used for exclusive access to the routines */
-decl_simple_lock_data (static, kmsg_lock);
+def_simple_lock_data (static, kmsg_lock);
/* If already initialized or not */
static boolean_t kmsg_init_done = FALSE;
diff --git a/device/net_io.c b/device/net_io.c
index d1cd5a67..bad6d3eb 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -86,7 +86,7 @@ int kttd_async_counter= 0;
* Messages can be high priority or low priority.
* The network thread processes high priority messages first.
*/
-decl_simple_lock_data(,net_queue_lock)
+def_simple_lock_data(static,net_queue_lock)
boolean_t net_thread_awake = FALSE;
struct ipc_kmsg_queue net_queue_high;
int net_queue_high_size = 0;
@@ -99,7 +99,7 @@ int net_queue_low_max = 0; /* for debugging */
* List of net kmsgs that can be touched at interrupt level.
* If it is empty, we will also steal low priority messages.
*/
-decl_simple_lock_data(,net_queue_free_lock)
+def_simple_lock_data(static,net_queue_free_lock)
struct ipc_kmsg_queue net_queue_free;
int net_queue_free_size = 0; /* on free list */
int net_queue_free_max = 0; /* for debugging */
@@ -125,7 +125,7 @@ int net_kmsg_send_low_misses = 0; /* for debugging */
int net_thread_awaken = 0; /* for debugging */
int net_ast_taken = 0; /* for debugging */
-decl_simple_lock_data(,net_kmsg_total_lock)
+def_simple_lock_data(static,net_kmsg_total_lock)
int net_kmsg_total = 0; /* total allocated */
int net_kmsg_max; /* initialized below */
@@ -337,7 +337,7 @@ struct net_hash_header {
net_hash_entry_t table[NET_HASH_SIZE];
} filter_hash_header[N_NET_HASH];
-decl_simple_lock_data(,net_hash_header_lock)
+def_simple_lock_data(static,net_hash_header_lock)
#define HASH_ITERATE(head, elt) (elt) = (net_hash_entry_t) (head); do {
#define HASH_ITERATE_END(head, elt) \
diff --git a/i386/i386/lock.h b/i386/i386/lock.h
index 90f2d69a..1450fe03 100644
--- a/i386/i386/lock.h
+++ b/i386/i386/lock.h
@@ -55,6 +55,9 @@
#define simple_lock_init(l) \
((l)->lock_data = 0)
+#define SIMPLE_LOCK_INITIALIZER(l) \
+ {.lock_data = 0}
+
#define simple_lock(l) \
({ \
while(_simple_lock_xchg_(l, 1)) \
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 91f797bf..ccbb03fc 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -123,7 +123,7 @@ pv_entry_t pv_head_table; /* array of entries, one per page */
* The list is refilled from the pv_list_cache if it becomes empty.
*/
pv_entry_t pv_free_list; /* free list at SPLVM */
-decl_simple_lock_data(, pv_free_list_lock)
+def_simple_lock_data(static, pv_free_list_lock)
#define PV_ALLOC(pv_e) { \
simple_lock(&pv_free_list_lock); \
@@ -427,7 +427,7 @@ pt_entry_t *kernel_page_dir;
* physical-to-physical transfers.
*/
static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS];
-decl_simple_lock_data(static, pmapwindows_lock)
+def_simple_lock_data(static, pmapwindows_lock)
static inline pt_entry_t *
pmap_pde(const pmap_t pmap, vm_offset_t addr)
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
index d71edfaf..e959f674 100644
--- a/ipc/ipc_port.c
+++ b/ipc/ipc_port.c
@@ -57,9 +57,9 @@
#endif /* MACH_KDB */
-decl_simple_lock_data(, ipc_port_multiple_lock_data)
+def_simple_lock_data(, ipc_port_multiple_lock_data)
-decl_simple_lock_data(, ipc_port_timestamp_lock_data)
+def_simple_lock_data(, ipc_port_timestamp_lock_data)
ipc_port_timestamp_t ipc_port_timestamp_data;
/*
diff --git a/kern/debug.c b/kern/debug.c
index 44814925..cd782361 100644
--- a/kern/debug.c
+++ b/kern/debug.c
@@ -112,8 +112,7 @@ void Debugger(const char *message)
even before panic_init() gets called from the "normal" place in kern/startup.c.
(panic_init() still needs to be called from there
to make sure we get initialized before starting multiple processors.) */
-boolean_t panic_lock_initialized = FALSE;
-decl_simple_lock_data(, panic_lock)
+def_simple_lock_data(static, panic_lock)
const char *panicstr;
int paniccpu;
@@ -121,11 +120,6 @@ int paniccpu;
void
panic_init(void)
{
- if (!panic_lock_initialized)
- {
- panic_lock_initialized = TRUE;
- simple_lock_init(&panic_lock);
- }
}
#if ! MACH_KBD
diff --git a/kern/lock.h b/kern/lock.h
index 2548409d..13f6915c 100644
--- a/kern/lock.h
+++ b/kern/lock.h
@@ -68,6 +68,8 @@ typedef struct slock *simple_lock_t;
#define decl_simple_lock_data(class,name) \
class simple_lock_data_t name;
+#define def_simple_lock_data(class,name) \
+class simple_lock_data_t name = SIMPLE_LOCK_INITIALIZER(&name);
#define simple_lock_addr(lock) (simple_lock_assert(&(lock)), \
&(lock))
@@ -144,6 +146,7 @@ class struct simple_lock_data_empty name;
#define decl_mutex_data(class,name) decl_simple_lock_data(class,name)
+#define def_mutex_data(class,name) def_simple_lock_data(class,name)
#define mutex_try(l) simple_lock_try(l)
#define mutex_lock(l) simple_lock(l)
#define mutex_unlock(l) simple_unlock(l)
diff --git a/kern/lock_mon.c b/kern/lock_mon.c
index 4b3ba82e..a963ec05 100644
--- a/kern/lock_mon.c
+++ b/kern/lock_mon.c
@@ -47,8 +47,8 @@
#include <kern/lock.h>
-decl_simple_lock_data(extern , kdb_lock)
-decl_simple_lock_data(extern , printf_lock)
+def_simple_lock_data(extern , kdb_lock)
+def_simple_lock_data(extern , printf_lock)
#if NCPUS > 1 && MACH_LOCK_MON
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
index 0e3a2cf2..09717d16 100644
--- a/kern/mach_clock.c
+++ b/kern/mach_clock.c
@@ -119,7 +119,7 @@ MACRO_BEGIN \
} while ((time)->seconds != mtime->check_seconds64); \
MACRO_END
-decl_simple_lock_data(, timer_lock) /* lock for ... */
+def_simple_lock_data(static, timer_lock) /* lock for ... */
timer_elt_data_t timer_head; /* ordered list of timeouts */
/* (doubles as end-of-list) */
diff --git a/kern/machine.c b/kern/machine.c
index 7eead810..e8c9912e 100644
--- a/kern/machine.c
+++ b/kern/machine.c
@@ -68,7 +68,7 @@ struct machine_info machine_info;
struct machine_slot machine_slot[NCPUS];
queue_head_t action_queue; /* assign/shutdown queue */
-decl_simple_lock_data(,action_lock);
+def_simple_lock_data(,action_lock);
/*
* cpu_up:
diff --git a/kern/processor.c b/kern/processor.c
index 75d2ff53..2cd6d46c 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -64,7 +64,7 @@ struct processor processor_array[NCPUS];
queue_head_t all_psets;
int all_psets_count;
-decl_simple_lock_data(, all_psets_lock);
+def_simple_lock_data(, all_psets_lock);
processor_t master_processor;
processor_t processor_ptr[NCPUS];
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index 9e7a9c55..713d379e 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -128,7 +128,7 @@ timer_elt_data_t recompute_priorities_timer;
#define NUMQUEUES 1031
queue_head_t wait_queue[NUMQUEUES];
-decl_simple_lock_data(, wait_lock[NUMQUEUES])
+decl_simple_lock_data(static, wait_lock[NUMQUEUES])
/* NOTE: we want a small positive integer out of this */
#define wait_hash(event) \
diff --git a/kern/thread.c b/kern/thread.c
index bc155b45..17cc458c 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -75,7 +75,7 @@ struct kmem_cache thread_cache;
struct kmem_cache thread_stack_cache;
queue_head_t reaper_queue;
-decl_simple_lock_data(, reaper_lock)
+def_simple_lock_data(static, reaper_lock)
/* private */
struct thread thread_template;
@@ -83,7 +83,7 @@ struct thread thread_template;
#if MACH_DEBUG
#define STACK_MARKER 0xdeadbeefU
boolean_t stack_check_usage = FALSE;
-decl_simple_lock_data(, stack_usage_lock)
+def_simple_lock_data(static, stack_usage_lock)
vm_size_t stack_max_usage = 0;
#endif /* MACH_DEBUG */
@@ -119,7 +119,7 @@ vm_size_t stack_max_usage = 0;
* because stack_alloc_try/thread_invoke operate at splsched.
*/
-decl_simple_lock_data(, stack_lock_data)/* splsched only */
+def_simple_lock_data(static, stack_lock_data)/* splsched only */
#define stack_lock() simple_lock(&stack_lock_data)
#define stack_unlock() simple_unlock(&stack_lock_data)
diff --git a/kern/thread_swap.c b/kern/thread_swap.c
index ecfe48e4..a5fc0523 100644
--- a/kern/thread_swap.c
+++ b/kern/thread_swap.c
@@ -60,7 +60,7 @@
queue_head_t swapin_queue;
-decl_simple_lock_data(, swapper_lock_data)
+def_simple_lock_data(static, swapper_lock_data)
#define swapper_lock() simple_lock(&swapper_lock_data)
#define swapper_unlock() simple_unlock(&swapper_lock_data)
diff --git a/kern/xpr.c b/kern/xpr.c
index 46cb2273..1b551eb1 100644
--- a/kern/xpr.c
+++ b/kern/xpr.c
@@ -46,7 +46,7 @@
* Just set xprenable false so the buffer isn't overwritten.
*/
-decl_simple_lock_data(, xprlock)
+def_simple_lock_data(static, xprlock)
boolean_t xprenable = TRUE; /* Enable xpr tracing */
int nxprbufs = 0; /* Number of contiguous xprbufs allocated */
diff --git a/vm/memory_object.c b/vm/memory_object.c
index 41bbf49e..1ea59563 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -74,7 +74,7 @@ typedef int memory_object_lock_result_t; /* moved from below */
ipc_port_t memory_manager_default = IP_NULL;
-decl_simple_lock_data(,memory_manager_default_lock)
+def_simple_lock_data(static,memory_manager_default_lock)
/*
* Important note:
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 16b34d4a..b5be3f81 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -183,7 +183,7 @@ vm_object_t kernel_object = &kernel_object_store;
*/
queue_head_t vm_object_cached_list;
-decl_simple_lock_data(,vm_object_cached_lock_data)
+def_simple_lock_data(static,vm_object_cached_lock_data)
#define vm_object_cache_lock() \
simple_lock(&vm_object_cached_lock_data)
@@ -199,7 +199,7 @@ decl_simple_lock_data(,vm_object_cached_lock_data)
*/
int vm_object_cached_pages;
-decl_simple_lock_data(,vm_object_cached_pages_lock_data)
+def_simple_lock_data(static,vm_object_cached_pages_lock_data)
/*
* Virtual memory objects are initialized from
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index aefdb1f9..e0a03bf5 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -98,7 +98,7 @@ unsigned long vm_page_bucket_count = 0; /* How big is array? */
unsigned long vm_page_hash_mask; /* Mask for hash function */
static struct list vm_page_queue_fictitious;
-decl_simple_lock_data(,vm_page_queue_free_lock)
+def_simple_lock_data(,vm_page_queue_free_lock)
int vm_page_fictitious_count;
int vm_object_external_count;
int vm_object_external_pages;
@@ -129,7 +129,7 @@ phys_addr_t vm_page_fictitious_addr = (phys_addr_t) -1;
* defined here, but are shared by the pageout
* module.
*/
-decl_simple_lock_data(,vm_page_queue_lock)
+def_simple_lock_data(,vm_page_queue_lock)
int vm_page_active_count;
int vm_page_inactive_count;
int vm_page_wire_count;
diff --git a/xen/console.c b/xen/console.c
index 0bf2f712..61de4390 100644
--- a/xen/console.c
+++ b/xen/console.c
@@ -29,8 +29,8 @@
/* Hypervisor part */
-decl_simple_lock_data(static, outlock);
-decl_simple_lock_data(static, inlock);
+def_simple_lock_data(static, outlock);
+def_simple_lock_data(static, inlock);
static struct xencons_interface *console;
static int kd_pollc;
int kb_mode; /* XXX: actually don't care. */
diff --git a/xen/grant.c b/xen/grant.c
index 1d6e607b..84758cfc 100644
--- a/xen/grant.c
+++ b/xen/grant.c
@@ -28,7 +28,7 @@
#define NR_RESERVED_ENTRIES 8
#define NR_GRANT_PAGES 8
-decl_simple_lock_data(static,lock);
+def_simple_lock_data(static,lock);
static struct grant_entry *grants;
static vm_map_entry_t grants_map_entry;
static int last_grant = NR_RESERVED_ENTRIES;
diff --git a/xen/store.c b/xen/store.c
index 23cbc223..5f5a902a 100644
--- a/xen/store.c
+++ b/xen/store.c
@@ -36,7 +36,7 @@
/* Hypervisor part */
-decl_simple_lock_data(static, lock);
+def_simple_lock_data(static, lock);
static struct xenstore_domain_interface *store;