author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2023-08-12 01:25:28 +0200 |
---|---|---|
committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2023-08-12 18:56:19 +0200 |
commit | 34890b4939c1ee8190a103b8693e94ddffe13ffa (patch) | |
tree | c7fb3fcde06df4aaa042851ac412571bfe33ada1 /kern/lock.h | |
parent | 5a9f9578e3075a39e7b14db6e5b8e8c4e01f245f (diff) | |
download | gnumach-34890b4939c1ee8190a103b8693e94ddffe13ffa.tar.gz gnumach-34890b4939c1ee8190a103b8693e94ddffe13ffa.tar.bz2 gnumach-34890b4939c1ee8190a103b8693e94ddffe13ffa.zip |
lock: Add _irq variants
And pave the way for making the non-_irq variants check that they are
never used within interrupts.
We do have a few places that were missing it, as the following commits will show.
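
For illustration, here is a minimal sketch of the intended calling convention for a lock shared between thread context and an interrupt handler. The lock and function names (`my_queue_lock`, `my_enqueue`, `my_intr`) are hypothetical; only the locking macros come from this change:

```c
#include <kern/lock.h>

/* Hypothetical example: my_queue_lock, my_enqueue and my_intr are not part of
 * this commit; they only show how the new _irq variants are meant to be used. */
def_simple_lock_irq_data(static, my_queue_lock)

/* Thread-context path: it must also use the _irq variant, so that the interrupt
 * handler below can never fire on this CPU while the lock is held and then spin
 * forever on a lock its own CPU already owns. */
void my_enqueue(void)
{
	spl_t s = simple_lock_irq(&my_queue_lock);
	/* ... touch state shared with the interrupt handler ... */
	simple_unlock_irq(s, &my_queue_lock);
}

/* Interrupt-handler path: simple_lock_irq raises the spl to splhigh before
 * taking the lock and returns the previous level, which is restored on unlock. */
void my_intr(void)
{
	spl_t s = simple_lock_irq(&my_queue_lock);
	/* ... critical section ... */
	simple_unlock_irq(s, &my_queue_lock);
}
```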
Diffstat (limited to 'kern/lock.h')
-rw-r--r-- | kern/lock.h | 84 |
1 file changed, 75 insertions, 9 deletions
```diff
diff --git a/kern/lock.h b/kern/lock.h
index 0cf76ea7..2d493f87 100644
--- a/kern/lock.h
+++ b/kern/lock.h
@@ -36,13 +36,27 @@
 #include <mach/boolean.h>
 #include <mach/machine/vm_types.h>
 
+#include <machine/spl.h>
+
+/*
+ * Note: we cannot blindly use simple locks in interrupt handlers, otherwise one
+ * may try to acquire a lock while already having the lock, thus a deadlock.
+ *
+ * When locks are needed in interrupt handlers, the _irq versions of the calls
+ * should be used, which disable interrupts (by calling splhigh) before acquiring
+ * the lock, thus preventing the deadlock. They need to be used this way:
+ *
+ *	spl_t s = simple_lock_irq(&mylock);
+ *	[... critical section]
+ *	simple_unlock_irq(s, &mylock);
+ */
 #if NCPUS > 1
 #include <machine/lock.h>/*XXX*/
 
 #if MACH_LOCK_MON == 0
-#define simple_lock		_simple_lock
-#define simple_lock_try		_simple_lock_try
-#define simple_unlock		_simple_unlock
+#define simple_lock_nocheck	_simple_lock
+#define simple_lock_try_nocheck	_simple_lock_try
+#define simple_unlock_nocheck	_simple_unlock
 #endif
 #endif
 
@@ -75,9 +89,13 @@ class simple_lock_data_t	name;
 
 #define def_simple_lock_data(class,name)	\
 class simple_lock_data_t	name = SIMPLE_LOCK_INITIALIZER(&name);
+#define def_simple_lock_irq_data(class,name)	\
+class simple_lock_irq_data_t	name = { SIMPLE_LOCK_INITIALIZER(&name.lock) };
 
 #define simple_lock_addr(lock)		(simple_lock_assert(&(lock)),	\
 					 &(lock))
+#define simple_lock_irq_addr(l)		(simple_lock_irq_assert(&(l)),	\
+					 &(l)->lock)
 
 #if (NCPUS > 1)
 
@@ -109,9 +127,9 @@ extern boolean_t _simple_lock_try(simple_lock_t,
 
 #define STR(x)	XSTR(x)
 #define LOCATION __FILE__ ":" STR(__LINE__)
-#define simple_lock(lock)	_simple_lock((lock), #lock, LOCATION)
-#define simple_unlock(lock)	_simple_unlock((lock), #lock, LOCATION)
-#define simple_lock_try(lock)	_simple_lock_try((lock), #lock, LOCATION)
+#define simple_lock_nocheck(lock)	_simple_lock((lock), #lock, LOCATION)
+#define simple_lock_try_nocheck(lock)	_simple_lock_try((lock), #lock, LOCATION)
+#define simple_unlock_nocheck(lock)	_simple_unlock((lock))
 
 #define simple_lock_pause()
 #define simple_lock_taken(lock)		(simple_lock_assert(lock),	\
@@ -128,20 +146,25 @@ extern void check_simple_locks_disable(void);
  * Do not allocate storage for locks if not needed.
  */
 struct simple_lock_data_empty { struct {} is_a_simple_lock; };
+struct simple_lock_irq_data_empty { struct simple_lock_data_empty slock; };
 #define decl_simple_lock_data(class,name)	\
 class struct simple_lock_data_empty	name;
 #define def_simple_lock_data(class,name)	\
 class struct simple_lock_data_empty	name;
+#define def_simple_lock_irq_data(class,name)	\
+class struct simple_lock_irq_data_empty	name;
 #define simple_lock_addr(lock)		(simple_lock_assert(&(lock)),	\
 					 (simple_lock_t)0)
+#define simple_lock_irq_addr(lock)	(simple_lock_irq_assert(&(lock)),	\
+					 (simple_lock_t)0)
 
 /*
  * No multiprocessor locking is necessary.
  */
 #define simple_lock_init(l)	simple_lock_assert(l)
-#define simple_lock(l)		simple_lock_assert(l)
-#define simple_unlock(l)	simple_lock_assert(l)
-#define simple_lock_try(l)	(simple_lock_assert(l),	\
+#define simple_lock_nocheck(l)		simple_lock_assert(l)
+#define simple_unlock_nocheck(l)	simple_lock_assert(l)
+#define simple_lock_try_nocheck(l)	(simple_lock_assert(l),	\
 				 TRUE)	/* always succeeds */
 #define simple_lock_taken(l)	(simple_lock_assert(l),	\
 				 1)	/* always succeeds */
@@ -221,13 +244,56 @@ extern void lock_clear_recursive(lock_t);
 #if ! MACH_LDEBUG
 #define have_read_lock(l)	1
 #define have_write_lock(l)	1
+#define lock_check_no_interrupts()
 #else /* MACH_LDEBUG */
 /* XXX: We don't keep track of readers, so this is an approximation. */
 #define have_read_lock(l)	((l)->read_count > 0)
 #define have_write_lock(l)	((l)->writer == current_thread())
+// Disabled for now, until all places are fixed
+extern unsigned long in_interrupt[NCPUS];
+#define lock_check_no_interrupts()	// assert(!in_interrupt[cpu_number()])
 #endif /* MACH_LDEBUG */
 
 #define have_lock(l)	(have_read_lock(l) || have_write_lock(l))
+#define simple_lock(l) do { \
+	lock_check_no_interrupts(); \
+	simple_lock_nocheck(l); \
+} while (0)
+#define simple_lock_try(l) ({ \
+	lock_check_no_interrupts(); \
+	simple_lock_try_nocheck(l); \
+})
+#define simple_unlock(l) do { \
+	lock_check_no_interrupts(); \
+	simple_unlock_nocheck(l); \
+} while (0)
+
+/* _irq variants */
+
+struct slock_irq {
+	struct slock slock;
+};
+
+#define simple_lock_irq_assert(l)	simple_lock_assert(&(l)->slock)
+
+typedef struct slock_irq	simple_lock_irq_data_t;
+typedef struct slock_irq	*simple_lock_irq_t;
+
+#define decl_simple_lock_irq_data(class,name)	\
+class simple_lock_irq_data_t	name;
+
+#define simple_lock_init_irq(l)	simple_lock_init(&(l)->slock)
+
+#define simple_lock_irq(l) ({ \
+	spl_t __s = splhigh(); \
+	simple_lock_nocheck(&(l)->slock); \
+	__s; \
+})
+#define simple_unlock_irq(s, l) do { \
+	simple_unlock_nocheck(&(l)->slock); \
+	splx(s); \
+} while (0)
+
 #if MACH_KDB
 extern void db_show_all_slocks(void);
 #endif	/* MACH_KDB */
```
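
For locks embedded in a structure rather than defined statically, the patch also adds `decl_simple_lock_irq_data` and `simple_lock_init_irq`. A hedged sketch of that pattern, with an invented `my_dev` structure and functions used only for illustration:

```c
#include <kern/lock.h>

/* Hypothetical per-device state; only the lock macros come from this patch. */
struct my_dev {
	decl_simple_lock_irq_data(, lock)	/* expands to: simple_lock_irq_data_t lock; */
	int			pending;
};

void my_dev_init(struct my_dev *dev)
{
	simple_lock_init_irq(&dev->lock);	/* initialize the embedded simple lock */
	dev->pending = 0;
}

void my_dev_intr(struct my_dev *dev)
{
	spl_t s = simple_lock_irq(&dev->lock);	/* splhigh + lock in one call */
	dev->pending++;
	simple_unlock_irq(s, &dev->lock);	/* unlock, then restore the old spl */
}
```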