 i386/i386/locore.S  | 14 +++++++++++---
 i386/i386/mp_desc.c |  8 ++++++++
 kern/lock.h         | 10 ++++++++--
 x86_64/locore.S     | 14 +++++++++++---
 4 files changed, 38 insertions(+), 8 deletions(-)
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index 55add6e4..cb0f063b 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -701,12 +701,20 @@ ENTRY(all_intrs)
TIME_INT_ENTRY /* do timing */
#endif
- call EXT(interrupt) /* call generic interrupt routine */
+#ifdef MACH_LDEBUG
+ CPU_NUMBER(%ecx)
+ incl CX(EXT(in_interrupt),%ecx)
+#endif
- .globl EXT(return_to_iret)
-LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
+ call EXT(interrupt) /* call generic interrupt routine */
+ .globl EXT(return_to_iret) /* ( label for kdb_kintr and hardclock */
+LEXT(return_to_iret) /* to find the return from calling interrupt) */
CPU_NUMBER(%edx)
+#ifdef MACH_LDEBUG
+ decl CX(EXT(in_interrupt),%edx)
+#endif
+
#if STAT_TIME
#else
TIME_INT_EXIT /* do timing */
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
index fa66aa44..88fbb50a 100644
--- a/i386/i386/mp_desc.c
+++ b/i386/i386/mp_desc.c
@@ -67,6 +67,14 @@
vm_offset_t int_stack_top[NCPUS];
vm_offset_t int_stack_base[NCPUS];
+/*
+ * Whether we are currently handling an interrupt.
+ * To catch code erroneously taking non-irq-safe locks.
+ */
+#ifdef MACH_LDEBUG
+unsigned long in_interrupt[NCPUS];
+#endif
+
/* Interrupt stack allocation */
uint8_t solid_intstack[NCPUS*INTSTACK_SIZE] __aligned(NCPUS*INTSTACK_SIZE);
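
For orientation, a minimal sketch of how this new per-CPU counter is meant to be read by the lock debugging code; the cpu_number() indexing mirrors the CPU_NUMBER()/CX() pattern used in locore.S, and the helper name is hypothetical, not part of the patch:

#ifdef MACH_LDEBUG
extern unsigned long in_interrupt[NCPUS];

/* Hypothetical helper: nonzero while the current CPU is running an
 * interrupt handler.  A counter (rather than a flag) keeps the
 * increment/decrement in locore.S balanced if interrupts nest. */
static inline int cpu_in_interrupt(void)
{
	return in_interrupt[cpu_number()] != 0;
}
#endif	/* MACH_LDEBUG */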
diff --git a/kern/lock.h b/kern/lock.h
index 2d493f87..885b60ba 100644
--- a/kern/lock.h
+++ b/kern/lock.h
@@ -49,6 +49,13 @@
* spl_t s = simple_lock_irq(&mylock);
* [... critical section]
* simple_unlock_irq(s, &mylock);
+ *
+ * To catch faulty code, when MACH_LDEBUG is set we check that non-_irq versions
+ * are not called while handling an interrupt.
+ *
+ * In the following, the _nocheck versions don't check anything, the _irq
+ * versions disable interrupts, and the pristine versions add a check when
+ * MACH_LDEBUG is set.
*/
#if NCPUS > 1
@@ -249,9 +256,8 @@ extern void lock_clear_recursive(lock_t);
/* XXX: We don't keep track of readers, so this is an approximation. */
#define have_read_lock(l) ((l)->read_count > 0)
#define have_write_lock(l) ((l)->writer == current_thread())
-// Disabled for now, until all places are fixed
extern unsigned long in_interrupt[NCPUS];
-#define lock_check_no_interrupts() // assert(!in_interrupt[cpu_number()])
+#define lock_check_no_interrupts() assert(!in_interrupt[cpu_number()])
#endif /* MACH_LDEBUG */
#define have_lock(l) (have_read_lock(l) || have_write_lock(l))
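
A minimal sketch of how the three flavours described in the comment could fit together, assuming a macro layout for kern/lock.h that this hunk does not show; simple_lock_nocheck and the splhigh() wiring are illustrative assumptions, not the file's actual definitions:

#ifdef MACH_LDEBUG
#define lock_check_no_interrupts()	assert(!in_interrupt[cpu_number()])
#else
#define lock_check_no_interrupts()
#endif

/* Pristine version: checks that we are not in interrupt context. */
#define simple_lock(l)				\
	do {					\
		lock_check_no_interrupts();	\
		simple_lock_nocheck(l);		\
	} while (0)

/* _irq version: masks interrupts first, so no check is needed. */
#define simple_lock_irq(l)			\
	({					\
		spl_t __s = splhigh();		\
		simple_lock_nocheck(l);		\
		__s;				\
	})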
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 070644bd..1ac7575e 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -820,12 +820,20 @@ ENTRY(all_intrs)
TIME_INT_ENTRY /* do timing */
#endif
- call EXT(interrupt) /* call generic interrupt routine */
+#ifdef MACH_LDEBUG
+ CPU_NUMBER(%ecx)
+ incl CX(EXT(in_interrupt),%rcx)
+#endif
- .globl EXT(return_to_iret)
-LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
+ call EXT(interrupt) /* call generic interrupt routine */
+ .globl EXT(return_to_iret) /* ( label for kdb_kintr and hardclock */
+LEXT(return_to_iret) /* to find the return from calling interrupt) */
CPU_NUMBER(%edx)
+#ifdef MACH_LDEBUG
+ decl CX(EXT(in_interrupt),%rdx)
+#endif
+
#if STAT_TIME
#else
TIME_INT_EXIT /* do timing */
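
In C terms, the assembly bookkeeping added to both locore.S interrupt entry paths roughly amounts to the following (illustrative only; the real code lives in the hand-written all_intrs path):

#ifdef MACH_LDEBUG
	in_interrupt[cpu_number()]++;	/* entering interrupt context */
#endif
	/* ... call EXT(interrupt), the generic interrupt routine ... */
	/* return_to_iret: the label kdb_kintr and hardclock look for  */
#ifdef MACH_LDEBUG
	in_interrupt[cpu_number()]--;	/* leaving interrupt context */
#endif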