author     Samuel Thibault <samuel.thibault@ens-lyon.org>  2023-10-28 13:05:33 +0200
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>  2023-10-28 13:06:39 +0200
commit     0ea8f34d6b2d37cb5027a9cd8d143b0d6b701613 (patch)
tree       36b06a38574b15555466f08dcfa1deb95ca28df3 /x86_64
parent     fc47cd4ec8314c3df45a6cedc2d633bd52bca01e (diff)
64bit: Fix locore build
To allow references to int_stack_base to be essentially unconstrained, we need
to use 64-bit registers for indexing.
CPU_NUMBER_NO_GS was missing a 64-bit variant.
CPU_NUMBER_NO_STACK assumes it is passed a 32-bit register.
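As an aside (not part of the commit), the register-width rule the fix relies on can be shown in a tiny standalone file. Everything below (the file name, the stack_base label, its four-entry table and the width_demo function) is made up for illustration; only the addressing form mirrors locore.S:

/* width.S -- hypothetical sketch, not from the gnumach tree.
 * Assemble with:  gcc -c width.S
 */
	.data
stack_base:				/* stand-in for int_stack_base */
	.quad	0, 0, 0, 0		/* one slot per CPU, four CPUs assumed */

	.text
	.globl	width_demo
width_demo:
	movq	%rsp,%rdx		/* some 64-bit value to compare against,
					   like locore's masked %rsp */
	movl	$2,%ecx			/* a CPU number, as CPU_NUMBER(%ecx) would
					   leave it; a 32-bit write zero-extends
					   into the whole of %rcx */
	cmpq	stack_base(,%rcx,8),%rdx /* index with the 64-bit register name,
					   so the effective address is computed
					   as a normal 64-bit address */
	ret

The same zero-extension is presumably why handing %r11d to CPU_NUMBER_NO_STACK and CPU_NUMBER_NO_GS (which, per the message above, expect a 32-bit register) still pairs correctly with the later CX(EXT(kernel_stack),%r11) and CX(EXT(need_ast),%r11) accesses: assuming the macros store the CPU number with a 32-bit move, that move clears the upper half of %r11, so the subsequent 64-bit indexed access sees the right value.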
Diffstat (limited to 'x86_64')
-rw-r--r--  x86_64/locore.S | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 2db0d49b..af3809ee 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -696,7 +696,7 @@ trap_from_kernel:
 	CPU_NUMBER(%ecx)
 	and	$(~(INTSTACK_SIZE-1)),%rdx
-	cmpq	CX(EXT(int_stack_base),%ecx),%rdx
+	cmpq	CX(EXT(int_stack_base),%rcx),%rdx
 	je	1f			/* OK if so */
 	movl	%ecx,%edx
@@ -828,7 +828,7 @@ ENTRY(all_intrs)
 	CPU_NUMBER_NO_GS(%ecx)
 	movq	%rsp,%rdx		/* on an interrupt stack? */
 	and	$(~(INTSTACK_SIZE-1)),%rdx
-	cmpq	%ss:CX(EXT(int_stack_base),%ecx),%rdx
+	cmpq	%ss:CX(EXT(int_stack_base),%rcx),%rdx
 	je	int_from_intstack	/* if not: */
 	SET_KERNEL_SEGMENTS(%rdx)	/* switch to kernel segments */
@@ -888,7 +888,7 @@ LEXT(return_to_iret)	/* to find the return from calling interrupt) */
 int_from_intstack:
 	CPU_NUMBER_NO_GS(%edx)
-	cmpq	CX(EXT(int_stack_base),%edx),%rsp	/* seemingly looping? */
+	cmpq	CX(EXT(int_stack_base),%rdx),%rsp	/* seemingly looping? */
 	jb	stack_overflowed	/* if not: */
 	call	EXT(interrupt)		/* call interrupt routine */
 _return_to_iret_i:			/* ( label for kdb_kintr) */
@@ -1408,7 +1408,7 @@ ENTRY(syscall64)
 	mov	%r10,%rcx	/* fix arg3 location according to C ABI */
 	/* switch to kernel stack, then we can enable interrupts */
-	CPU_NUMBER_NO_STACK(%r11)
+	CPU_NUMBER_NO_STACK(%r11d)
 	movq	CX(EXT(kernel_stack),%r11),%rsp
 	sti
@@ -1447,7 +1447,7 @@ _syscall64_call:
 _syscall64_check_for_ast:
 	/* Check for ast. */
-	CPU_NUMBER_NO_GS(%r11)
+	CPU_NUMBER_NO_GS(%r11d)
 	cmpl	$0,CX(EXT(need_ast),%r11)
 	jz	_syscall64_restore_state