diff options
author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2012-03-19 01:32:32 +0100 |
---|---|---|
committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2020-03-28 17:10:59 +0100 |
commit | 3d146cfb99d77058866124e7e57366f60e8396c3 (patch) | |
tree | 319568555799fb5f96ef2483096d0c7a8e9310ed /x86_64 | |
parent | 79b501fc3a26480403ed586609ddca298d559ab9 (diff) | |
download | gnumach-3d146cfb99d77058866124e7e57366f60e8396c3.tar.gz gnumach-3d146cfb99d77058866124e7e57366f60e8396c3.tar.bz2 gnumach-3d146cfb99d77058866124e7e57366f60e8396c3.zip |
cswitch: Add 64bit variant
* x86_64/cswitch.S: New file.
Diffstat (limited to 'x86_64')
-rw-r--r-- | x86_64/cswitch.S | 150 |
1 file changed, 150 insertions, 0 deletions
diff --git a/x86_64/cswitch.S b/x86_64/cswitch.S new file mode 100644 index 00000000..1a7471c3 --- /dev/null +++ b/x86_64/cswitch.S @@ -0,0 +1,150 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#include <mach/machine/asm.h> + +#include <i386/i386/proc_reg.h> +#include <i386/i386/i386asm.h> +#include <i386/i386/cpu_number.h> + +/* + * Context switch routines for x86_64. 
+ */ + +ENTRY(Load_context) + movq S_ARG0,%rcx /* get thread */ + movq TH_KERNEL_STACK(%rcx),%rcx /* get kernel stack */ + lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%rcx),%rdx + /* point to stack top */ + CPU_NUMBER(%eax) + movq %rcx,CX(EXT(active_stacks),%eax) /* store stack address */ + movq %rdx,CX(EXT(kernel_stack),%eax) /* store stack top */ + +/* XXX complete */ + + movq KSS_ESP(%rcx),%rsp /* switch stacks */ + movq KSS_EBP(%rcx),%rbp /* restore registers */ + movq KSS_EBX(%rcx),%rbx + movq KSS_R12(%rcx),%r12 + movq KSS_R13(%rcx),%r13 + movq KSS_R14(%rcx),%r14 + movq KSS_R15(%rcx),%r15 + xorq %rax,%rax /* return zero (no old thread) */ + jmp *KSS_EIP(%rcx) /* resume thread */ + +/* + * This really only has to save registers + * when there is no explicit continuation. + */ + +ENTRY(Switch_context) + CPU_NUMBER(%eax) + movq CX(EXT(active_stacks),%eax),%rcx /* get old kernel stack */ + + movq %r12,KSS_R12(%rcx) /* save registers */ + movq %r13,KSS_R13(%rcx) + movq %r14,KSS_R14(%rcx) + movq %r15,KSS_R15(%rcx) + movq %rbx,KSS_EBX(%rcx) + movq %rbp,KSS_EBP(%rcx) + popq KSS_EIP(%rcx) /* save return PC */ + movq %rsp,KSS_ESP(%rcx) /* save SP */ + + movq S_ARG0,%rax /* get old thread */ + movq %rcx,TH_KERNEL_STACK(%rax) /* save old stack */ + movq S_ARG1,%rbx /* get continuation */ + movq %rbx,TH_SWAP_FUNC(%rax) /* save continuation */ + + movq S_ARG2,%rsi /* get new thread */ + + movq TH_KERNEL_STACK(%rsi),%rcx /* get its kernel stack */ + lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%rcx),%rbx + /* point to stack top */ + + CPU_NUMBER(%eax) + movq %rsi,CX(EXT(active_threads),%eax) /* new thread is active */ + movq %rcx,CX(EXT(active_stacks),%eax) /* set current stack */ + movq %rbx,CX(EXT(kernel_stack),%eax) /* set stack top */ + + movq KSS_ESP(%rcx),%rsp /* switch stacks */ + movq KSS_EBP(%rcx),%rbp /* restore registers */ + movq KSS_EBX(%rcx),%rbx + movq KSS_R12(%rcx),%r12 + movq KSS_R13(%rcx),%r13 + movq KSS_R14(%rcx),%r14 + movq KSS_R15(%rcx),%r15 + jmp *KSS_EIP(%rcx) 
/* return old thread */ + +ENTRY(Thread_continue) + movq %rax,%rdi /* push the thread argument */ + xorq %rbp,%rbp /* zero frame pointer */ + call *%rbx /* call real continuation */ + +#if NCPUS > 1 +/* + * void switch_to_shutdown_context(thread_t thread, + * void (*routine)(processor_t), + * processor_t processor) + * + * saves the kernel context of the thread, + * switches to the interrupt stack, + * continues the thread (with thread_continue), + * then runs routine on the interrupt stack. + * + * Assumes that the thread is a kernel thread (thus + * has no FPU state) + */ +ENTRY(switch_to_shutdown_context) +ud2 + CPU_NUMBER(%eax) + movq EXT(active_stacks)(,%eax,8),%rcx /* get old kernel stack */ + movq %r12,KSS_R12(%rcx) /* save registers */ + movq %r13,KSS_R13(%rcx) + movq %r14,KSS_R14(%rcx) + movq %r15,KSS_R15(%rcx) + movq %rbx,KSS_EBX(%rcx) + movq %rbp,KSS_EBP(%rcx) + popq KSS_EIP(%rcx) /* save return PC */ + movq %rsp,KSS_ESP(%rcx) /* save SP */ + + movq S_ARG0,%rax /* get old thread */ + movq %rcx,TH_KERNEL_STACK(%rax) /* save old stack */ + movq $0,TH_SWAP_FUNC(%rax) /* clear continuation */ + movq S_ARG1,%rbx /* get routine to run next */ + movq S_ARG2,%rsi /* get its argument */ + + CPU_NUMBER(%eax) + movq EXT(interrupt_stack)(,%eax,8),%rcx /* point to its interrupt stack */ + lea INTSTACK_SIZE(%rcx),%rsp /* switch to it (top) */ + + movq %rax,%rdi /* push thread */ + call EXT(thread_dispatch) /* reschedule thread */ + + movq %rsi,%rdi /* push argument */ + call *%rbx /* call routine to run */ + hlt /* (should never return) */ + +#endif /* NCPUS > 1 */ |