author		Samuel Thibault <samuel.thibault@ens-lyon.org>	2024-03-04 00:18:37 +0100
committer	Samuel Thibault <samuel.thibault@ens-lyon.org>	2024-03-04 00:19:13 +0100
commit		a251b15660097c37892bf510f0c9979a4ec196f2 (patch)
tree		021a16ca9a8fa3869279d7a56e562b28463d6ed7 /x86_64
parent		759acbf16bc9215e0ae29dde84d223eaa8b1678e (diff)
x86_64 locore: Check segmentation by hand
x86_64 ignores the segmentation limit, so we have to check it by hand
when accessing userland pointers.
Reported-by: Sergey Bugaev <bugaevc@gmail.com>
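In 64-bit mode the processor does not enforce segment limits, which on i386 had made out-of-range user pointers fault in hardware; the patch therefore inserts an explicit unsigned compare against VM_MAX_ADDRESS before each user access. A minimal C sketch of the test (the VM_MAX_ADDRESS value and the user_ptr_in_range helper are placeholders, not gnumach's actual definitions):

	#include <stdint.h>

	/* Placeholder: gnumach defines the real VM_MAX_ADDRESS in its VM
	 * headers; the value below is illustrative only. */
	#define VM_MAX_ADDRESS ((uintptr_t)0x0000800000000000)

	/* C rendering of the added assembly.  The compare is unsigned, so
	 * this single test also rejects kernel-half ("negative") addresses,
	 * mirroring the jae (jump if above or equal) in the patch. */
	static inline int user_ptr_in_range(uintptr_t uaddr)
	{
		return uaddr < VM_MAX_ADDRESS;
	}

When the test fails, each added check branches to the routine's existing fault label (mach_call_addr_push, copyin_fail, and so on), so an out-of-range pointer takes the same error path as a hardware fault caught via RECOVER.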
Diffstat (limited to 'x86_64')
-rw-r--r--	x86_64/locore.S	20
1 file changed, 20 insertions, 0 deletions
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 25dc15d0..806762bb 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -1241,6 +1241,10 @@ syscall_native:
 	movq	R_UESP(%rbx),%rbp	/* get user stack pointer */
 	addq	$4,%rbp			/* Skip user return address */
 
+	movq	$VM_MAX_ADDRESS, %rcx
+	cmpq	%rcx,%rbp		/* Check segment limit by hand */
+	jae	mach_call_addr_push
+
 #define PARAM(reg,ereg) \
 	xorq	%reg,%reg	;\
 	RECOVER(mach_call_addr_push) \
@@ -1323,6 +1327,9 @@ syscall_emul:
 	/* XXX what about write-protected pages? */
 	movq	R_UESP(%rbx),%rdi	/* get user stack pointer */
 	subq	$16,%rdi		/* push space for new arguments */
+	movq	$VM_MAX_ADDRESS, %rax
+	cmpq	%rax,%rdi		/* Check segment limit by hand */
+	jae	syscall_addr
 	movq	R_EFLAGS(%rbx),%rax	/* move flags */
 	RECOVER(syscall_addr)
 	movl	%eax,%fs:0(%rdi)	/* to user stack */
@@ -1437,6 +1444,10 @@ _syscall64_args_stack:
 	lea	(%r11,%r10,8),%r11	/* point past last argument */
 
+	movq	$VM_MAX_ADDRESS, %r12
+	cmpq	%r12,%r11		/* Check segment limit by hand */
+	jae	_syscall64_addr_push
+
 0:	subq	$8,%r11
 	RECOVER(_syscall64_addr_push)
 	mov	(%r11),%r12
@@ -1545,6 +1556,9 @@ ENTRY(discover_x86_cpu_type)
 ENTRY(copyin)
 	xchgq	%rsi,%rdi		/* Get user source and kernel destination */
+	movq	$VM_MAX_ADDRESS, %rcx
+	cmpq	%rcx,%rsi		/* Check segment limit by hand */
+	jae	copyin_fail
 copyin_remainder:
 	/*cld*/				/* count up: default mode in all GCC code */
@@ -1572,6 +1586,9 @@ bogus:
 ENTRY(copyout)
 	xchgq	%rsi,%rdi		/* Get user source and kernel destination */
+	movq	$VM_MAX_ADDRESS, %rcx
+	cmpq	%rcx,%rdi		/* Check segment limit by hand */
+	jae	copyin_fail
 copyout_remainder:
 	movq	%rdx,%rax		/* use count */
@@ -1604,6 +1621,9 @@ ENTRY(inst_fetch)
 	movq	S_ARG1, %rax		/* get segment */
 	movw	%ax,%fs			/* into FS */
 	movq	S_ARG0, %rax		/* get offset */
+	movq	$VM_MAX_ADDRESS, %rcx
+	cmpq	%rcx,%rax		/* Check segment limit by hand */
+	jae	_inst_fetch_fault
 	RETRY(EXT(inst_fetch))		/* re-load FS on retry */
 	RECOVER(_inst_fetch_fault)
 	movzbq	%fs:(%rax),%rax		/* load instruction byte */
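Note that the added checks validate only the starting address; a pointer that begins in range but whose access faults on an unmapped page is still handled by the pre-existing RECOVER machinery. A rough C analogue of copyin's resulting shape, under the same placeholder VM_MAX_ADDRESS as above (memcpy and the integer return stand in for the kernel's fault-recovering copy loop and error reporting):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define VM_MAX_ADDRESS ((uintptr_t)0x0000800000000000)	/* placeholder */

	/* Control flow of copyin after the patch: reject pointers outside
	 * user space up front, then copy.  The real routine recovers from
	 * in-copy page faults through its RECOVER annotations; memcpy here
	 * is only a stand-in for that faulting copy loop. */
	static int copyin_sketch(const void *user_src, void *kernel_dst, size_t len)
	{
		if ((uintptr_t)user_src >= VM_MAX_ADDRESS)
			return 1;	/* out of range: the copyin_fail path */

		memcpy(kernel_dst, user_src, len);
		return 0;		/* success */
	}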