diff options
author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2024-03-03 23:52:10 +0100 |
---|---|---|
committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2024-03-03 23:52:42 +0100 |
commit | 759acbf16bc9215e0ae29dde84d223eaa8b1678e (patch) | |
tree | e3f5c853e974d36c85850e0e9ac9411ec78482c5 /i386 | |
parent | 43f2a52a6a00904f90a19fb178935cd16a7ced23 (diff) | |
download | gnumach-759acbf16bc9215e0ae29dde84d223eaa8b1678e.tar.gz gnumach-759acbf16bc9215e0ae29dde84d223eaa8b1678e.tar.bz2 gnumach-759acbf16bc9215e0ae29dde84d223eaa8b1678e.zip |
pmap: Avoid leaking USER bit in page tables
We should only set USER
- for user process maps
- for 32bit Xen support
This was not actually posing a problem, since in 32-bit mode segmentation
protects us, and in 64-bit mode the L4 entry for the kernel is already set.
But better be safe than sorry.
Diffstat (limited to 'i386')
-rw-r--r-- | i386/intel/pmap.c | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c index 21f75eeb..94c580e7 100644 --- a/i386/intel/pmap.c +++ b/i386/intel/pmap.c @@ -1231,12 +1231,16 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) { #ifdef MACH_PV_PAGETABLES if (!hyp_mmu_update_pte(kv_to_ma(pdp), pa_to_pte(kv_to_ma(ptp)) | INTEL_PTE_VALID +#ifndef __x86_64__ | INTEL_PTE_USER +#endif | INTEL_PTE_WRITE)) panic("%s:%d could not set pde %llx(%lx) to %lx(%lx)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp)); #else /* MACH_PV_PAGETABLES */ *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID +#ifndef __x86_64__ | INTEL_PTE_USER +#endif | INTEL_PTE_WRITE; #endif /* MACH_PV_PAGETABLES */ pte = pmap_pte(kernel_pmap, addr); @@ -1375,9 +1379,6 @@ pmap_t pmap_create(vm_size_t size) | INTEL_PTE_VALID #if (defined(__x86_64__) && !defined(MACH_HYP)) || defined(MACH_PV_PAGETABLES) | INTEL_PTE_WRITE -#ifdef __x86_64__ - | INTEL_PTE_USER -#endif /* __x86_64__ */ #endif ); } @@ -2078,12 +2079,12 @@ static inline pt_entry_t* pmap_expand_level(pmap_t pmap, vm_offset_t v, int spl, panic("couldn't pin page %lx(%lx)\n",ptp,(vm_offset_t) kv_to_ma(ptp)); if (!hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdp)), pa_to_pte(pa_to_ma(kvtophys(ptp))) | INTEL_PTE_VALID - | INTEL_PTE_USER + | (pmap != kernel_pmap ? INTEL_PTE_USER : 0) | INTEL_PTE_WRITE)) panic("%s:%d could not set pde %p(%llx,%lx) to %lx(%llx,%lx) %lx\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp))); #else /* MACH_PV_PAGETABLES */ *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID - | INTEL_PTE_USER + | (pmap != kernel_pmap ? INTEL_PTE_USER : 0) | INTEL_PTE_WRITE; #endif /* MACH_PV_PAGETABLES */ pdp++; /* Note: This is safe b/c we stay in one page. */ |