author	Samuel Thibault <samuel.thibault@ens-lyon.org>	2023-08-14 22:08:10 +0200
committer	Samuel Thibault <samuel.thibault@ens-lyon.org>	2023-08-14 22:42:25 +0200
commit	befadb5a31a013a95802478d45707f49043a773c (patch)
tree	aedfbce156c155c329e13defca2069d10653d29d
parent	160f9286fd8b6e358244e2642c3068c9484d82d0 (diff)
pmap+slab: Add more smoketests
Checking the range of addresses for operations on the kernel_pmap is
quite cheap, and allows us to catch oddities early.
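The pattern behind these smoketests is a plain bounds check against the
kernel's virtual window before touching a mapping. A minimal sketch of
the idea (check_kernel_virtual_range is a hypothetical helper for
illustration; vm_offset_t, kernel_virtual_start, kernel_virtual_end and
panic() are the real gnumach symbols used in the diff below):

/* Hypothetical sketch of the smoketest pattern: refuse any kernel_pmap
 * operation whose range [start, end) leaves the kernel virtual window,
 * since addresses outside it belong to the direct physical mapping. */
static inline void
check_kernel_virtual_range(vm_offset_t start, vm_offset_t end)
{
	if (start < kernel_virtual_start || end > kernel_virtual_end)
		panic("operation on %lx-%lx falls in physical memory area!\n",
		      (unsigned long) start, (unsigned long) end);
}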
-rw-r--r--	i386/intel/pmap.c	12
-rw-r--r--	kern/slab.c	3
2 files changed, 11 insertions, 4 deletions
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 6218b27a..a9a615e9 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -1553,6 +1553,9 @@ void pmap_remove_range(
 	struct mmu_update update[HYP_BATCH_MMU_UPDATES];
 #endif	/* MACH_PV_PAGETABLES */
 
+	if (pmap == kernel_pmap && (va < kernel_virtual_start || va + (epte-spte)*PAGE_SIZE > kernel_virtual_end))
+		panic("pmap_remove_range(%lx-%lx) falls in physical memory area!\n", (unsigned long) va, (unsigned long) va + (epte-spte)*PAGE_SIZE);
+
 #if	DEBUG_PTE_PAGE
 	if (pmap != kernel_pmap)
 		ptep_check(get_pte_page(spte));
@@ -1565,6 +1568,9 @@ void pmap_remove_range(
 
 	    if (*cpte == 0)
 		continue;
+
+	    assert(*cpte & INTEL_PTE_VALID);
+
 	    pa = pte_to_pa(*cpte);
 
 	    num_removed++;
@@ -1639,7 +1645,7 @@ void pmap_remove_range(
 
 		pv_h = pai_to_pvh(pai);
 		if (pv_h->pmap == PMAP_NULL) {
-		    panic("pmap_remove: null pv_list for pai %lx at va %lx!", pai, va);
+		    panic("pmap_remove: null pv_list for pai %lx at va %lx!", pai, (unsigned long) va);
 		}
 		if (pv_h->va == va && pv_h->pmap == pmap) {
 		    /*
@@ -2136,10 +2142,8 @@ void pmap_enter(
 	if (pmap == PMAP_NULL)
 		return;
 
-#if !MACH_KDB
 	if (pmap == kernel_pmap && (v < kernel_virtual_start || v >= kernel_virtual_end))
-		panic("pmap_enter(%llx, %llx) falls in physical memory area!\n", v, (unsigned long long) pa);
-#endif
+		panic("pmap_enter(%lx, %llx) falls in physical memory area!\n", (unsigned long) v, (unsigned long long) pa);
 #if !(__i486__ || __i586__ || __i686__)
 	if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
 	    && !wired /* hack for io_wire */ ) {
diff --git a/kern/slab.c b/kern/slab.c
index 7cb4ee27..ee51f9e9 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -416,6 +416,9 @@ kmem_pagealloc_virtual(vm_size_t size, vm_size_t align)
 static void
 kmem_pagefree_virtual(vm_offset_t addr, vm_size_t size)
 {
+    if (addr < kernel_virtual_start || addr + size > kernel_virtual_end)
+        panic("kmem_pagefree_virtual(%lx-%lx) falls in physical memory area!\n",
+              (unsigned long) addr, (unsigned long) addr + size);
     assert(size > PAGE_SIZE);
     size = vm_page_round(size);
     kmem_free(kernel_map, addr, size);