author     Samuel Thibault <samuel.thibault@ens-lyon.org>   2023-02-15 11:08:08 +0100
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>   2023-02-15 11:08:10 +0100
commit     c1da11e520e6454c7798dc0fbd715d8dce87ee27 (patch)
tree       21bbadf35387f048867ce1562d301204d0efb697 /i386/intel
parent     4fc6cb13da6628fef1ce2e3a45a036cc3804b93e (diff)
pmap: Make mapwindow per CPU
The map windows are only used temporarily and are never handed over from one
CPU to another, yet they may need to be used concurrently, so just two global
slots would not be enough anyway. Making them per-CPU also saves having to
take a lock for them.
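
The scheme can be pictured outside the kernel: each CPU scans only its own
contiguous slice of the window array, so two CPUs can never contend for the
same slot and no lock is needed, as long as the holder does not migrate to
another CPU while it owns a window. Below is a minimal user-space sketch of
that indexing; the names NWINDOWS, NCPUS_MODEL and cpu_number_model() are
invented for illustration and are not gnumach identifiers.

#include <assert.h>
#include <stdio.h>

#define NWINDOWS     2   /* windows per CPU, mirroring PMAP_NMAPWINDOWS */
#define NCPUS_MODEL  4   /* stand-in for the kernel's NCPUS */

struct window { int in_use; };

/* One contiguous slice of NWINDOWS entries per CPU. */
static struct window windows[NWINDOWS * NCPUS_MODEL];

/* Stand-in for cpu_number(); the kernel reads this from per-CPU state. */
static int cpu_number_model(void) { return 0; }

/* Scan only the current CPU's slice; no lock is needed because other
   CPUs never touch these slots. */
static struct window *get_window(void)
{
        int cpu = cpu_number_model();
        struct window *w;

        for (w = &windows[cpu * NWINDOWS]; w < &windows[(cpu + 1) * NWINDOWS]; w++)
                if (!w->in_use)
                        break;
        assert(w < &windows[(cpu + 1) * NWINDOWS]);  /* a free slot must exist */
        w->in_use = 1;
        return w;
}

static void put_window(struct window *w)
{
        w->in_use = 0;
}

int main(void)
{
        struct window *a = get_window();
        struct window *b = get_window();  /* two at once, as for phys-to-phys copies */
        printf("slots %ld and %ld\n", (long)(a - windows), (long)(b - windows));
        put_window(b);
        put_window(a);
        return 0;
}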
Diffstat (limited to 'i386/intel')
-rw-r--r--   i386/intel/pmap.c   18
-rw-r--r--   i386/intel/pmap.h    2
2 files changed, 8 insertions, 12 deletions
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 3605d120..67c55e7d 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -427,8 +427,7 @@ pt_entry_t *kernel_page_dir;
  * Two slots for temporary physical page mapping, to allow for
  * physical-to-physical transfers.
  */
-static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS];
-def_simple_lock_data(static, pmapwindows_lock)
+static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS * NCPUS];
 
 #ifdef PAE
 static inline pt_entry_t *
@@ -847,9 +846,9 @@ void pmap_bootstrap(void)
         }
         for (; pte < ptable+NPTES; pte++)
         {
-                if (va >= kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE && va < kernel_virtual_end)
+                if (va >= kernel_virtual_end - PMAP_NMAPWINDOWS * NCPUS * PAGE_SIZE && va < kernel_virtual_end)
                 {
-                        pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE))];
+                        pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - PMAP_NMAPWINDOWS * NCPUS * PAGE_SIZE))];
                         win->entry = pte;
                         win->vaddr = va;
                 }
@@ -1005,15 +1004,15 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
 pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
 {
         pmap_mapwindow_t *map;
+        int cpu = cpu_number();
 
         assert(entry != 0);
 
-        simple_lock(&pmapwindows_lock);
         /* Find an empty one.  */
-        for (map = &mapwindows[0]; map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]; map++)
+        for (map = &mapwindows[cpu * PMAP_NMAPWINDOWS]; map < &mapwindows[(cpu+1) * PMAP_NMAPWINDOWS]; map++)
                 if (!(*map->entry))
                         break;
-        assert(map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]);
+        assert(map < &mapwindows[(cpu+1) * PMAP_NMAPWINDOWS]);
 
 #ifdef MACH_PV_PAGETABLES
         if (!hyp_mmu_update_pte(kv_to_ma(map->entry), pa_to_ma(entry)))
@@ -1021,7 +1020,6 @@ pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
 #else /* MACH_PV_PAGETABLES */
         WRITE_PTE(map->entry, entry);
 #endif /* MACH_PV_PAGETABLES */
-        simple_unlock(&pmapwindows_lock);
         INVALIDATE_TLB(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
         return map;
 }
@@ -1031,14 +1029,12 @@ pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
  */
 void pmap_put_mapwindow(pmap_mapwindow_t *map)
 {
-        simple_lock(&pmapwindows_lock);
 #ifdef MACH_PV_PAGETABLES
         if (!hyp_mmu_update_pte(kv_to_ma(map->entry), 0))
                 panic("pmap_put_mapwindow");
 #else /* MACH_PV_PAGETABLES */
         WRITE_PTE(map->entry, 0);
 #endif /* MACH_PV_PAGETABLES */
-        simple_unlock(&pmapwindows_lock);
         INVALIDATE_TLB(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
 }
 
@@ -1047,7 +1043,7 @@ void pmap_virtual_space(
         vm_offset_t *endp)
 {
         *startp = kernel_virtual_start;
-        *endp = kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE;
+        *endp = kernel_virtual_end - PMAP_NMAPWINDOWS * NCPUS * PAGE_SIZE;
 }
 
 /*
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 68784ac9..4c1b9bd5 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -247,7 +247,7 @@ typedef struct {
 extern pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry);
 extern void pmap_put_mapwindow(pmap_mapwindow_t *map);
 
-#define PMAP_NMAPWINDOWS 2
+#define PMAP_NMAPWINDOWS 2      /* Per CPU */
 
 #if NCPUS > 1
 /*
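
For context, here is a hedged sketch of how a caller typically pairs the two
functions to copy one physical page through two temporary windows, which is
why PMAP_NMAPWINDOWS is 2 per CPU. The PTE construction helpers (pa_to_pte,
INTEL_PTE_VALID, INTEL_PTE_WRITE) and phys_addr_t follow the usual Mach i386
pmap headers and are assumptions here, not taken from this diff.

/* Illustrative only: copy one physical page via two per-CPU map windows.
   Assumes kernel context with the i386 pmap declarations and memcpy
   available; flag and helper names may differ from the actual gnumach
   callers. */
static void copy_phys_page_sketch(phys_addr_t src, phys_addr_t dst)
{
        pmap_mapwindow_t *src_win, *dst_win;

        /* Each call picks a free slot among the current CPU's windows. */
        src_win = pmap_get_mapwindow(pa_to_pte(src) | INTEL_PTE_VALID);
        dst_win = pmap_get_mapwindow(pa_to_pte(dst) | INTEL_PTE_VALID | INTEL_PTE_WRITE);

        memcpy((void *) dst_win->vaddr, (void *) src_win->vaddr, PAGE_SIZE);

        /* Clear the PTEs again so the slots can be reused. */
        pmap_put_mapwindow(src_win);
        pmap_put_mapwindow(dst_win);
}

Because both windows belong to the running CPU, no lock is taken; the only
requirement, as the commit message notes, is that a window is held briefly
and never handed over to another CPU.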