diff options
author | Richard Braun <rbraun@sceen.net> | 2016-09-20 21:34:07 +0200 |
---|---|---|
committer | Richard Braun <rbraun@sceen.net> | 2016-09-21 00:19:59 +0200 |
commit | 66a878640573dd9101e3915db44408b661220038 (patch) | |
tree | b030d125bc83e9c52b5e915fbe50de17d5eaf2bf /i386/intel | |
parent | 8322083864500f5726f4f04f80427acee4b52c9a (diff) | |
download | gnumach-66a878640573dd9101e3915db44408b661220038.tar.gz gnumach-66a878640573dd9101e3915db44408b661220038.tar.bz2 gnumach-66a878640573dd9101e3915db44408b661220038.zip |
Remove phys_first_addr and phys_last_addr global variables
The old assumption that all physical memory is directly mapped in
kernel space is about to go away. Those variables are directly linked
to that assumption.
* i386/i386/model_dep.h (phys_first_addr): Remove extern declaration.
(phys_last_addr): Likewise.
* i386/i386/phys.c (pmap_zero_page): Use VM_PAGE_DIRECTMAP_LIMIT
instead of phys_last_addr.
(pmap_copy_page, copy_to_phys, copy_from_phys): Likewise.
* i386/i386/trap.c (user_trap): Remove check against phys_last_addr.
* i386/i386at/biosmem.c (biosmem_bootstrap_common): Don't set
phys_last_addr.
* i386/i386at/mem.c (memmmap): Use vm_page_lookup_pa to determine if
a physical address references physical memory.
* i386/i386at/model_dep.c (phys_first_addr): Remove variable.
(phys_last_addr): Likewise.
(pmap_free_pages, pmap_valid_page): Remove functions.
* i386/intel/pmap.c: Include i386at/biosmem.h.
(pa_index): Turn into an alias for vm_page_table_index.
(pmap_bootstrap): Replace uses of phys_first_addr and phys_last_addr
as appropriate.
(pmap_virtual_space): Use vm_page_table_size instead of phys_first_addr
and phys_last_addr to obtain the number of physical pages.
(pmap_verify_free): Remove function.
(valid_page): Turn this macro into an inline function and rewrite
using vm_page_lookup_pa.
(pmap_page_table_page_alloc): Build the pmap VM object using
vm_page_table_size to determine its size.
(pmap_remove_range, pmap_page_protect, phys_attribute_clear,
phys_attribute_test): Turn page indexes into unsigned long integers.
(pmap_enter): Likewise. In addition, use either vm_page_lookup_pa or
biosmem_directmap_end to determine if a physical address references
physical memory.
* i386/xen/xen.c (hyp_p2m_init): Use vm_page_table_size instead of
phys_last_addr to obtain the number of physical pages.
* kern/startup.c (phys_first_addr): Remove extern declaration.
(phys_last_addr): Likewise.
* linux/dev/init/main.c (linux_init): Use vm_page_seg_end with the
appropriate segment selector instead of phys_last_addr to determine
where high memory starts.
* vm/pmap.h: Update requirements description.
(pmap_free_pages, pmap_valid_page): Remove declarations.
* vm/vm_page.c (vm_page_seg_end, vm_page_boot_table_size,
vm_page_table_size, vm_page_table_index): New functions.
* vm/vm_page.h (vm_page_seg_end, vm_page_table_size,
vm_page_table_index): New function declarations.
* vm/vm_resident.c (vm_page_bucket_count, vm_page_hash_mask): Define
as unsigned long integers.
(vm_page_bootstrap): Compute VP table size based on the page table
size instead of the value returned by pmap_free_pages.
Diffstat (limited to 'i386/intel')
-rw-r--r-- | i386/intel/pmap.c | 75 |
1 file changed, 33 insertions, 42 deletions
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c index e85e5480..7cde0931 100644 --- a/i386/intel/pmap.c +++ b/i386/intel/pmap.c @@ -83,6 +83,7 @@ #include <i386/proc_reg.h> #include <i386/locore.h> #include <i386/model_dep.h> +#include <i386at/biosmem.h> #include <i386at/model_dep.h> #ifdef MACH_PSEUDO_PHYS @@ -158,9 +159,9 @@ vm_offset_t kernel_virtual_end; /* * Index into pv_head table, its lock bits, and the modify/reference - * bits starting at phys_first_addr. + * bits. */ -#define pa_index(pa) (atop(pa - phys_first_addr)) +#define pa_index(pa) vm_page_table_index(pa) #define pai_to_pvh(pai) (&pv_head_table[pai]) #define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table)) @@ -499,8 +500,8 @@ vm_offset_t pmap_map( /* * Back-door routine for mapping kernel VM at initialization. - * Useful for mapping memory outside the range - * [phys_first_addr, phys_last_addr) (i.e., devices). + * Useful for mapping memory outside the range of direct mapped + * physical memory (i.e., devices). * Otherwise like pmap_map. */ vm_offset_t pmap_map_bd( @@ -600,8 +601,8 @@ void pmap_bootstrap(void) * mapped into the kernel address space, * and extends to a stupid arbitrary limit beyond that. */ - kernel_virtual_start = phystokv(phys_last_addr); - kernel_virtual_end = phystokv(phys_last_addr) + VM_KERNEL_MAP_SIZE; + kernel_virtual_start = phystokv(biosmem_directmap_end()); + kernel_virtual_end = kernel_virtual_start + VM_KERNEL_MAP_SIZE; if (kernel_virtual_end < kernel_virtual_start || kernel_virtual_end > VM_MAX_KERNEL_ADDRESS) @@ -692,8 +693,7 @@ void pmap_bootstrap(void) pt_entry_t global = CPU_HAS_FEATURE(CPU_FEATURE_PGE) ? INTEL_PTE_GLOBAL : 0; /* - * Map virtual memory for all known physical memory, 1-1, - * from phys_first_addr to phys_last_addr. + * Map virtual memory for all directly mappable physical memory, 1-1, * Make any mappings completely in the kernel's text segment read-only. 
* * Also allocate some additional all-null page tables afterwards @@ -702,7 +702,7 @@ void pmap_bootstrap(void) * to allocate new kernel page tables later. * XX fix this */ - for (va = phystokv(phys_first_addr); va >= phystokv(phys_first_addr) && va < kernel_virtual_end; ) + for (va = phystokv(0); va >= phystokv(0) && va < kernel_virtual_end; ) { pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va)); pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page()); @@ -713,7 +713,7 @@ void pmap_bootstrap(void) | INTEL_PTE_VALID | INTEL_PTE_WRITE); /* Initialize the page table. */ - for (pte = ptable; (va < phystokv(phys_last_addr)) && (pte < ptable+NPTES); pte++) + for (pte = ptable; (va < phystokv(biosmem_directmap_end())) && (pte < ptable+NPTES); pte++) { if ((pte - ptable) < ptenum(va)) { @@ -937,7 +937,7 @@ void pmap_virtual_space( */ void pmap_init(void) { - long npages; + unsigned long npages; vm_offset_t addr; vm_size_t s; #if NCPUS > 1 @@ -949,7 +949,7 @@ void pmap_init(void) * the modify bit array, and the pte_page table. 
*/ - npages = atop(phys_last_addr - phys_first_addr); + npages = vm_page_table_size(); s = (vm_size_t) (sizeof(struct pv_entry) * npages + pv_lock_table_size(npages) + npages); @@ -997,31 +997,16 @@ void pmap_init(void) pmap_initialized = TRUE; } -#define valid_page(x) (pmap_initialized && pmap_valid_page(x)) - -boolean_t pmap_verify_free(vm_offset_t phys) +static inline boolean_t +valid_page(phys_addr_t addr) { - pv_entry_t pv_h; - int pai; - int spl; - boolean_t result; + struct vm_page *p; - assert(phys != vm_page_fictitious_addr); if (!pmap_initialized) - return(TRUE); - - if (!pmap_valid_page(phys)) - return(FALSE); + return FALSE; - PMAP_WRITE_LOCK(spl); - - pai = pa_index(phys); - pv_h = pai_to_pvh(pai); - - result = (pv_h->pmap == PMAP_NULL); - PMAP_WRITE_UNLOCK(spl); - - return(result); + p = vm_page_lookup_pa(addr); + return (p != NULL); } /* @@ -1046,7 +1031,7 @@ pmap_page_table_page_alloc(void) * Allocate it now if it is missing. */ if (pmap_object == VM_OBJECT_NULL) - pmap_object = vm_object_allocate(phys_last_addr - phys_first_addr); + pmap_object = vm_object_allocate(vm_page_table_size() * PAGE_SIZE); /* * Allocate a VM page for the level 2 page table entries. 
@@ -1324,8 +1309,8 @@ void pmap_remove_range( pt_entry_t *epte) { pt_entry_t *cpte; - int num_removed, num_unwired; - int pai; + unsigned long num_removed, num_unwired; + unsigned long pai; vm_offset_t pa; #ifdef MACH_PV_PAGETABLES int n, ii = 0; @@ -1522,7 +1507,7 @@ void pmap_page_protect( pv_entry_t pv_h, prev; pv_entry_t pv_e; pt_entry_t *pte; - int pai; + unsigned long pai; pmap_t pmap; int spl; boolean_t remove; @@ -1792,9 +1777,10 @@ void pmap_enter( vm_prot_t prot, boolean_t wired) { + boolean_t is_physmem; pt_entry_t *pte; pv_entry_t pv_h; - int i, pai; + unsigned long i, pai; pv_entry_t pv_e; pt_entry_t template; int spl; @@ -1923,6 +1909,11 @@ Retry: continue; } + if (vm_page_ready()) + is_physmem = (vm_page_lookup_pa(pa) != NULL); + else + is_physmem = (pa < biosmem_directmap_end()); + /* * Special case if the physical page is already mapped * at this address. @@ -1944,7 +1935,7 @@ Retry: if (prot & VM_PROT_WRITE) template |= INTEL_PTE_WRITE; if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486 - && pa >= phys_last_addr) + && !is_physmem) template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU; if (wired) template |= INTEL_PTE_WIRED; @@ -2056,7 +2047,7 @@ Retry: if (prot & VM_PROT_WRITE) template |= INTEL_PTE_WRITE; if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486 - && pa >= phys_last_addr) + && !is_physmem) template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU; if (wired) template |= INTEL_PTE_WIRED; @@ -2418,7 +2409,7 @@ phys_attribute_clear( pv_entry_t pv_h; pv_entry_t pv_e; pt_entry_t *pte; - int pai; + unsigned long pai; pmap_t pmap; int spl; @@ -2502,7 +2493,7 @@ phys_attribute_test( pv_entry_t pv_h; pv_entry_t pv_e; pt_entry_t *pte; - int pai; + unsigned long pai; pmap_t pmap; int spl; |