aboutsummaryrefslogtreecommitdiff
path: root/i386/intel/pmap.h
diff options
context:
space:
mode:
authorLuca Dariz <luca@orpolo.org>2023-05-21 10:57:56 +0200
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2023-05-21 20:55:01 +0200
commit222020cff440921e987dcd92e308dd775e5d543d (patch)
treef12d67c7bcb96a6528604c8a5c881ddd792c55b4 /i386/intel/pmap.h
parent95bf57a0625140e4b60f817150cb516bda65b446 (diff)
downloadgnumach-222020cff440921e987dcd92e308dd775e5d543d.tar.gz
gnumach-222020cff440921e987dcd92e308dd775e5d543d.tar.bz2
gnumach-222020cff440921e987dcd92e308dd775e5d543d.zip
pmap: dynamically allocate the whole user page tree map
* i386/intel/pmap.c: switch to dynamic allocation of all the page tree map levels for the user-space address range, using a separate kmem cache for each level. This allows extending the usable memory space on x86_64 to use more than one L3 page for user space. The kernel address map is left untouched for now as it needs a different initialization. * i386/intel/pmap.h: remove hardcoded user pages and add a macro to reconstruct the page-to-virtual mapping Message-Id: <20230521085758.365640-1-luca@orpolo.org>
Diffstat (limited to 'i386/intel/pmap.h')
-rw-r--r--i386/intel/pmap.h21
1 files changed, 20 insertions, 1 deletions
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 4c1b9bd5..5fc7fb25 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -75,7 +75,6 @@ typedef phys_addr_t pt_entry_t;
#define L4SHIFT 39 /* L4 shift */
#define L4MASK 0x1ff /* mask for L4 index */
#define PDPNUM_KERNEL (((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) >> PDPSHIFT) + 1)
-#define PDPNUM_USER (((VM_MAX_USER_ADDRESS - VM_MIN_USER_ADDRESS) >> PDPSHIFT) + 1)
#define PDPMASK 0x1ff /* mask for page directory pointer index */
#else /* __x86_64__ */
#define PDPNUM 4 /* number of page directory pointers */
@@ -130,6 +129,26 @@ typedef phys_addr_t pt_entry_t;
*/
#define pdenum2lin(a) ((vm_offset_t)(a) << PDESHIFT)
+#if PAE
+#ifdef __x86_64__
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l4num) << L4SHIFT) + \
+ ((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#else /* __x86_64__ */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+#else /* PAE */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+
+
/*
* Convert linear offset to page table index
*/