about summary refs log tree commit diff
path: root/vm
diff options
context:
space:
mode:
Diffstat (limited to 'vm')
-rw-r--r--  vm/vm_page.c     | 16
-rw-r--r--  vm/vm_resident.c |  9
2 files changed, 9 insertions(+), 16 deletions(-)
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 4c11ea7a..2a9f27b2 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -1620,10 +1620,6 @@ vm_page_boot_table_size(void)
nr_pages = 0;
for (i = 0; i < vm_page_segs_size; i++) {
- /* XXX */
- if (i > VM_PAGE_SEG_DIRECTMAP)
- continue;
-
nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
}
@@ -1643,10 +1639,6 @@ vm_page_table_size(void)
nr_pages = 0;
for (i = 0; i < vm_page_segs_size; i++) {
- /* XXX */
- if (i > VM_PAGE_SEG_DIRECTMAP)
- continue;
-
nr_pages += vm_page_atop(vm_page_seg_size(&vm_page_segs[i]));
}
@@ -1684,10 +1676,6 @@ vm_page_mem_size(void)
total = 0;
for (i = 0; i < vm_page_segs_size; i++) {
- /* XXX */
- if (i > VM_PAGE_SEG_DIRECTMAP)
- continue;
-
total += vm_page_seg_size(&vm_page_segs[i]);
}
@@ -1703,10 +1691,6 @@ vm_page_mem_free(void)
total = 0;
for (i = 0; i < vm_page_segs_size; i++) {
- /* XXX */
- if (i > VM_PAGE_SEG_DIRECTMAP)
- continue;
-
total += vm_page_segs[i].nr_free_pages;
}
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index e276fe68..e3e34dc3 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -771,6 +771,15 @@ vm_page_t vm_page_grab(void)
simple_lock(&vm_page_queue_free_lock);
+ /*
+ * XXX Mach has many modules that merely assume memory is
+ * directly mapped in kernel space. Instead of updating all
+ * users, we assume those which need specific physical memory
+ * properties will wire down their pages, either because
+ * they can't be paged (not part of an object), or with
+ * explicit VM calls. The strategy is then to let memory
+ * pressure balance the physical segments with pageable pages.
+ */
mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
if (mem == NULL) {