author		jhb <jhb@FreeBSD.org>	2001-03-06 06:06:42 +0000
committer	jhb <jhb@FreeBSD.org>	2001-03-06 06:06:42 +0000
commit		a710dd719491cf5d6cf25776c860d40ecf004895 (patch)
tree		e94703ba8537bcc7df6eed45946da665b34eea58 /sys/vm
parent		0a04578be41eeac63246d97839c306955bc8f7cc (diff)
- Rework pmap_map() to take advantage of direct-mapped segments on
  supported architectures such as the alpha.  This allows us to save
  on kernel virtual address space, TLB entries, and (on the ia64) VHPT
  entries.  pmap_map() now modifies the passed in virtual address on
  architectures that do not support direct-mapped segments to point to
  the next available virtual address.  It also returns the actual
  address that the request was mapped to.
- On the IA64 don't use a special zone of PV entries needed for early
  calls to pmap_kenter() during pmap_init().  This gets us in trouble
  because we end up trying to use the zone allocator before it is
  initialized.  Instead, with the pmap_map() change, the number of
  needed PV entries is small enough that we can get by with a static
  pool that is used until pmap_init() is complete.

Submitted by:	dfr
Debugging help:	peter
Tested by:	me
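To illustrate the new contract, here is a minimal sketch only, not the
actual machine-dependent implementations; HAVE_DIRECT_MAP and
DMAP_PHYS_TO_VIRT() are hypothetical stand-ins for a platform's
direct-map support (e.g. the alpha's K0SEG translation):

/*
 * Sketch of the reworked pmap_map() contract; names and structure are
 * illustrative, not the real MD code.  'prot' is ignored here.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
#ifdef HAVE_DIRECT_MAP				/* hypothetical option */
	/*
	 * Direct-mapped segment: hand back a direct-map address and
	 * consume no kernel virtual address space or TLB entries.
	 */
	return (DMAP_PHYS_TO_VIRT(start));	/* hypothetical macro */
#else
	vm_offset_t sva, va;

	/*
	 * No direct map: wire the range down at the caller's virtual
	 * address cursor, advance the cursor past the mapping, and
	 * return the address the request was actually mapped at.
	 */
	sva = va = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virt = va;
	return (sva);
#endif
}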
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/pmap.h		 2
-rw-r--r--	sys/vm/vm_page.c	25
2 files changed, 10 insertions(+), 17 deletions(-)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 1aca49d..4ff3321 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -110,7 +110,7 @@ boolean_t pmap_is_modified __P((vm_page_t m));
 boolean_t	 pmap_ts_referenced __P((vm_page_t m));
 void		 pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
 void		 pmap_kremove __P((vm_offset_t));
-vm_offset_t	 pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+vm_offset_t	 pmap_map __P((vm_offset_t *, vm_offset_t, vm_offset_t, int));
 void		 pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
		    vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
		    int pagelimit));
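From a caller's point of view, the prototype change above alters the
contract as follows (illustrative fragment; compare the vm_page.c
hunks below):

/* Old: map [new_end, end) at 'mapped'; the return is the next free KVA. */
vaddr = pmap_map(mapped, new_end, end, VM_PROT_READ | VM_PROT_WRITE);

/*
 * New: the KVA cursor is advanced in place through the pointer (on
 * machines without a direct map), and the return value is the address
 * at which [new_end, end) actually landed, possibly a direct-mapped
 * address that consumed no KVA at all.
 */
mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);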
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 808f7f3..e332564 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -180,7 +180,7 @@ vm_offset_t
 vm_page_startup(starta, enda, vaddr)
 	register vm_offset_t starta;
 	vm_offset_t enda;
-	register vm_offset_t vaddr;
+	vm_offset_t vaddr;
 {
 	register vm_offset_t mapped;
 	register struct vm_page **bucket;
@@ -242,8 +242,6 @@ vm_page_startup(starta, enda, vaddr)
 	 *
 	 * Note: This computation can be tweaked if desired.
 	 */
-	vm_page_buckets = (struct vm_page **)vaddr;
-	bucket = vm_page_buckets;
 	if (vm_page_bucket_count == 0) {
 		vm_page_bucket_count = 1;
 		while (vm_page_bucket_count < atop(total))
@@ -257,12 +255,12 @@
 	 */
 	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
 	new_end = trunc_page(new_end);
-	mapped = round_page(vaddr);
-	vaddr = pmap_map(mapped, new_end, end,
+	mapped = pmap_map(&vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
-	vaddr = round_page(vaddr);
-	bzero((caddr_t) mapped, vaddr - mapped);
+	bzero((caddr_t) mapped, end - new_end);
+	vm_page_buckets = (struct vm_page **)mapped;
+	bucket = vm_page_buckets;
 
 	for (i = 0; i < vm_page_bucket_count; i++) {
 		*bucket = NULL;
 		bucket++;
@@ -281,20 +279,15 @@
 	    (end - new_end)) / PAGE_SIZE;
 
 	end = new_end;
+
 	/*
 	 * Initialize the mem entry structures now, and put them in the free
 	 * queue.
 	 */
-	vm_page_array = (vm_page_t) vaddr;
-	mapped = vaddr;
-
-	/*
-	 * Validate these addresses.
-	 */
-
 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
-	mapped = pmap_map(mapped, new_end, end,
+	mapped = pmap_map(&vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
+	vm_page_array = (vm_page_t) mapped;
 
 	/*
 	 * Clear all of the page structures
@@ -321,7 +314,7 @@ vm_page_startup(starta, enda, vaddr)
 			pa += PAGE_SIZE;
 		}
 	}
-	return (mapped);
+	return (vaddr);
 }
 
 /*
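The ia64 side of the change (the static PV-entry pool that replaces the
early zone) lies outside this sys/vm diffstat.  As a hedged sketch of
the idea described in the log, with hypothetical names and pool size:

/*
 * Hypothetical bootstrap pool of PV entries for early pmap_kenter()
 * calls; the real ia64 code differs.  zalloc()/pvzone stand for the
 * zone allocator interface of this era.
 */
#define	PV_BOOTSTRAP_COUNT	32	/* assumed small upper bound */

static struct pv_entry	pv_bootstrap[PV_BOOTSTRAP_COUNT];
static int		pv_bootstrap_used;
static boolean_t	pmap_initialized;	/* set once pmap_init() completes */

static pv_entry_t
get_pv_entry(void)
{
	if (!pmap_initialized) {
		/* Too early for the zone allocator; use the static pool. */
		if (pv_bootstrap_used < PV_BOOTSTRAP_COUNT)
			return (&pv_bootstrap[pv_bootstrap_used++]);
		panic("get_pv_entry: bootstrap PV pool exhausted");
	}
	return (zalloc(pvzone));	/* normal path after pmap_init() */
}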