author	jhb <jhb@FreeBSD.org>	2001-03-06 06:06:42 +0000
committer	jhb <jhb@FreeBSD.org>	2001-03-06 06:06:42 +0000
commit	a710dd719491cf5d6cf25776c860d40ecf004895 (patch)
tree	e94703ba8537bcc7df6eed45946da665b34eea58	/sys/amd64
parent	0a04578be41eeac63246d97839c306955bc8f7cc (diff)
- Rework pmap_map() to take advantage of direct-mapped segments on
  supported architectures such as the alpha.  This allows us to save
  on kernel virtual address space, TLB entries, and (on the ia64) VHPT
  entries.  pmap_map() now modifies the passed in virtual address on
  architectures that do not support direct-mapped segments to point to
  the next available virtual address.  It also returns the actual
  address that the request was mapped to.
- On the IA64 don't use a special zone of PV entries needed for early
  calls to pmap_kenter() during pmap_init().  This gets us in trouble
  because we end up trying to use the zone allocator before it is
  initialized.  Instead, with the pmap_map() change, the number of
  needed PV entries is small enough that we can get by with a static
  pool that is used until pmap_init() is complete.

Submitted by:	dfr
Debugging help:	peter
Tested by:	me
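
The reworked interface can be pictured from the caller's side as follows.  This is an
illustrative sketch and not part of the commit: it assumes a caller that hands pmap_map()
the architecture's 'virtual_avail' cursor (the usual first free kernel VA) and simply
uses whatever address comes back; 'map_phys_range' is a hypothetical helper name.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>

extern vm_offset_t virtual_avail;	/* first free kernel virtual address */

static void *
map_phys_range(vm_offset_t start, vm_offset_t end)
{
	vm_offset_t va;

	/*
	 * pmap_map() returns the virtual address the range was actually
	 * mapped at; virtual_avail is only advanced on architectures
	 * that had to consume kernel VA to build the mapping.
	 */
	va = pmap_map(&virtual_avail, start, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	return ((void *)va);
}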
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c	20
1 files changed, 14 insertions, 6 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 52a5e3c..4bf0ace 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -711,22 +711,30 @@ pmap_kremove(va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
- * For now, VM is already on, we only need to map the
- * specified memory.
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
- vm_offset_t virt;
+ vm_offset_t *virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
+ vm_offset_t sva = *virt;
+ vm_offset_t va = sva;
while (start < end) {
- pmap_kenter(virt, start);
- virt += PAGE_SIZE;
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
start += PAGE_SIZE;
}
- return (virt);
+ *virt = va;
+ return (sva);
}
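
For contrast with the i386 version above, the direct-mapped case that the new comment
describes could look roughly like this.  This is a sketch only, not taken from this diff:
PHYS_TO_DIRECT() stands in for whatever permanent physical-to-virtual window an
architecture provides (the alpha uses its K0SEG segment for this purpose), and '*virt'
is deliberately left untouched because no kernel VA is consumed.

vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{

	/* No KVA or TLB entries are used; '*virt' stays as the caller set it. */
	return (PHYS_TO_DIRECT(start));	/* hypothetical direct-map macro */
}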