summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorpeter <peter@FreeBSD.org>2002-08-03 01:02:37 +0000
committerpeter <peter@FreeBSD.org>2002-08-03 01:02:37 +0000
commit11999f653492e17e269ada0829468c98c87a4e37 (patch)
treef7b6181bd37c9f08bc0942a27e7c2acbc3a6620b /sys
parentd468bdf569109a05fcc47c18e0da1e754e5b2a32 (diff)
downloadFreeBSD-src-11999f653492e17e269ada0829468c98c87a4e37.zip
FreeBSD-src-11999f653492e17e269ada0829468c98c87a4e37.tar.gz
Take advantage of the fact that there is a small 1MB direct mapped region
on x86 in between KERNBASE and the kernel load address. pmap_mapdev() can return pointers to this for devices operating in the ISA "hole".
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/pmap.c19
-rw-r--r--sys/i386/i386/pmap.c19
2 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index ffd23d4..409e4ec 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3282,27 +3282,28 @@ pmap_mapdev(pa, size)
vm_size_t size;
{
vm_offset_t va, tmpva, offset;
- pt_entry_t *pte;
offset = pa & PAGE_MASK;
- size = roundup(offset + size, PAGE_SIZE);
+ size = round_page(offset + size);
+ pa = trunc_page(pa);
- GIANT_REQUIRED;
+ /* We have a 1MB direct mapped region at KERNBASE */
+ if (pa < 0x00100000 && pa + size <= 0x00100000)
+ return (void *)(pa + KERNBASE);
+ GIANT_REQUIRED;
va = kmem_alloc_pageable(kernel_map, size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
- pa = pa & PG_FRAME;
for (tmpva = va; size > 0; ) {
- pte = vtopte(tmpva);
- *pte = pa | PG_RW | PG_V | pgeflag;
+ pmap_kenter(tmpva, pa);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
- return ((void *)(va + offset));
+ return (void *)(va + offset);
}
void
@@ -3315,7 +3316,9 @@ pmap_unmapdev(va, size)
base = va & PG_FRAME;
offset = va & PAGE_MASK;
- size = roundup(offset + size, PAGE_SIZE);
+ size = round_page(offset + size);
+ if (base >= KERNBASE && va + size <= KERNBASE + 0x00100000)
+ return; /* direct mapped */
for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
pte = vtopte(tmpva);
*pte = 0;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index ffd23d4..409e4ec 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3282,27 +3282,28 @@ pmap_mapdev(pa, size)
vm_size_t size;
{
vm_offset_t va, tmpva, offset;
- pt_entry_t *pte;
offset = pa & PAGE_MASK;
- size = roundup(offset + size, PAGE_SIZE);
+ size = round_page(offset + size);
+ pa = trunc_page(pa);
- GIANT_REQUIRED;
+ /* We have a 1MB direct mapped region at KERNBASE */
+ if (pa < 0x00100000 && pa + size <= 0x00100000)
+ return (void *)(pa + KERNBASE);
+ GIANT_REQUIRED;
va = kmem_alloc_pageable(kernel_map, size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
- pa = pa & PG_FRAME;
for (tmpva = va; size > 0; ) {
- pte = vtopte(tmpva);
- *pte = pa | PG_RW | PG_V | pgeflag;
+ pmap_kenter(tmpva, pa);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
- return ((void *)(va + offset));
+ return (void *)(va + offset);
}
void
@@ -3315,7 +3316,9 @@ pmap_unmapdev(va, size)
base = va & PG_FRAME;
offset = va & PAGE_MASK;
- size = roundup(offset + size, PAGE_SIZE);
+ size = round_page(offset + size);
+ if (base >= KERNBASE && va + size <= KERNBASE + 0x00100000)
+ return; /* direct mapped */
for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
pte = vtopte(tmpva);
*pte = 0;
OpenPOWER on IntegriCloud