summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    peter <peter@FreeBSD.org> 2002-08-05 06:10:03 +0000
committer peter <peter@FreeBSD.org> 2002-08-05 06:10:03 +0000
commit   273b24988c75a7b68ff02bc3b84beec00263bb9b (patch)
tree     51c355c12484bec9a105460bc5b4bc20b5dcaeee
parent   e7bcbc4dbfb3670fa4b840cd0b67ad74f0ab1239 (diff)
download FreeBSD-src-273b24988c75a7b68ff02bc3b84beec00263bb9b.zip
         FreeBSD-src-273b24988c75a7b68ff02bc3b84beec00263bb9b.tar.gz
Revert rev 1.356 and 1.352 (pmap_mapdev hacks). It wasn't worth the
pain.
-rw-r--r--  sys/amd64/amd64/pmap.c  19
-rw-r--r--  sys/i386/i386/pmap.c    19
2 files changed, 16 insertions(+), 22 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 208fdab..249eeb8 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3277,28 +3277,27 @@ pmap_mapdev(pa, size)
vm_size_t size;
{
vm_offset_t va, tmpva, offset;
+ pt_entry_t *pte;
offset = pa & PAGE_MASK;
- size = round_page(offset + size);
- pa = trunc_page(pa);
-
- /* We have a 1MB direct mapped region at KERNBASE */
- if (pa < 0x00100000 && pa + size <= 0x00100000)
- return (void *)(pa + offset + KERNBASE);
+ size = roundup(offset + size, PAGE_SIZE);
GIANT_REQUIRED;
+
va = kmem_alloc_pageable(kernel_map, size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
+ pa = pa & PG_FRAME;
for (tmpva = va; size > 0; ) {
- pmap_kenter(tmpva, pa);
+ pte = vtopte(tmpva);
+ *pte = pa | PG_RW | PG_V | pgeflag;
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
- return (void *)(va + offset);
+ return ((void *)(va + offset));
}
void
@@ -3311,9 +3310,7 @@ pmap_unmapdev(va, size)
base = va & PG_FRAME;
offset = va & PAGE_MASK;
- size = round_page(offset + size);
- if (base >= KERNBASE && va + size <= KERNBASE + 0x00100000)
- return; /* direct mapped */
+ size = roundup(offset + size, PAGE_SIZE);
for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
pte = vtopte(tmpva);
*pte = 0;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 208fdab..249eeb8 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3277,28 +3277,27 @@ pmap_mapdev(pa, size)
vm_size_t size;
{
vm_offset_t va, tmpva, offset;
+ pt_entry_t *pte;
offset = pa & PAGE_MASK;
- size = round_page(offset + size);
- pa = trunc_page(pa);
-
- /* We have a 1MB direct mapped region at KERNBASE */
- if (pa < 0x00100000 && pa + size <= 0x00100000)
- return (void *)(pa + offset + KERNBASE);
+ size = roundup(offset + size, PAGE_SIZE);
GIANT_REQUIRED;
+
va = kmem_alloc_pageable(kernel_map, size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
+ pa = pa & PG_FRAME;
for (tmpva = va; size > 0; ) {
- pmap_kenter(tmpva, pa);
+ pte = vtopte(tmpva);
+ *pte = pa | PG_RW | PG_V | pgeflag;
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
- return (void *)(va + offset);
+ return ((void *)(va + offset));
}
void
@@ -3311,9 +3310,7 @@ pmap_unmapdev(va, size)
base = va & PG_FRAME;
offset = va & PAGE_MASK;
- size = round_page(offset + size);
- if (base >= KERNBASE && va + size <= KERNBASE + 0x00100000)
- return; /* direct mapped */
+ size = roundup(offset + size, PAGE_SIZE);
for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
pte = vtopte(tmpva);
*pte = 0;
OpenPOWER on IntegriCloud