-rw-r--r--  sys/amd64/amd64/pmap.c  8
-rw-r--r--  sys/i386/i386/pmap.c    8
2 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fd4fed3..c6db00b 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3135,10 +3135,15 @@ pmap_mapdev(pa, size)
 {
 	vm_offset_t va, tmpva, offset;
 	unsigned *pte;
+	int hadvmlock;
 
 	offset = pa & PAGE_MASK;
 	size = roundup(offset + size, PAGE_SIZE);
 
+	hadvmlock = mtx_owned(&vm_mtx);
+	if (!hadvmlock)
+		mtx_lock(&vm_mtx);
+
 	va = kmem_alloc_pageable(kernel_map, size);
 	if (!va)
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
@@ -3153,6 +3158,9 @@ pmap_mapdev(pa, size)
 	}
 	invltlb();
+	if (!hadvmlock)
+		mtx_unlock(&vm_mtx);
+
 	return ((void *)(va + offset));
 }
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index fd4fed3..c6db00b 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3135,10 +3135,15 @@ pmap_mapdev(pa, size)
 {
 	vm_offset_t va, tmpva, offset;
 	unsigned *pte;
+	int hadvmlock;
 
 	offset = pa & PAGE_MASK;
 	size = roundup(offset + size, PAGE_SIZE);
 
+	hadvmlock = mtx_owned(&vm_mtx);
+	if (!hadvmlock)
+		mtx_lock(&vm_mtx);
+
 	va = kmem_alloc_pageable(kernel_map, size);
 	if (!va)
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
@@ -3153,6 +3158,9 @@ pmap_mapdev(pa, size)
 	}
 	invltlb();
+	if (!hadvmlock)
+		mtx_unlock(&vm_mtx);
+
 	return ((void *)(va + offset));
 }
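
For reference, the locking idiom both hunks introduce is sketched below in isolation: pmap_mapdev() records whether the caller already owns the global vm_mtx, acquires the mutex only when it does not, and releases it on the way out only if it was taken here, so the function can be entered with or without the VM lock held and never recurses on the mutex. The sketch is illustrative only; the function name and placeholder body are not part of the change, while mtx_owned(), mtx_lock(), mtx_unlock() and vm_mtx are the interfaces the diff itself uses.

/*
 * Minimal sketch (not the committed code) of the conditional-locking
 * idiom from the hunks above, assuming the kernel environment of the
 * diff (the mutex(9) API and the global vm_mtx).  mapdev_sketch() and
 * its body are placeholders.
 */
static void *
mapdev_sketch(vm_offset_t pa, vm_size_t size)
{
	void *result;
	int hadvmlock;

	hadvmlock = mtx_owned(&vm_mtx);	/* does the caller hold vm_mtx? */
	if (!hadvmlock)
		mtx_lock(&vm_mtx);	/* acquire it only if it did not */

	/* ... allocate KVA and enter the device mappings under vm_mtx ... */
	result = NULL;

	if (!hadvmlock)
		mtx_unlock(&vm_mtx);	/* release only what this call took */
	return (result);
}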