author     alc <alc@FreeBSD.org>  2004-12-23 20:16:11 +0000
committer  alc <alc@FreeBSD.org>  2004-12-23 20:16:11 +0000
commit     a618275b13403f9bf275545c254a6ad694a98ac7 (patch)
tree       69d2684272055b8e783776f57abaa83628519595
parent     8aad93579025896f709dbf6fcddb19ff74b73640 (diff)
Modify pmap_enter_quick() so that it expects the page queues to be locked
on entry and it assumes the responsibility for releasing the page queues
lock if it must sleep.

Remove a bogus comment from pmap_enter_quick().

Using the first change, modify vm_map_pmap_enter() so that the page queues
lock is acquired and released once, rather than each time that a page is
mapped.
-rw-r--r--  sys/alpha/alpha/pmap.c          5
-rw-r--r--  sys/amd64/amd64/pmap.c          5
-rw-r--r--  sys/arm/arm/pmap.c              3
-rw-r--r--  sys/i386/i386/pmap.c            5
-rw-r--r--  sys/ia64/ia64/pmap.c            5
-rw-r--r--  sys/powerpc/aim/mmu_oea.c       2
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c   2
-rw-r--r--  sys/powerpc/powerpc/pmap.c      2
-rw-r--r--  sys/sparc64/sparc64/pmap.c      2
-rw-r--r--  sys/vm/vm_fault.c               7
-rw-r--r--  sys/vm/vm_map.c                11

11 files changed, 19 insertions(+), 30 deletions(-)
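
The caller-side effect is easiest to see in the sys/vm/vm_map.c hunk at the
end of this diff: the page queues lock is now taken at most once around the
whole mapping loop instead of once per page. A minimal C sketch of the
resulting loop shape follows; it is illustrative rather than verbatim
source, and page_is_mappable() is a hypothetical stand-in for the
valid/busy/PG_FICTITIOUS tests in the real function.

	/*
	 * Illustrative sketch of the reworked vm_map_pmap_enter() loop,
	 * not verbatim source.  page_is_mappable() is a hypothetical
	 * stand-in for the valid/busy/PG_FICTITIOUS tests; the real
	 * function computes the per-page offset via tmpidx.
	 */
	are_queues_locked = FALSE;
	mpte = NULL;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (!page_is_mappable(p))
			continue;
		if (!are_queues_locked) {
			/* Acquire the page queues lock once for the loop. */
			are_queues_locked = TRUE;
			vm_page_lock_queues();
		}
		if ((p->queue - p->pc) == PQ_CACHE)
			vm_page_deactivate(p);	/* cache page -> inactive */
		mpte = pmap_enter_quick(map->pmap,
		    addr + ptoa(p->pindex - pindex), p, mpte);
	}
	if (are_queues_locked)
		vm_page_unlock_queues();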
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 41a5558..47044a8 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -1799,7 +1799,6 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
@@ -1810,7 +1809,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
register pt_entry_t *pte;
int managed;
- vm_page_lock_queues();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
PMAP_LOCK(pmap);
/*
@@ -1905,7 +1905,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
*pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | PG_KRE | PG_URE | managed;
out:
alpha_pal_imb(); /* XXX overkill? */
- vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
return mpte;
}
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index b17e34f..e60ff0f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2007,7 +2007,6 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
@@ -2018,7 +2017,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pt_entry_t *pte;
vm_paddr_t pa;
- vm_page_lock_queues();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
PMAP_LOCK(pmap);
/*
@@ -2110,7 +2110,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
else
pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
out:
- vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
return mpte;
}
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 9159787..f4585e3 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3409,7 +3409,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
@@ -3418,7 +3417,6 @@ vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
- vm_page_lock_queues();
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
@@ -3429,7 +3427,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
vm_page_wakeup(m);
- vm_page_unlock_queues();
return (NULL);
}
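The arm, powerpc, and sparc64 stubs show the sleep path of the new
contract: they busy the page, drop both locks before doing work that may
sleep, and reacquire them before returning, so the caller's page queues
lock invariant still holds on return. A minimal sketch of the resulting
shape (the elided middle is not shown in the hunks; assume it performs the
actual mapping and may sleep):

	vm_page_t
	pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
	{
		/*
		 * New contract: the caller holds the page queues lock
		 * (and the object lock) on entry.
		 */
		vm_page_busy(m);		/* keep the page stable while unlocked */
		vm_page_unlock_queues();	/* drop locks before a possible sleep */
		VM_OBJECT_UNLOCK(m->object);
		/* ... mapping work that may sleep (elided in the hunks) ... */
		VM_OBJECT_LOCK(m->object);
		vm_page_lock_queues();		/* restore the caller's invariant */
		vm_page_wakeup(m);
		return (NULL);			/* queues lock is held again here */
	}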
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 67ac6f6..5a60cb0 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2048,7 +2048,6 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
@@ -2059,7 +2058,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pt_entry_t *pte;
vm_paddr_t pa;
- vm_page_lock_queues();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
PMAP_LOCK(pmap);
/*
@@ -2151,7 +2151,6 @@ retry:
else
pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
out:
- vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
return mpte;
}
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index a2feed7..f488846 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1612,7 +1612,6 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
@@ -1624,7 +1623,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pmap_t oldpmap;
boolean_t managed;
- vm_page_lock_queues();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
@@ -1666,7 +1666,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
reinstall:
- vm_page_unlock_queues();
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
return (NULL);
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 136e412..768fe14 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1048,7 +1048,6 @@ vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
- vm_page_lock_queues();
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
@@ -1058,7 +1057,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
vm_page_wakeup(m);
- vm_page_unlock_queues();
return (NULL);
}
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 136e412..768fe14 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -1048,7 +1048,6 @@ vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
- vm_page_lock_queues();
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
@@ -1058,7 +1057,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
vm_page_wakeup(m);
- vm_page_unlock_queues();
return (NULL);
}
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 136e412..768fe14 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -1048,7 +1048,6 @@ vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
- vm_page_lock_queues();
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
@@ -1058,7 +1057,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
vm_page_wakeup(m);
- vm_page_unlock_queues();
return (NULL);
}
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index fa964ab..6a16076 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1401,7 +1401,6 @@ vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
- vm_page_lock_queues();
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
@@ -1411,7 +1410,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
vm_page_wakeup(m);
- vm_page_unlock_queues();
return (NULL);
}
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 6fac17c..4f6d1e9 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -977,12 +977,11 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
(m->busy == 0) &&
(m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((m->queue - m->pc) == PQ_CACHE) {
- vm_page_lock_queues();
+ vm_page_lock_queues();
+ if ((m->queue - m->pc) == PQ_CACHE)
vm_page_deactivate(m);
- vm_page_unlock_queues();
- }
mpte = pmap_enter_quick(pmap, addr, m, mpte);
+ vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(lobject);
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 46ed849..e001d9a 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1369,6 +1369,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
vm_offset_t tmpidx;
int psize;
vm_page_t p, mpte;
+ boolean_t are_queues_locked;
if ((prot & VM_PROT_READ) == 0 || object == NULL)
return;
@@ -1392,6 +1393,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
psize = object->size - pindex;
}
+ are_queues_locked = FALSE;
mpte = NULL;
if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
@@ -1420,15 +1422,18 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
- if ((p->queue - p->pc) == PQ_CACHE) {
+ if (!are_queues_locked) {
+ are_queues_locked = TRUE;
vm_page_lock_queues();
- vm_page_deactivate(p);
- vm_page_unlock_queues();
}
+ if ((p->queue - p->pc) == PQ_CACHE)
+ vm_page_deactivate(p);
mpte = pmap_enter_quick(map->pmap,
addr + ptoa(tmpidx), p, mpte);
}
}
+ if (are_queues_locked)
+ vm_page_unlock_queues();
unlock_return:
VM_OBJECT_UNLOCK(object);
}