 sys/alpha/alpha/pmap.c     | 4 ++++
 sys/dev/agp/agp.c          | 4 ++++
 sys/dev/agp/agp_i810.c     | 2 ++
 sys/pci/agp.c              | 4 ++++
 sys/pci/agp_i810.c         | 2 ++
 sys/sparc64/sparc64/pmap.c | 4 ++++
 sys/vm/vm_page.c           | 2 +-
 sys/vm/vm_page.h           | 7 +++----
 8 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 7955f65..30d7901 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -997,8 +997,10 @@ pmap_dispose_thread(td)
vm_page_busy(m);
ptek[i] = 0;
pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
+ vm_page_lock_queues();
vm_page_unwire(m, 0);
vm_page_free(m);
+ vm_page_unlock_queues();
}
/*
@@ -1036,8 +1038,10 @@ pmap_swapout_thread(td)
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
+ vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
pmap_kremove(ks + i * PAGE_SIZE);
}
}
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index b16d1ee..ab488fb 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -448,7 +448,9 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
for (k = 0; k <= i; k += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj,
OFF_TO_IDX(k));
+ vm_page_lock_queues();
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
}
lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
return error;
@@ -499,7 +501,9 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
AGP_UNBIND_PAGE(dev, mem->am_offset + i);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, atop(i));
+ vm_page_lock_queues();
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
}
agp_flush_cache();
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
index ed5f6d2..9f1b292 100644
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -399,7 +399,9 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
* Unwire the page which we wired in alloc_memory.
*/
vm_page_t m = vm_page_lookup(mem->am_obj, 0);
+ vm_page_lock_queues();
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
}
sc->agp.as_allocated -= mem->am_size;
diff --git a/sys/pci/agp.c b/sys/pci/agp.c
index b16d1ee..ab488fb 100644
--- a/sys/pci/agp.c
+++ b/sys/pci/agp.c
@@ -448,7 +448,9 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
for (k = 0; k <= i; k += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj,
OFF_TO_IDX(k));
+ vm_page_lock_queues();
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
}
lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
return error;
@@ -499,7 +501,9 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
AGP_UNBIND_PAGE(dev, mem->am_offset + i);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, atop(i));
+ vm_page_lock_queues();
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
}
agp_flush_cache();
diff --git a/sys/pci/agp_i810.c b/sys/pci/agp_i810.c
index ed5f6d2..9f1b292 100644
--- a/sys/pci/agp_i810.c
+++ b/sys/pci/agp_i810.c
@@ -399,7 +399,9 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
* Unwire the page which we wired in alloc_memory.
*/
vm_page_t m = vm_page_lookup(mem->am_obj, 0);
+ vm_page_lock_queues();
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
}
sc->agp.as_allocated -= mem->am_size;
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index e8414f5..2d6352c 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -887,9 +887,11 @@ pmap_dispose_thread(struct thread *td)
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
+ vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
+ vm_page_unlock_queues();
}
pmap_qremove(ks, KSTACK_PAGES);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
@@ -914,8 +916,10 @@ pmap_swapout_thread(struct thread *td)
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
+ vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
}
pmap_qremove(ks, KSTACK_PAGES);
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5d8b807..74eb575 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1285,7 +1285,7 @@ vm_page_unwire(vm_page_t m, int activate)
int s;
s = splvm();
-
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (m->wire_count > 0) {
m->wire_count--;
if (m->wire_count == 0) {
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 066fd8d..17afdb8 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -216,12 +216,8 @@ struct vpgqueues {
};
extern struct vpgqueues vm_page_queues[PQ_COUNT];
-extern struct mtx vm_page_queue_mtx;
extern struct mtx vm_page_queue_free_mtx;
-#define vm_page_lock_queues() mtx_lock(&vm_page_queue_mtx)
-#define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx)
-
#endif /* !defined(KLD_MODULE) */
/*
@@ -299,6 +295,9 @@ extern long first_page; /* first physical page number */
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[atop(pa) - first_page ])
+extern struct mtx vm_page_queue_mtx;
+#define vm_page_lock_queues() mtx_lock(&vm_page_queue_mtx)
+#define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx)
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
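
The pattern this diff introduces everywhere is the same: callers take the page
queue lock around vm_page_unwire() (and the related dirty/free operations),
while vm_page_unwire() itself only asserts that the lock is held via
mtx_assert(&vm_page_queue_mtx, MA_OWNED). The sketch below is a hypothetical
user-space analogue of that convention, not FreeBSD code: it uses POSIX
threads instead of mtx(9), and page_unwire, queue_lock, and struct page are
made-up illustration names.

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Illustration-only stand-ins for the page queue lock and a page. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct page {
	int wire_count;
};

/*
 * Analogue of vm_page_unwire() after this change: the function does not
 * acquire the lock itself, it only checks that somebody holds it.  POSIX
 * has no direct equivalent of mtx_assert(..., MA_OWNED), so a trylock
 * returning EBUSY is used as a rough approximation.
 */
static void
page_unwire(struct page *p)
{
	assert(pthread_mutex_trylock(&queue_lock) == EBUSY);
	if (p->wire_count > 0)
		p->wire_count--;
}

int
main(void)
{
	struct page p = { .wire_count = 1 };

	/* Caller convention introduced by the diff: lock, unwire, unlock. */
	pthread_mutex_lock(&queue_lock);
	page_unwire(&p);
	pthread_mutex_unlock(&queue_lock);

	printf("wire_count is now %d\n", p.wire_count);
	return (0);
}

Build with cc -pthread; the point is only the division of responsibility
(caller locks, callee asserts), which is what each hunk above adds around the
existing vm_page_unwire() call sites.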