summaryrefslogtreecommitdiffstats
path: root/sys/vm/memguard.c
diff options
context:
space:
mode:
authorbmilekic <bmilekic@FreeBSD.org>2005-02-15 22:17:07 +0000
committerbmilekic <bmilekic@FreeBSD.org>2005-02-15 22:17:07 +0000
commit04e8cef9b4d31f7aa83936f7554e8a757394d805 (patch)
treec62390077b3556e5b8c21300f51bcbbc2b00ada8 /sys/vm/memguard.c
parent0054992ccdf09c6d1e4596265a1fcf6de80db953 (diff)
downloadFreeBSD-src-04e8cef9b4d31f7aa83936f7554e8a757394d805.zip
FreeBSD-src-04e8cef9b4d31f7aa83936f7554e8a757394d805.tar.gz
Rather than overloading the page->object field like UMA does, use instead
an unused pageq queue reference in the page structure to stash a pointer to the MemGuard FIFO. Using the page->object field caused problems because when vm_map_protect() was called the second time to set VM_PROT_DEFAULT back onto a set of pages in memguard_map, the protection in the VM would be changed but the PMAP code would lazily not restore the PG_RW bit on the underlying pages right away (see pmap_protect()). So when a page fault finally occurred and the VM noticed the faulting address corresponds to a page that _does_ have write access now, it would then call into PMAP to set back PG_RW (i386 case being discussed here). However, before it got to do that, an assertion on the object lock not being owned would get triggered, as the object of the faulting page would need to be locked but was overloaded by MemGuard. This is precisely why MemGuard cannot overload page->object. Submitted by: Alan Cox (alc@)
Diffstat (limited to 'sys/vm/memguard.c')
-rw-r--r--sys/vm/memguard.c30
1 files changed, 13 insertions, 17 deletions
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
index eef18df..e259b60 100644
--- a/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -275,7 +275,10 @@ memguard_unguard(void *addr, int numpgs)
* vsetmgfifo() sets a reference in an underlying page for the specified
* virtual address to an appropriate memguard_fifo_pool.
*
- * These routines are very similar to those defined by UMA in uma_int.h
+ * These routines are very similar to those defined by UMA in uma_int.h.
+ * The difference is that these routines store the mgfifo in one of the
+ * page's fields that is unused when the page is wired rather than the
+ * object field, which is used.
*/
static struct memguard_fifo *
vtomgfifo(vm_offset_t va)
@@ -284,14 +287,9 @@ vtomgfifo(vm_offset_t va)
struct memguard_fifo *mgfifo;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
- mgfifo = (struct memguard_fifo *)p->object;
-
- /*
- * We use PG_SLAB, just like UMA does, even though we stash a
- * reference to a memguard_fifo, and not a slab.
- */
- if ((p->flags & PG_SLAB) == 0)
- panic("MEMGUARD: Expected memguard_fifo reference to be set!");
+ KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+ ("MEMGUARD: Expected wired page in vtomgfifo!"));
+ mgfifo = (struct memguard_fifo *)p->pageq.tqe_next;
return mgfifo;
}
@@ -301,12 +299,9 @@ vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo)
vm_page_t p;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
- p->object = (vm_object_t)mgfifo;
- /*
- * We use PG_SLAB, just like UMA does, even though we stash a reference
- * to a memguard_fifo, and not a slab.
- */
- p->flags |= PG_SLAB;
+ KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+ ("MEMGUARD: Expected wired page in vsetmgfifo!"));
+ p->pageq.tqe_next = (vm_page_t)mgfifo;
}
static void vclrmgfifo(vm_offset_t va)
@@ -314,6 +309,7 @@ static void vclrmgfifo(vm_offset_t va)
vm_page_t p;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
- p->object = NULL;
- p->flags &= ~PG_SLAB;
+ KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+ ("MEMGUARD: Expected wired page in vclrmgfifo!"));
+ p->pageq.tqe_next = NULL;
}
OpenPOWER on IntegriCloud