Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c                       2
-rw-r--r--  sys/arm/arm/pmap-v6.c                        2
-rw-r--r--  sys/dev/agp/agp.c                            4
-rw-r--r--  sys/dev/agp/agp_i810.c                       2
-rw-r--r--  sys/dev/cxgbe/tom/t4_ddp.c                   2
-rw-r--r--  sys/dev/drm/via_dmablit.c                    2
-rw-r--r--  sys/dev/drm2/i915/i915_gem.c                10
-rw-r--r--  sys/dev/drm2/i915/i915_gem_gtt.c             2
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc.c            2
-rw-r--r--  sys/dev/ti/if_ti.c                           2
-rw-r--r--  sys/dev/virtio/balloon/virtio_balloon.c      2
-rw-r--r--  sys/dev/xen/balloon/balloon.c                4
-rw-r--r--  sys/i386/i386/pmap.c                         2
-rw-r--r--  sys/i386/xen/pmap.c                          2
-rw-r--r--  sys/ia64/ia64/pmap.c                         2
-rw-r--r--  sys/kern/uipc_syscalls.c                     6
-rw-r--r--  sys/kern/vfs_bio.c                           4
-rw-r--r--  sys/mips/mips/pmap.c                         2
-rw-r--r--  sys/net/bpf_zerocopy.c                       2
-rw-r--r--  sys/vm/uma_core.c                            2
-rw-r--r--  sys/vm/vm_fault.c                            8
-rw-r--r--  sys/vm/vm_glue.c                             4
-rw-r--r--  sys/vm/vm_kern.c                             6
-rw-r--r--  sys/vm/vm_page.c                            24
-rw-r--r--  sys/vm/vm_page.h                             2
25 files changed, 53 insertions(+), 49 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 0141f15..ae0041f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2868,7 +2868,7 @@ free_pv_chunk(struct pv_chunk *pc)
/* entire chunk is free, return it */
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
dump_drop_page(m->phys_addr);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 6fc328c..67ae8dd 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -4222,7 +4222,7 @@ pmap_free_pv_chunk(struct pv_chunk *pc)
/* entire chunk is free, return it */
m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
pmap_qremove((vm_offset_t)pc, 1);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index 5970943..9eed774 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -629,7 +629,7 @@ bad:
if (k >= i)
vm_page_xunbusy(m);
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
}
VM_OBJECT_WUNLOCK(mem->am_obj);
@@ -663,7 +663,7 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, atop(i));
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
}
VM_OBJECT_WUNLOCK(mem->am_obj);
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
index 725aa99..dbf34e3 100644
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -2009,7 +2009,7 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
VM_OBJECT_WLOCK(mem->am_obj);
m = vm_page_lookup(mem->am_obj, 0);
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
VM_OBJECT_WUNLOCK(mem->am_obj);
} else {
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 6c93675..f5dda07 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -869,7 +869,7 @@ unwire_ddp_buffer(struct ddp_buffer *db)
for (i = 0; i < db->npages; i++) {
p = db->pages[i];
vm_page_lock(p);
- vm_page_unwire(p, 0);
+ vm_page_unwire(p, PQ_INACTIVE);
vm_page_unlock(p);
}
}
diff --git a/sys/dev/drm/via_dmablit.c b/sys/dev/drm/via_dmablit.c
index 70ba9d9..e8096f2 100644
--- a/sys/dev/drm/via_dmablit.c
+++ b/sys/dev/drm/via_dmablit.c
@@ -179,7 +179,7 @@ via_free_sg_info(drm_via_sg_info_t *vsg)
for (i=0; i < vsg->num_pages; ++i) {
page = vsg->pages[i];
vm_page_lock(page);
- vm_page_unwire(page, 0);
+ vm_page_unwire(page, PQ_INACTIVE);
vm_page_unlock(page);
}
case dr_via_pages_alloc:
diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c
index 99821e4..a3acb60 100644
--- a/sys/dev/drm2/i915/i915_gem.c
+++ b/sys/dev/drm2/i915/i915_gem.c
@@ -1039,7 +1039,7 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
vm_page_dirty(m);
vm_page_reference(m);
vm_page_lock(m);
- vm_page_unwire(m, 1);
+ vm_page_unwire(m, PQ_ACTIVE);
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
@@ -2247,7 +2247,7 @@ failed:
for (j = 0; j < i; j++) {
m = obj->pages[j];
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
@@ -2308,7 +2308,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_WILLNEED)
vm_page_reference(m);
vm_page_lock(m);
- vm_page_unwire(obj->pages[i], 1);
+ vm_page_unwire(obj->pages[i], PQ_ACTIVE);
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
@@ -3611,7 +3611,7 @@ i915_gem_detach_phys_object(struct drm_device *dev,
vm_page_reference(m);
vm_page_lock(m);
vm_page_dirty(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
@@ -3676,7 +3676,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
vm_page_reference(m);
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
diff --git a/sys/dev/drm2/i915/i915_gem_gtt.c b/sys/dev/drm2/i915/i915_gem_gtt.c
index 90899de..a4ca76f 100644
--- a/sys/dev/drm2/i915/i915_gem_gtt.c
+++ b/sys/dev/drm2/i915/i915_gem_gtt.c
@@ -206,7 +206,7 @@ i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
for (i = 0; i < ppgtt->num_pd_entries; i++) {
m = ppgtt->pt_pages[i];
if (m != NULL) {
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
}
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c
index 3c0f18a..8f513d0 100644
--- a/sys/dev/drm2/ttm/ttm_page_alloc.c
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -139,7 +139,7 @@ ttm_vm_page_free(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
m->flags &= ~PG_FICTITIOUS;
m->oflags |= VPO_UNMANAGED;
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
diff --git a/sys/dev/ti/if_ti.c b/sys/dev/ti/if_ti.c
index c201029..bba974b 100644
--- a/sys/dev/ti/if_ti.c
+++ b/sys/dev/ti/if_ti.c
@@ -1616,7 +1616,7 @@ ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
}
sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
if (sf[i] == NULL) {
- vm_page_unwire(frame, 0);
+ vm_page_unwire(frame, PQ_INACTIVE);
vm_page_free(frame);
device_printf(sc->ti_dev, "buffer allocation "
"failed -- packet dropped!\n");
diff --git a/sys/dev/virtio/balloon/virtio_balloon.c b/sys/dev/virtio/balloon/virtio_balloon.c
index a90a653..d540099 100644
--- a/sys/dev/virtio/balloon/virtio_balloon.c
+++ b/sys/dev/virtio/balloon/virtio_balloon.c
@@ -450,7 +450,7 @@ static void
vtballoon_free_page(struct vtballoon_softc *sc, vm_page_t m)
{
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
sc->vtballoon_current_npages--;
}
diff --git a/sys/dev/xen/balloon/balloon.c b/sys/dev/xen/balloon/balloon.c
index 2df9c8b..fa56c86 100644
--- a/sys/dev/xen/balloon/balloon.c
+++ b/sys/dev/xen/balloon/balloon.c
@@ -255,7 +255,7 @@ increase_reservation(unsigned long nr_pages)
set_phys_to_machine(pfn, frame_list[i]);
- vm_page_unwire(page, 0);
+ vm_page_unwire(page, PQ_INACTIVE);
vm_page_free(page);
}
@@ -297,7 +297,7 @@ decrease_reservation(unsigned long nr_pages)
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
if (balloon_append(page) != 0) {
- vm_page_unwire(page, 0);
+ vm_page_unwire(page, PQ_INACTIVE);
vm_page_free(page);
nr_pages = i;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 2515f7f..af8c9ff 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2368,7 +2368,7 @@ free_pv_chunk(struct pv_chunk *pc)
/* entire chunk is free, return it */
m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
pmap_qremove((vm_offset_t)pc, 1);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
}
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index ece4857..fdc64bc 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2144,7 +2144,7 @@ free_pv_chunk(struct pv_chunk *pc)
/* entire chunk is free, return it */
m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
pmap_qremove((vm_offset_t)pc, 1);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
}
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 6211e51..9f64297 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -932,7 +932,7 @@ free_pv_chunk(struct pv_chunk *pc)
PV_STAT(pc_chunk_frees++);
/* entire chunk is free, return it */
m = PHYS_TO_VM_PAGE(IA64_RR_MASK((vm_offset_t)pc));
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 52dc021..f00d77f 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1996,7 +1996,7 @@ sf_buf_mext(struct mbuf *mb, void *addr, void *args)
m = sf_buf_page(args);
sf_buf_free(args);
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
@@ -2692,7 +2692,7 @@ sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
} else if (m != NULL) {
free_page:
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
/*
* See if anyone else might know about this page. If
@@ -3050,7 +3050,7 @@ retry_space:
if (sf == NULL) {
SFSTAT_INC(sf_allocfail);
vm_page_lock(pg);
- vm_page_unwire(pg, 0);
+ vm_page_unwire(pg, PQ_INACTIVE);
KASSERT(pg->object != NULL,
("%s: object disappeared", __func__));
vm_page_unlock(pg);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 0311bed..8093dd0 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1896,7 +1896,7 @@ vfs_vmio_release(struct buf *bp)
* everything on the inactive queue.
*/
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
/*
* Might as well free the page if we can and it has
@@ -3483,7 +3483,7 @@ allocbuf(struct buf *bp, int size)
bp->b_pages[i] = NULL;
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
}
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 414bd89..7a262c8 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -1535,7 +1535,7 @@ free_pv_chunk(struct pv_chunk *pc)
PV_STAT(pc_chunk_frees++);
/* entire chunk is free, return it */
m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc));
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
diff --git a/sys/net/bpf_zerocopy.c b/sys/net/bpf_zerocopy.c
index a8ef8d5..5d54f64 100644
--- a/sys/net/bpf_zerocopy.c
+++ b/sys/net/bpf_zerocopy.c
@@ -114,7 +114,7 @@ zbuf_page_free(vm_page_t pp)
{
vm_page_lock(pp);
- vm_page_unwire(pp, 0);
+ vm_page_unwire(pp, PQ_INACTIVE);
if (pp->wire_count == 0 && pp->object == NULL)
vm_page_free(pp);
vm_page_unlock(pp);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 0c28c18..62f4912 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1154,7 +1154,7 @@ noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
* exit.
*/
TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
- vm_page_unwire(p, 0);
+ vm_page_unwire(p, PQ_INACTIVE);
vm_page_free(p);
}
return (NULL);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 35aea6b..ecaafdb 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -755,7 +755,7 @@ vnode_locked:
vm_page_unlock(fs.first_m);
vm_page_lock(fs.m);
- vm_page_unwire(fs.m, FALSE);
+ vm_page_unwire(fs.m, PQ_INACTIVE);
vm_page_unlock(fs.m);
}
/*
@@ -917,7 +917,7 @@ vnode_locked:
if (wired)
vm_page_wire(fs.m);
else
- vm_page_unwire(fs.m, 1);
+ vm_page_unwire(fs.m, PQ_ACTIVE);
} else
vm_page_activate(fs.m);
if (m_hold != NULL) {
@@ -1208,7 +1208,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
if (!fictitious) {
m = PHYS_TO_VM_PAGE(pa);
vm_page_lock(m);
- vm_page_unwire(m, TRUE);
+ vm_page_unwire(m, PQ_ACTIVE);
vm_page_unlock(m);
}
}
@@ -1390,7 +1390,7 @@ again:
if (upgrade) {
if (src_m != dst_m) {
vm_page_lock(src_m);
- vm_page_unwire(src_m, 0);
+ vm_page_unwire(src_m, PQ_INACTIVE);
vm_page_unlock(src_m);
vm_page_lock(dst_m);
vm_page_wire(dst_m);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 7d00097..d48de9e 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -418,7 +418,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
if (m == NULL)
panic("vm_thread_dispose: kstack already missing?");
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
vm_page_unlock(m);
}
@@ -507,7 +507,7 @@ vm_thread_swapout(struct thread *td)
panic("vm_thread_swapout: kstack already missing?");
vm_page_dirty(m);
vm_page_lock(m);
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_unlock(m);
}
VM_OBJECT_WUNLOCK(ksobj);
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index c0e56ce..37c0a27 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -193,7 +193,7 @@ retry:
i -= PAGE_SIZE;
m = vm_page_lookup(object,
OFF_TO_IDX(offset + i));
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
vmem_free(vmem, addr, size);
@@ -367,7 +367,7 @@ retry:
i -= PAGE_SIZE;
m = vm_page_lookup(object,
OFF_TO_IDX(offset + i));
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
VM_OBJECT_WUNLOCK(object);
@@ -401,7 +401,7 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
VM_OBJECT_WLOCK(object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
- vm_page_unwire(m, 0);
+ vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
VM_OBJECT_WUNLOCK(object);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 788b9ea..49c3ede 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -147,7 +147,7 @@ static uma_zone_t fakepg_zone;
static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_cache_turn_free(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
-static void vm_page_enqueue(int queue, vm_page_t m);
+static void vm_page_enqueue(uint8_t queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
vm_pindex_t pindex, vm_page_t mpred);
@@ -2029,8 +2029,8 @@ vm_page_dequeue(vm_page_t m)
struct vm_pagequeue *pq;
vm_page_assert_locked(m);
- KASSERT(m->queue == PQ_ACTIVE || m->queue == PQ_INACTIVE,
- ("vm_page_dequeue: page %p is not queued", m));
+ KASSERT(m->queue < PQ_COUNT, ("vm_page_dequeue: page %p is not queued",
+ m));
pq = vm_page_pagequeue(m);
vm_pagequeue_lock(pq);
m->queue = PQ_NONE;
@@ -2067,11 +2067,14 @@ vm_page_dequeue_locked(vm_page_t m)
* The page must be locked.
*/
static void
-vm_page_enqueue(int queue, vm_page_t m)
+vm_page_enqueue(uint8_t queue, vm_page_t m)
{
struct vm_pagequeue *pq;
vm_page_lock_assert(m, MA_OWNED);
+ KASSERT(queue < PQ_COUNT,
+ ("vm_page_enqueue: invalid queue %u request for page %p",
+ queue, m));
pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
vm_pagequeue_lock(pq);
m->queue = queue;
@@ -2330,9 +2333,7 @@ vm_page_wire(vm_page_t m)
*
* Release one wiring of the specified page, potentially enabling it to be
* paged again. If paging is enabled, then the value of the parameter
- * "activate" determines to which queue the page is added. If "activate" is
- * non-zero, then the page is added to the active queue. Otherwise, it is
- * added to the inactive queue.
+ * "queue" determines the queue to which the page is added.
*
* However, unless the page belongs to an object, it is not enqueued because
* it cannot be paged out.
@@ -2342,9 +2343,12 @@ vm_page_wire(vm_page_t m)
* A managed page must be locked.
*/
void
-vm_page_unwire(vm_page_t m, int activate)
+vm_page_unwire(vm_page_t m, uint8_t queue)
{
+ KASSERT(queue < PQ_COUNT,
+ ("vm_page_unwire: invalid queue %u request for page %p",
+ queue, m));
if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_lock_assert(m, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0) {
@@ -2359,9 +2363,9 @@ vm_page_unwire(vm_page_t m, int activate)
if ((m->oflags & VPO_UNMANAGED) != 0 ||
m->object == NULL)
return;
- if (!activate)
+ if (queue == PQ_INACTIVE)
m->flags &= ~PG_WINATCFLS;
- vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
+ vm_page_enqueue(queue, m);
}
} else
panic("vm_page_unwire: page %p's wire count is zero", m);
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 29c7dec..ff61e06 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -465,7 +465,7 @@ vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
-void vm_page_unwire (vm_page_t, int);
+void vm_page_unwire (vm_page_t m, uint8_t queue);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
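
The net effect of this change on callers can be summarized with a short sketch. This is an illustrative fragment, not part of the commit: the helper example_release_page() and its keep_active parameter are hypothetical, while vm_page_unwire(), vm_page_lock()/vm_page_unlock(), and the PQ_ACTIVE/PQ_INACTIVE constants are the real kernel interfaces touched by the diff. Per the updated comment in vm_page.c, the page is only enqueued if it belongs to an object, and the queue argument must be below PQ_COUNT or the new KASSERT fires.

/*
 * Illustrative sketch only: adapting a caller to the new
 * vm_page_unwire() signature introduced by this diff.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static void
example_release_page(vm_page_t m, int keep_active)
{

	vm_page_lock(m);
	/*
	 * Old convention: vm_page_unwire(m, keep_active ? 1 : 0);
	 * New convention: pass the target page queue index explicitly.
	 */
	vm_page_unwire(m, keep_active ? PQ_ACTIVE : PQ_INACTIVE);
	vm_page_unlock(m);
}

Passing a named queue index instead of a boolean keeps call sites self-describing and lets vm_page_unwire() validate the request against PQ_COUNT, as the added KASSERTs in vm_page.c show.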