Diffstat (limited to 'sys')
-rw-r--r--	sys/vm/vm_fault.c	10
-rw-r--r--	sys/vm/vm_object.c	8
-rw-r--r--	sys/vm/vm_page.c	4
-rw-r--r--	sys/vm/vm_page.h	2
4 files changed, 12 insertions, 12 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b122c36..5922ff6 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -842,14 +842,15 @@ readrest:
 		if (prot & VM_PROT_WRITE) {
 			vm_page_lock_queues();
 			vm_page_flag_set(fs.m, PG_WRITEABLE);
-			vm_object_set_writeable_dirty(fs.m->object);
+			vm_page_unlock_queues();
+			vm_object_set_writeable_dirty(fs.object);
 
 			/*
 			 * If the fault is a write, we know that this page is being
 			 * written NOW so dirty it explicitly to save on
 			 * pmap_is_modified() calls later.
 			 *
-			 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
+			 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
 			 * if the page is already dirty to prevent data written with
 			 * the expectation of being synced from not being synced.
 			 * Likewise if this entry does not request NOSYNC then make
@@ -861,11 +862,10 @@ readrest:
 			 */
 			if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
 				if (fs.m->dirty == 0)
-					vm_page_flag_set(fs.m, PG_NOSYNC);
+					fs.m->oflags |= VPO_NOSYNC;
 			} else {
-				vm_page_flag_clear(fs.m, PG_NOSYNC);
+				fs.m->oflags &= ~VPO_NOSYNC;
 			}
-			vm_page_unlock_queues();
 			if (fault_flags & VM_FAULT_DIRTY) {
 				vm_page_dirty(fs.m);
 				vm_pager_page_unswapped(fs.m);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index fd34f40..cef7b6a 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -687,7 +687,7 @@ vm_object_terminate(vm_object_t object)
  *
  *	Clean all dirty pages in the specified range of object.  Leaves page
  *	on whatever queue it is currently on.   If NOSYNC is set then do not
- *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
+ *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
  *	leaving the object dirty.
  *
  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
@@ -765,7 +765,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 			 * If we have been asked to skip nosync pages and
 			 * this is a nosync page, we can't continue.
 			 */
-			if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
+			if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
 				if (--scanlimit == 0)
 					break;
 				++tscan;
@@ -805,7 +805,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 	clearobjflags = 1;
 	TAILQ_FOREACH(p, &object->memq, listq) {
 		vm_page_flag_set(p, PG_CLEANCHK);
-		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
+		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
 			clearobjflags = 0;
 		else
 			pmap_remove_write(p);
@@ -853,7 +853,7 @@ again:
 		 * nosync page, skip it.  Note that the object flags were
 		 * not cleared in this case so we do not have to set them.
 		 */
-		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
+		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
 			vm_page_flag_clear(p, PG_CLEANCHK);
 			continue;
 		}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 8e4bf6f..bc62a8b 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1580,7 +1580,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	/*
 	 * Set valid, clear dirty bits.  If validating the entire
 	 * page we can safely clear the pmap modify bit.  We also
-	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
+	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
 	 * be set again.
 	 *
@@ -1603,7 +1603,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	m->dirty &= ~pagebits;
 	if (base == 0 && size == PAGE_SIZE) {
 		pmap_clear_modify(m);
-		vm_page_flag_clear(m, PG_NOSYNC);
+		m->oflags &= ~VPO_NOSYNC;
 	}
 }
 
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 428aee5..f09d91f 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -144,6 +144,7 @@ struct vm_page {
  */
 #define	VPO_WANTED	0x0002	/* someone is waiting for page */
 #define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
+#define	VPO_NOSYNC	0x0400	/* do not collect for syncer */
 
 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
 #if PAGE_SIZE == 32768
@@ -226,7 +227,6 @@ extern struct pq_coloring page_queue_coloring;
 #define	PG_ZERO		0x0040		/* page is zeroed */
 #define	PG_REFERENCED	0x0080		/* page has been referenced */
 #define	PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
-#define	PG_NOSYNC	0x0400		/* do not collect for syncer */
 #define	PG_UNMANAGED	0x0800		/* No PV management for page */
 #define	PG_MARKER	0x1000		/* special queue marker page */
 #define	PG_SLAB		0x2000		/* object pointer is actually a slab */
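What the commit amounts to: the MAP_NOSYNC hint on a page moves from the flags field (PG_NOSYNC), which is protected by the global page queues lock, to the oflags field (VPO_NOSYNC), which is protected by the lock of the page's containing object. The sketch below is illustrative only, not code from the commit; it assumes the locking macros of this FreeBSD era, and the mark_page_nosync() helper is purely hypothetical.

/* Sketch only: contrast the old and new ways of setting the NOSYNC hint. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static void
mark_page_nosync(vm_page_t m)	/* hypothetical helper, for illustration */
{
#ifdef OLD_WAY
	/* Before: PG_NOSYNC lived in m->flags, covered by the page queues lock. */
	vm_page_lock_queues();
	vm_page_flag_set(m, PG_NOSYNC);
	vm_page_unlock_queues();
#else
	/*
	 * After: VPO_NOSYNC lives in m->oflags, covered by the lock of the
	 * page's object, so no page queues locking is needed for this update.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->oflags |= VPO_NOSYNC;
#endif
}

The vm_fault.c hunk follows the same pattern: vm_page_unlock_queues() moves up so that the oflags update happens outside the page queues lock, under the object lock that the fault handler already holds at that point.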