| author    | kib <kib@FreeBSD.org>                             | 2011-09-28 14:57:50 +0000 |
|-----------|---------------------------------------------------|---------------------------|
| committer | kib <kib@FreeBSD.org>                             | 2011-09-28 14:57:50 +0000 |
| commit    | e84b0ecd819abddadeed97fa141468b674261293 (patch)  |                           |
| tree      | 6a5296fc970ddac679f37c4f859ef750f1173a7e          | /sys/vm/vm_page.c         |
| parent    | 3a1a19715027c5c03af47ff8d3ce06461e3b4d65 (diff)   |                           |
Use the trick of performing the atomic operation on the containing aligned
word to handle the dirty mask updates in vm_page_clear_dirty_mask().

Remove the vm page queue lock around the vm_page_dirty() call in
vm_fault_hold(); its sole purpose was to protect the dirty field on
architectures that do not provide short or byte-wide atomics.
Reviewed by: alc, attilio
Tested by: flo (sparc64)
MFC after: 2 weeks
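
For readers unfamiliar with the trick, here is a minimal userland sketch of the same idea: clear bits in a narrow field by doing a 32-bit atomic AND-NOT on the naturally aligned word that contains it. The `struct fake_page` layout and `clear_dirty_bits()` helper are hypothetical stand-ins for `struct vm_page` and `vm_page_clear_dirty_mask()`, and the GCC/Clang `__atomic_fetch_and()` builtin stands in for the kernel's `atomic_clear_32()`; the punning through a `uint32_t` pointer mirrors what the kernel code does and is not strictly portable C.

```c
/*
 * Minimal userland sketch of the aligned-word trick (illustrative only).
 * struct fake_page and clear_dirty_bits() are hypothetical stand-ins for
 * struct vm_page and vm_page_clear_dirty_mask(); __atomic_fetch_and() is
 * used in place of the kernel's atomic_clear_32().
 */
#include <stdint.h>
#include <stdio.h>

#define NBBY    8               /* number of bits in a byte */

struct fake_page {
        uint32_t pindex;        /* keeps the structure 32-bit aligned */
        uint8_t  valid;
        uint8_t  dirty;         /* too narrow for byte-wide atomics on some CPUs */
        uint16_t flags;
};

static void
clear_dirty_bits(struct fake_page *p, uint8_t pagebits)
{
        uintptr_t addr;
        int shift;

        addr = (uintptr_t)&p->dirty;
        /* Byte offset of 'dirty' within its naturally aligned 32-bit word. */
        shift = addr & (sizeof(uint32_t) - 1);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        shift = (sizeof(uint32_t) - sizeof(p->dirty) - shift) * NBBY;
#else
        shift *= NBBY;
#endif
        /* Round down to the containing word and clear only dirty's bits. */
        addr &= ~(uintptr_t)(sizeof(uint32_t) - 1);
        __atomic_fetch_and((uint32_t *)addr, ~((uint32_t)pagebits << shift),
            __ATOMIC_SEQ_CST);
}

int
main(void)
{
        struct fake_page p = { .pindex = 7, .valid = 0xff, .dirty = 0xff,
            .flags = 0xabcd };

        clear_dirty_bits(&p, 0x0f);
        /* Expect dirty=0xf0 with valid, flags and pindex untouched. */
        printf("dirty=%#x valid=%#x flags=%#x\n", p.dirty, p.valid, p.flags);
        return (0);
}
```

Built with `cc -std=c11`, this prints dirty=0xf0 while valid, flags, and pindex are untouched, regardless of host byte order, because the shift is adjusted for big-endian machines exactly as in the patch below.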
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r-- | sys/vm/vm_page.c | 61 |
1 file changed, 35 insertions, 26 deletions
```diff
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 341c238..fe829a3 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -745,9 +745,9 @@ vm_page_sleep(vm_page_t m, const char *msg)
  *
  *      Set all bits in the page's dirty field.
  *
- *      The object containing the specified page must be locked if the call is
- *      made from the machine-independent layer.  If, however, the call is
- *      made from the pmap layer, then the page queues lock may be required.
+ *      The object containing the specified page must be locked if the
+ *      call is made from the machine-independent layer.
+ *
  *      See vm_page_clear_dirty_mask().
  */
 void
@@ -2339,44 +2339,53 @@ vm_page_set_valid(vm_page_t m, int base, int size)
 static __inline void
 vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
 {
+        uintptr_t addr;
+#if PAGE_SIZE < 16384
+        int shift;
+#endif
 
         /*
          * If the object is locked and the page is neither VPO_BUSY nor
          * PGA_WRITEABLE, then the page's dirty field cannot possibly be
-         * set by a concurrent pmap operation.
+         * set by a concurrent pmap operation.
+         *
          */
         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
         if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0)
                 m->dirty &= ~pagebits;
         else {
-#if defined(__amd64__) || defined(__i386__) || defined(__ia64__)
                 /*
-                 * On the aforementioned architectures, the page queues lock
-                 * is not required by the following read-modify-write
-                 * operation.  The combination of the object's lock and an
-                 * atomic operation suffice.  Moreover, the pmap layer on
-                 * these architectures can call vm_page_dirty() without
-                 * holding the page queues lock.
+                 * The pmap layer can call vm_page_dirty() without
+                 * holding a distinguished lock.  The combination of
+                 * the object's lock and an atomic operation suffice
+                 * to guarantee consistency of the page dirty field.
+                 *
+                 * For PAGE_SIZE == 32768 case, compiler already
+                 * properly aligns the dirty field, so no forcible
+                 * alignment is needed. Only require existence of
+                 * atomic_clear_64 when page size if 32768.
                  */
-#if PAGE_SIZE == 4096
-                atomic_clear_char(&m->dirty, pagebits);
-#elif PAGE_SIZE == 8192
-                atomic_clear_short(&m->dirty, pagebits);
+                addr = (uintptr_t)&m->dirty;
+#if PAGE_SIZE == 32768
+#error pagebits too short
+                atomic_clear_64((uint64_t *)addr, pagebits);
 #elif PAGE_SIZE == 16384
-                atomic_clear_int(&m->dirty, pagebits);
-#else
-#error "PAGE_SIZE is not supported."
-#endif
-#else
+                atomic_clear_32((uint32_t *)addr, pagebits);
+#else           /* PAGE_SIZE <= 8192 */
                 /*
-                 * Otherwise, the page queues lock is required to ensure that
-                 * a concurrent pmap operation does not set the page's dirty
-                 * field during the following read-modify-write operation.
+                 * Use a trick to perform an 32bit atomic on the
+                 * contained aligned word, to not depend on existence
+                 * of the atomic_clear_{8, 16}.
                  */
-                vm_page_lock_queues();
-                m->dirty &= ~pagebits;
-                vm_page_unlock_queues();
+                shift = addr & (sizeof(uint32_t) - 1);
+#if BYTE_ORDER == BIG_ENDIAN
+                shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
+#else
+                shift *= NBBY;
 #endif
+                addr &= ~(sizeof(uint32_t) - 1);
+                atomic_clear_32((uint32_t *)addr, pagebits << shift);
+#endif          /* PAGE_SIZE */
         }
 }
```
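
One hedged note on the new #if ladder, not stated in the commit itself: the dirty mask keeps one bit per DEV_BSIZE (512-byte) chunk of the page, which is why the removed code used atomic_clear_char/short/int for 4 KB/8 KB/16 KB pages, and why a 32 KB page would need a 64-bit mask that no longer fits in the function's `int pagebits` argument (the `#error pagebits too short` case). The sketch below only spells out that arithmetic; the width-per-page-size mapping is inferred from the removed calls, not from this commit.

```c
/*
 * Assumed arithmetic behind the PAGE_SIZE ladder: one dirty bit per
 * DEV_BSIZE (512-byte) chunk of a page.  Illustrative only.
 */
#include <assert.h>

#define DEV_BSIZE       512

static_assert(4096 / DEV_BSIZE == 8,
    "4K pages: 8-bit mask, old code used atomic_clear_char()");
static_assert(8192 / DEV_BSIZE == 16,
    "8K pages: 16-bit mask, old code used atomic_clear_short()");
static_assert(16384 / DEV_BSIZE == 32,
    "16K pages: 32-bit mask, atomic_clear_32() applies directly");
static_assert(32768 / DEV_BSIZE == 64,
    "32K pages: 64-bit mask, wider than the int pagebits argument");

int
main(void)
{
        return (0);
}
```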