diff options
-rw-r--r-- | sys/vm/vm_fault.c | 10
-rw-r--r-- | sys/vm/vm_page.c  | 35
-rw-r--r-- | sys/vm/vm_page.h  | 33
3 files changed, 68 insertions, 10 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index d417a84..7568f57 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -1089,10 +1089,20 @@ vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, * caller's changes may go unnoticed because they are * performed through an unmanaged mapping or by a DMA * operation. + * + * The object lock is not held here. Therefore, like + * a pmap operation, the page queues lock may be + * required in order to call vm_page_dirty(). See + * vm_page_clear_dirty_mask(). */ +#if defined(__amd64__) || defined(__i386__) || defined(__ia64__) || \ + defined(__mips__) + vm_page_dirty(*mp); +#else vm_page_lock_queues(); vm_page_dirty(*mp); vm_page_unlock_queues(); +#endif } } if (pmap_failed) { diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 21c7080..033c4c9 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -729,7 +729,12 @@ vm_page_sleep(vm_page_t m, const char *msg) /* * vm_page_dirty: * - * make page all dirty + * Set all bits in the page's dirty field. + * + * The object containing the specified page must be locked if the call is + * made from the machine-independent layer. If, however, the call is + * made from the pmap layer, then the page queues lock may be required. + * See vm_page_clear_dirty_mask(). */ void vm_page_dirty(vm_page_t m) @@ -2325,15 +2330,41 @@ vm_page_clear_dirty_mask(vm_page_t m, int pagebits) /* * If the object is locked and the page is neither VPO_BUSY nor * PG_WRITEABLE, then the page's dirty field cannot possibly be - * modified by a concurrent pmap operation. + * set by a concurrent pmap operation. */ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0) m->dirty &= ~pagebits; else { +#if defined(__amd64__) || defined(__i386__) || defined(__ia64__) || \ + defined(__mips__) + /* + * On the aforementioned architectures, the page queues lock + * is not required by the following read-modify-write + * operation. 
The combination of the object's lock and an + * atomic operation suffice. Moreover, the pmap layer on + * these architectures can call vm_page_dirty() without + * holding the page queues lock. + */ +#if PAGE_SIZE == 4096 + atomic_clear_char(&m->dirty, pagebits); +#elif PAGE_SIZE == 8192 + atomic_clear_short(&m->dirty, pagebits); +#elif PAGE_SIZE == 16384 + atomic_clear_int(&m->dirty, pagebits); +#else +#error "PAGE_SIZE is not supported." +#endif +#else + /* + * Otherwise, the page queues lock is required to ensure that + * a concurrent pmap operation does not set the page's dirty + * field during the following read-modify-write operation. + */ vm_page_lock_queues(); m->dirty &= ~pagebits; vm_page_unlock_queues(); +#endif } } diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index 898857b..e852313 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -89,10 +89,26 @@ * and offset to which this page belongs (for pageout), * and sundry status bits. * - * Fields in this structure are locked either by the lock on the - * object that the page belongs to (O), its corresponding page lock (P), - * or by the lock on the page queues (Q). - * + * In general, operations on this structure's mutable fields are + * synchronized using either one of or a combination of the lock on the + * object that the page belongs to (O), the pool lock for the page (P), + * or the lock for either the free or paging queues (Q). If a field is + * annotated below with two of these locks, then holding either lock is + * sufficient for read access, but both locks are required for write + * access. + * + * In contrast, the synchronization of accesses to the page's dirty field + * is machine dependent (M). In the machine-independent layer, the lock + * on the object that the page belongs to must be held in order to + * operate on the field. However, the pmap layer is permitted to set + * all bits within the field without holding that lock. 
Therefore, if + * the underlying architecture does not support atomic read-modify-write + * operations on the field's type, then the machine-independent layer + * must also hold the page queues lock when performing read-modify-write + * operations and the pmap layer must hold the page queues lock when + * setting the field. In the machine-independent layer, the + * implementation of read-modify-write operations on the field is + * encapsulated in vm_page_clear_dirty_mask(). */ TAILQ_HEAD(pglist, vm_page); @@ -120,18 +136,19 @@ struct vm_page { u_char busy; /* page busy count (O) */ /* NOTE that these must support one bit per DEV_BSIZE in a page!!! */ /* so, on normal X86 kernels, they must be at least 8 bits wide */ + /* In reality, support for 32KB pages is not fully implemented. */ #if PAGE_SIZE == 4096 u_char valid; /* map of valid DEV_BSIZE chunks (O) */ - u_char dirty; /* map of dirty DEV_BSIZE chunks (O) */ + u_char dirty; /* map of dirty DEV_BSIZE chunks (M) */ #elif PAGE_SIZE == 8192 u_short valid; /* map of valid DEV_BSIZE chunks (O) */ - u_short dirty; /* map of dirty DEV_BSIZE chunks (O) */ + u_short dirty; /* map of dirty DEV_BSIZE chunks (M) */ #elif PAGE_SIZE == 16384 u_int valid; /* map of valid DEV_BSIZE chunks (O) */ - u_int dirty; /* map of dirty DEV_BSIZE chunks (O) */ + u_int dirty; /* map of dirty DEV_BSIZE chunks (M) */ #elif PAGE_SIZE == 32768 u_long valid; /* map of valid DEV_BSIZE chunks (O) */ - u_long dirty; /* map of dirty DEV_BSIZE chunks (O) */ + u_long dirty; /* map of dirty DEV_BSIZE chunks (M) */ #endif }; |