diff options
author | alc <alc@FreeBSD.org> | 2003-10-04 19:13:27 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2003-10-04 19:13:27 +0000 |
commit | fa0f25a35968ab0f76aa53390c80c563bf400f47 (patch) | |
tree | b770b06d3d652594c3951bb4be499c1a820fad36 /sys | |
parent | 0f6892a7db8fbd494d563be9a95c9f878f459ff3 (diff) | |
download | FreeBSD-src-fa0f25a35968ab0f76aa53390c80c563bf400f47.zip FreeBSD-src-fa0f25a35968ab0f76aa53390c80c563bf400f47.tar.gz |
Synchronize access to a vm page's valid field using the containing
vm object's lock.
Diffstat (limited to 'sys')
-rw-r--r-- | sys/vm/vm_kern.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 3e7f622..ca240b1 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -196,21 +196,21 @@ kmem_alloc(map, size)
 	 * We're intentionally not activating the pages we allocate to prevent a
 	 * race with page-out.  vm_map_pageable will wire the pages.
 	 */
+	VM_OBJECT_LOCK(kernel_object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		vm_page_t mem;
 
-		VM_OBJECT_LOCK(kernel_object);
 		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
 				VM_ALLOC_ZERO | VM_ALLOC_RETRY);
-		VM_OBJECT_UNLOCK(kernel_object);
 		if ((mem->flags & PG_ZERO) == 0)
 			pmap_zero_page(mem);
-		vm_page_lock_queues();
 		mem->valid = VM_PAGE_BITS_ALL;
+		vm_page_lock_queues();
 		vm_page_flag_clear(mem, PG_ZERO);
 		vm_page_wakeup(mem);
 		vm_page_unlock_queues();
 	}
+	VM_OBJECT_UNLOCK(kernel_object);
 
 	/*
 	 * And finally, mark the data as non-pageable.
@@ -406,9 +406,9 @@ retry:
 	}
 	if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
+	m->valid = VM_PAGE_BITS_ALL;
 	vm_page_lock_queues();
 	vm_page_flag_clear(m, PG_ZERO);
-	m->valid = VM_PAGE_BITS_ALL;
 	vm_page_unmanage(m);
 	vm_page_unlock_queues();
 }