-rw-r--r--  sys/kern/kern_sendfile.c   | 18
-rw-r--r--  sys/kern/vfs_bio.c         |  2
-rw-r--r--  sys/sparc64/sparc64/pmap.c |  2
-rw-r--r--  sys/vm/vm_glue.c           |  4
-rw-r--r--  sys/vm/vm_page.c           | 13
-rw-r--r--  sys/vm/vm_page.h           |  4
6 files changed, 25 insertions, 18 deletions
diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c
index 973e476..aafa697 100644
--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -308,7 +308,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, off_t off, off_t len,
     int npages, int rhpages, int flags)
 {
        vm_page_t *pa = sfio->pa;
-       int nios;
+       int grabbed, nios;
 
        nios = 0;
        flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
@@ -318,14 +318,14 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, off_t off, off_t len,
         * only required pages.  Readahead pages are dealt with later.
         */
        VM_OBJECT_WLOCK(obj);
-       for (int i = 0; i < npages; i++) {
-               pa[i] = vm_page_grab(obj, OFF_TO_IDX(vmoff(i, off)),
-                   VM_ALLOC_WIRED | VM_ALLOC_NORMAL | flags);
-               if (pa[i] == NULL) {
-                       npages = i;
-                       rhpages = 0;
-                       break;
-               }
+
+       grabbed = vm_page_grab_pages(obj, OFF_TO_IDX(off),
+           VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
+       if (grabbed < npages) {
+               for (int i = grabbed; i < npages; i++)
+                       pa[i] = NULL;
+               npages = grabbed;
+               rhpages = 0;
        }
 
        for (int i = 0; i < npages;) {
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 317a81a..890dd31 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2727,7 +2727,7 @@ vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
                 * deadlocks once allocbuf() is called after
                 * pages are vfs_busy_pages().
                 */
-               vm_page_grab_pages(obj,
+               (void)vm_page_grab_pages(obj,
                    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
                    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
                    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index d838668..cf5c9d8 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1252,7 +1252,7 @@ pmap_pinit(pmap_t pm)
        CPU_ZERO(&pm->pm_active);
 
        VM_OBJECT_WLOCK(pm->pm_tsb_obj);
-       vm_page_grab_pages(pm->pm_tsb_obj, 0, VM_ALLOC_NORMAL |
+       (void)vm_page_grab_pages(pm->pm_tsb_obj, 0, VM_ALLOC_NORMAL |
            VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO, ma, TSB_PAGES);
        VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
        for (i = 0; i < TSB_PAGES; i++)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 0655f74..941e7e8 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -391,7 +391,7 @@ vm_thread_new(struct thread *td, int pages)
         * page of stack.
         */
        VM_OBJECT_WLOCK(ksobj);
-       vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+       (void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
            VM_ALLOC_WIRED, ma, pages);
        for (i = 0; i < pages; i++)
                ma[i]->valid = VM_PAGE_BITS_ALL;
@@ -568,7 +568,7 @@ vm_thread_swapin(struct thread *td)
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_WLOCK(ksobj);
-       vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
+       (void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
            pages);
        for (int i = 0; i < pages;) {
                int j, a, count, rv;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 39df669..07f32f3 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3147,13 +3147,15 @@ retrylookup:
  *     optional allocation flags:
  *     VM_ALLOC_IGN_SBUSY      do not sleep on soft busy pages
  *     VM_ALLOC_NOBUSY         do not exclusive busy the page
+ *     VM_ALLOC_NOWAIT         do not sleep
  *     VM_ALLOC_SBUSY          set page to sbusy state
  *     VM_ALLOC_WIRED          wire the pages
  *     VM_ALLOC_ZERO           zero and validate any invalid pages
  *
- * This routine may sleep.
+ * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
+ * may return a partial prefix of the requested range.
  */
-void
+int
 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count)
 {
@@ -3171,7 +3173,7 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
            (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
            ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
        if (count == 0)
-               return;
+               return (0);
        i = 0;
 retrylookup:
        m = vm_page_lookup(object, pindex + i);
@@ -3180,6 +3182,8 @@ retrylookup:
                sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
                    vm_page_xbusied(m) : vm_page_busied(m);
                if (sleep) {
+                       if ((allocflags & VM_ALLOC_NOWAIT) != 0)
+                               break;
                        /*
                         * Reference the page before unlocking and
                         * sleeping so that the page daemon is less
@@ -3207,6 +3211,8 @@ retrylookup:
                m = vm_page_alloc(object, pindex + i, (allocflags &
                    ~VM_ALLOC_IGN_SBUSY) | VM_ALLOC_COUNT(count - i));
                if (m == NULL) {
+                       if ((allocflags & VM_ALLOC_NOWAIT) != 0)
+                               break;
                        VM_OBJECT_WUNLOCK(object);
                        VM_WAIT;
                        VM_OBJECT_WLOCK(object);
@@ -3221,6 +3227,7 @@ retrylookup:
                ma[i] = m;
                m = vm_page_next(m);
        }
+       return (i);
 }
 
 /*
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index e3df347..156da21 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -414,7 +414,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define VM_ALLOC_IGN_SBUSY      0x1000  /* (gp) Ignore shared busy flag */
 #define VM_ALLOC_NODUMP         0x2000  /* (ag) don't include in dump */
 #define VM_ALLOC_SBUSY          0x4000  /* (acgp) Shared busy the page */
-#define VM_ALLOC_NOWAIT         0x8000  /* (g) Do not sleep, return NULL */
+#define VM_ALLOC_NOWAIT         0x8000  /* (gp) Do not sleep */
 #define VM_ALLOC_COUNT_SHIFT    16
 #define VM_ALLOC_COUNT(count)   ((count) << VM_ALLOC_COUNT_SHIFT)
 
@@ -454,7 +454,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 vm_page_t vm_page_alloc_freelist(int, int);
 void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
-void vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
+int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_deactivate (vm_page_t);
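
Usage note: the sketch below is an illustration of the new contract, not part of the commit; the function grab_range_nowait and its parameters are hypothetical. With VM_ALLOC_NOWAIT set, vm_page_grab_pages() never sleeps and may grab only a leading prefix of the requested run, so the caller must check the returned count and clear the unfilled tail of the array, mirroring what sendfile_swapin() does above.

/*
 * Hypothetical example: grab up to "npages" pages at the start of "obj"
 * without sleeping.  vm_page_grab_pages() returns the number of pages
 * actually grabbed; slots past that prefix are cleared here because the
 * routine leaves them untouched on a partial grab.
 */
static int
grab_range_nowait(vm_object_t obj, vm_page_t *ma, int npages)
{
        int grabbed;

        VM_OBJECT_WLOCK(obj);
        grabbed = vm_page_grab_pages(obj, 0,
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOWAIT, ma, npages);
        VM_OBJECT_WUNLOCK(obj);
        for (int i = grabbed; i < npages; i++)
                ma[i] = NULL;
        return (grabbed);
}

Callers that do not pass VM_ALLOC_NOWAIT may sleep in VM_WAIT and always receive the full range, which is why the vfs_bio.c, pmap.c and vm_glue.c call sites simply cast the result to (void).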