author    | Renato Botelho <renato@netgate.com> | 2017-01-09 12:11:05 -0200
committer | Renato Botelho <renato@netgate.com> | 2017-01-09 12:11:05 -0200
commit    | 681a482d8fc4bfc14a24f7a9d75cca6337f2a520 (patch)
tree      | 08368e0c4dcea4baa16f4a34b2cc104c42e1ed27 /sys/vm
parent    | cbeab2a9b6b7ac70992175202f35fcc05a5821d5 (diff)
parent    | 91f6edbb8913d163d5c16fb615e84baf8a16d390 (diff)
Merge remote-tracking branch 'origin/stable/10' into devel
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/default_pager.c |  10
-rw-r--r-- | sys/vm/phys_pager.c    |  14
-rw-r--r-- | sys/vm/swap_pager.c    |  18
-rw-r--r-- | sys/vm/vm_fault.c      |   6
-rw-r--r-- | sys/vm/vm_map.c        | 174
-rw-r--r-- | sys/vm/vm_meter.c      |  13
-rw-r--r-- | sys/vm/vm_page.c       |  16
-rw-r--r-- | sys/vm/vm_pager.c      |  58
8 files changed, 142 insertions, 167 deletions
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 98dee45..012a220 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -63,6 +63,16 @@ static boolean_t default_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
 
 /*
  * pagerops for OBJT_DEFAULT - "default pager".
+ *
+ * This pager handles anonymous (no handle) swap-backed memory, just
+ * like the swap pager.  It allows several optimizations based on the
+ * fact that no pages of a default object can be swapped out.  The
+ * most important optimization is in vm_fault(), where the pager is
+ * never asked for a non-resident page.  Instead, a freshly allocated
+ * zeroed page is used.
+ *
+ * On the first request to page out a page from a default object, the
+ * object is converted to swap pager type.
 */
 struct pagerops defaultpagerops = {
	.pgo_alloc =	default_pager_alloc,
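The comment block added to default_pager.c documents behavior that is visible from userland: the first touch of an anonymous mapping is satisfied with a freshly zeroed page, with no pager I/O involved. A minimal sketch using standard mmap(2); the program itself is illustrative and not part of this commit:

```c
#include <sys/mman.h>
#include <assert.h>
#include <stdio.h>

int
main(void)
{
	/* Anonymous mapping: backed by the default (later swap) pager. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	assert(p != MAP_FAILED);
	/* First touch: vm_fault() supplies a fresh zeroed page. */
	assert(p[0] == 0 && p[4095] == 0);
	/* Only once a dirtied page must be paged out does the object
	 * get converted to the swap pager type. */
	p[0] = 1;
	printf("zero-fill OK\n");
	return (0);
}
```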
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 885a451..861776d 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -56,9 +56,6 @@ phys_pager_init(void)
 	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
 }
 
-/*
- * MPSAFE
- */
 static vm_object_t
 phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
     vm_ooffset_t foff, struct ucred *cred)
@@ -101,8 +98,8 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
 			object = object1;
 			object1 = NULL;
 			object->handle = handle;
-			TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
-			    pager_object_list);
+			TAILQ_INSERT_TAIL(&phys_pager_object_list,
+			    object, pager_object_list);
 		}
 	} else {
 		if (pindex > object->size)
@@ -117,9 +114,6 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
 	return (object);
 }
 
-/*
- * MPSAFE
- */
 static void
 phys_pager_dealloc(vm_object_t object)
 {
@@ -167,7 +161,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 
 static void
 phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
-		    int *rtvals)
+    int *rtvals)
 {
 
 	panic("phys_pager_putpage called");
@@ -185,7 +179,7 @@ phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
 #endif
 static boolean_t
 phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
-		   int *after)
+    int *after)
 {
 	vm_pindex_t base, end;
 
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 5a62114..99ff658 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1997,30 +1997,30 @@ swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
 static void
 swp_pager_meta_free_all(vm_object_t object)
 {
-	daddr_t index = 0;
+	struct swblock **pswap, *swap;
+	vm_pindex_t index;
+	daddr_t v;
+	int i;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	if (object->type != OBJT_SWAP)
 		return;
 
-	while (object->un_pager.swp.swp_bcount) {
-		struct swblock **pswap;
-		struct swblock *swap;
-
+	index = 0;
+	while (object->un_pager.swp.swp_bcount != 0) {
 		mtx_lock(&swhash_mtx);
 		pswap = swp_pager_hash(object, index);
 		if ((swap = *pswap) != NULL) {
-			int i;
-
 			for (i = 0; i < SWAP_META_PAGES; ++i) {
-				daddr_t v = swap->swb_pages[i];
+				v = swap->swb_pages[i];
 				if (v != SWAPBLK_NONE) {
 					--swap->swb_count;
 					swp_pager_freeswapspace(v, 1);
 				}
 			}
 			if (swap->swb_count != 0)
-				panic("swap_pager_meta_free_all: swb_count != 0");
+				panic(
+				    "swap_pager_meta_free_all: swb_count != 0");
 			*pswap = swap->swb_hnext;
 			uma_zfree(swap_zone, swap);
 			--object->un_pager.swp.swp_bcount;
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b3fd372..0152c1c 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -124,6 +124,7 @@ struct faultstate {
 	vm_map_t map;
 	vm_map_entry_t entry;
 	int lookup_still_valid;
+	int map_generation;
 	struct vnode *vp;
 };
 
@@ -336,7 +337,6 @@ vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
 	long ahead, behind;
 	int alloc_req, era, faultcount, nera, reqpage, result;
 	boolean_t dead, growstack, is_first_object_locked, wired;
-	int map_generation;
 	vm_object_t next_object;
 	vm_page_t marray[VM_FAULT_READ_MAX];
 	int hardfault;
@@ -372,7 +372,7 @@ RetryFault:;
 		return (result);
 	}
 
-	map_generation = fs.map->timestamp;
+	fs.map_generation = fs.map->timestamp;
 
 	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
 		panic("vm_fault: fault on nofault entry, addr: %lx",
@@ -950,7 +950,7 @@ readrest:
 			goto RetryFault;
 		}
 		fs.lookup_still_valid = TRUE;
-		if (fs.map->timestamp != map_generation) {
+		if (fs.map->timestamp != fs.map_generation) {
 			result = vm_map_lookup_locked(&fs.map, vaddr,
 			    fault_type, &fs.entry, &retry_object,
 			    &retry_pindex, &retry_prot, &wired);
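The vm_fault.c hunks move the map-generation snapshot from a local variable into struct faultstate, so the value travels with the fault state rather than living only in vm_fault_hold(). The underlying pattern — snapshot a version counter while locked, drop the lock for slow work, and relookup only if the counter moved — in a self-contained sketch; the struct names and FAULT_OK/FAULT_RETRY constants are invented for illustration, not kernel API:

```c
enum { FAULT_OK, FAULT_RETRY };

struct map {
	unsigned timestamp;	/* protected by the map lock; bumped on change */
};

struct fault_state {
	struct map *map;
	unsigned map_generation;	/* snapshot, like fs.map_generation */
};

static int
fault_revalidate(struct fault_state *fs)
{
	/* Snapshot while the map lock is still held. */
	fs->map_generation = fs->map->timestamp;

	/* ... unlock the map, perform paging I/O, relock ... */

	/* A relookup is needed only if the map changed meanwhile. */
	if (fs->map->timestamp != fs->map_generation)
		return (FAULT_RETRY);
	return (FAULT_OK);
}
```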
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 46e3089..750083f 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1130,24 +1130,24 @@ vm_map_lookup_entry(
  */
 int
 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
-	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
-	      int cow)
+    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
 {
-	vm_map_entry_t new_entry;
-	vm_map_entry_t prev_entry;
-	vm_map_entry_t temp_entry;
-	vm_eflags_t protoeflags;
+	vm_map_entry_t new_entry, prev_entry, temp_entry;
 	struct ucred *cred;
+	vm_eflags_t protoeflags;
 	vm_inherit_t inheritance;
-	boolean_t charge_prev_obj;
 
 	VM_MAP_ASSERT_LOCKED(map);
+	KASSERT((object != kmem_object && object != kernel_object) ||
+	    (cow & MAP_COPY_ON_WRITE) == 0,
+	    ("vm_map_insert: kmem or kernel object and COW"));
+	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
+	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
 
 	/*
 	 * Check that the start and end points are not bogus.
 	 */
-	if ((start < map->min_offset) || (end > map->max_offset) ||
-	    (start >= end))
+	if (start < map->min_offset || end > map->max_offset || start >= end)
 		return (KERN_INVALID_ADDRESS);
 
 	/*
@@ -1162,26 +1162,22 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 	/*
 	 * Assert that the next entry doesn't overlap the end point.
 	 */
-	if ((prev_entry->next != &map->header) &&
-	    (prev_entry->next->start < end))
+	if (prev_entry->next != &map->header && prev_entry->next->start < end)
 		return (KERN_NO_SPACE);
 
 	protoeflags = 0;
-	charge_prev_obj = FALSE;
-
 	if (cow & MAP_COPY_ON_WRITE)
-		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
-
-	if (cow & MAP_NOFAULT) {
+		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
+	if (cow & MAP_NOFAULT)
 		protoeflags |= MAP_ENTRY_NOFAULT;
-
-		KASSERT(object == NULL,
-		    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
-	}
 	if (cow & MAP_DISABLE_SYNCER)
 		protoeflags |= MAP_ENTRY_NOSYNC;
 	if (cow & MAP_DISABLE_COREDUMP)
 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
+	if (cow & MAP_STACK_GROWS_DOWN)
+		protoeflags |= MAP_ENTRY_GROWS_DOWN;
+	if (cow & MAP_STACK_GROWS_UP)
+		protoeflags |= MAP_ENTRY_GROWS_UP;
 	if (cow & MAP_VN_WRITECOUNT)
 		protoeflags |= MAP_ENTRY_VN_WRITECNT;
 	if (cow & MAP_INHERIT_SHARE)
@@ -1190,23 +1186,17 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 		inheritance = VM_INHERIT_DEFAULT;
 
 	cred = NULL;
-	KASSERT((object != kmem_object && object != kernel_object) ||
-	    ((object == kmem_object || object == kernel_object) &&
-	    !(protoeflags & MAP_ENTRY_NEEDS_COPY)),
-	    ("kmem or kernel object and cow"));
 	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
 		goto charged;
 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
 			return (KERN_RESOURCE_SHORTAGE);
-		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
+		KASSERT(object == NULL ||
+		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
 		    object->cred == NULL,
-		    ("OVERCOMMIT: vm_map_insert o %p", object));
+		    ("overcommit: vm_map_insert o %p", object));
 		cred = curthread->td_ucred;
-		crhold(cred);
-		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
-			charge_prev_obj = TRUE;
 	}
 
 charged:
@@ -1225,33 +1215,30 @@ charged:
 		if (object->ref_count > 1 || object->shadow_count != 0)
 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
 		VM_OBJECT_WUNLOCK(object);
-	}
-	else if ((prev_entry != &map->header) &&
-		 (prev_entry->eflags == protoeflags) &&
-		 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
-		 (prev_entry->end == start) &&
-		 (prev_entry->wired_count == 0) &&
-		 (prev_entry->cred == cred ||
-		  (prev_entry->object.vm_object != NULL &&
-		   (prev_entry->object.vm_object->cred == cred))) &&
-		   vm_object_coalesce(prev_entry->object.vm_object,
-		       prev_entry->offset,
-		       (vm_size_t)(prev_entry->end - prev_entry->start),
-		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
+	} else if (prev_entry != &map->header &&
+	    prev_entry->eflags == protoeflags &&
+	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
+	    prev_entry->end == start && prev_entry->wired_count == 0 &&
+	    (prev_entry->cred == cred ||
+	    (prev_entry->object.vm_object != NULL &&
+	    prev_entry->object.vm_object->cred == cred)) &&
+	    vm_object_coalesce(prev_entry->object.vm_object,
+	    prev_entry->offset,
+	    (vm_size_t)(prev_entry->end - prev_entry->start),
+	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
+	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
 		/*
 		 * We were able to extend the object.  Determine if we
 		 * can extend the previous map entry to include the
 		 * new range as well.
 		 */
-		if ((prev_entry->inheritance == inheritance) &&
-		    (prev_entry->protection == prot) &&
-		    (prev_entry->max_protection == max)) {
-			map->size += (end - prev_entry->end);
+		if (prev_entry->inheritance == inheritance &&
+		    prev_entry->protection == prot &&
+		    prev_entry->max_protection == max) {
+			map->size += end - prev_entry->end;
 			prev_entry->end = end;
 			vm_map_entry_resize_free(map, prev_entry);
 			vm_map_simplify_entry(map, prev_entry);
-			if (cred != NULL)
-				crfree(cred);
 			return (KERN_SUCCESS);
 		}
 
@@ -1263,21 +1250,16 @@ charged:
 		 */
 		object = prev_entry->object.vm_object;
 		offset = prev_entry->offset +
-			(prev_entry->end - prev_entry->start);
+		    (prev_entry->end - prev_entry->start);
 		vm_object_reference(object);
 		if (cred != NULL && object != NULL && object->cred != NULL &&
 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
 			/* Object already accounts for this uid. */
-			crfree(cred);
 			cred = NULL;
 		}
 	}
-
-	/*
-	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
-	 * in things like the buffer map where we manage kva but do not manage
-	 * backing objects.
-	 */
+	if (cred != NULL)
+		crhold(cred);
 
 	/*
 	 * Create a new entry
@@ -1301,7 +1283,7 @@ charged:
 	new_entry->next_read = OFF_TO_IDX(offset);
 
 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
-	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
+	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
 	new_entry->cred = cred;
 
 	/*
@@ -1311,17 +1293,16 @@ charged:
 	map->size += new_entry->end - new_entry->start;
 
 	/*
-	 * It may be possible to merge the new entry with the next and/or
-	 * previous entries.  However, due to MAP_STACK_* being a hack, a
-	 * panic can result from merging such entries.
+	 * Try to coalesce the new entry with both the previous and next
+	 * entries in the list.  Previously, we only attempted to coalesce
+	 * with the previous entry when object is NULL.  Here, we handle the
+	 * other cases, which are less common.
 	 */
-	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
-		vm_map_simplify_entry(map, new_entry);
+	vm_map_simplify_entry(map, new_entry);
 
-	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
-		vm_map_pmap_enter(map, start, prot,
-		    object, OFF_TO_IDX(offset), end - start,
-		    cow & MAP_PREFAULT_PARTIAL);
+	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
+		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
+		    end - start, cow & MAP_PREFAULT_PARTIAL);
 	}
 
 	return (KERN_SUCCESS);
@@ -1531,7 +1512,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 	vm_map_entry_t next, prev;
 	vm_size_t prevsize, esize;
 
-	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
+	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
+	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
 		return;
 
 	prev = entry->prev;
@@ -2896,8 +2878,8 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 		offidxstart = OFF_TO_IDX(entry->offset);
 		offidxend = offidxstart + count;
 		VM_OBJECT_WLOCK(object);
-		if (object->ref_count != 1 &&
-		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
+		if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
+		    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
 		    object == kernel_object || object == kmem_object)) {
 			vm_object_collapse(object);
 
@@ -2910,7 +2892,8 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 			vm_object_page_remove(object, offidxstart,
 			    offidxend, OBJPR_NOTMAPPED);
 			if (object->type == OBJT_SWAP)
-				swap_pager_freespace(object, offidxstart, count);
+				swap_pager_freespace(object, offidxstart,
+				    count);
 			if (offidxend >= object->size &&
 			    offidxstart < object->size) {
 				size1 = object->size;
@@ -2918,8 +2901,9 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 			if (object->cred != NULL) {
 				size1 -= object->size;
 				KASSERT(object->charge >= ptoa(size1),
-				    ("vm_map_entry_delete: object->charge < 0"));
-				swap_release_by_cred(ptoa(size1), object->cred);
+				    ("object %p charge < 0", object));
+				swap_release_by_cred(ptoa(size1),
+				    object->cred);
 				object->charge -= ptoa(size1);
 			}
 		}
@@ -3137,13 +3121,15 @@ vm_map_copy_entry(
 		if ((src_object = src_entry->object.vm_object) != NULL) {
 			VM_OBJECT_WLOCK(src_object);
 			charged = ENTRY_CHARGED(src_entry);
-			if ((src_object->handle == NULL) &&
-				(src_object->type == OBJT_DEFAULT ||
-				 src_object->type == OBJT_SWAP)) {
+			if (src_object->handle == NULL &&
+			    (src_object->type == OBJT_DEFAULT ||
+			    src_object->type == OBJT_SWAP)) {
 				vm_object_collapse(src_object);
-				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
+				if ((src_object->flags & (OBJ_NOSPLIT |
+				    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
 					vm_object_split(src_entry);
-					src_object = src_entry->object.vm_object;
+					src_object =
+					    src_entry->object.vm_object;
 				}
 			}
 			vm_object_reference_locked(src_object);
@@ -3170,8 +3156,10 @@ vm_map_copy_entry(
 				*fork_charge += size;
 			}
 		}
-		src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
-		dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
+		src_entry->eflags |= MAP_ENTRY_COW |
+		    MAP_ENTRY_NEEDS_COPY;
+		dst_entry->eflags |= MAP_ENTRY_COW |
+		    MAP_ENTRY_NEEDS_COPY;
 		dst_entry->offset = src_entry->offset;
 		if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
 			/*
@@ -3521,17 +3509,17 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 
 	/* Now set the avail_ssize amount. */
 	if (rv == KERN_SUCCESS) {
-		if (prev_entry != &map->header)
-			vm_map_clip_end(map, prev_entry, bot);
 		new_entry = prev_entry->next;
 		if (new_entry->end != top || new_entry->start != bot)
 			panic("Bad entry start/end for new stack entry");
 
 		new_entry->avail_ssize = max_ssize - init_ssize;
-		if (orient & MAP_STACK_GROWS_DOWN)
-			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
-		if (orient & MAP_STACK_GROWS_UP)
-			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
+		KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
+		    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
+		    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
+		KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
+		    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
+		    ("new entry lacks MAP_ENTRY_GROWS_UP"));
 	}
 
 	return (rv);
@@ -3758,21 +3746,21 @@ Retry:
 		}
 
 		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
-		    next_entry->protection, next_entry->max_protection, 0);
+		    next_entry->protection, next_entry->max_protection,
+		    MAP_STACK_GROWS_DOWN);
 
 		/* Adjust the available stack space by the amount we grew. */
 		if (rv == KERN_SUCCESS) {
-			if (prev_entry != &map->header)
-				vm_map_clip_end(map, prev_entry, addr);
 			new_entry = prev_entry->next;
 			KASSERT(new_entry == stack_entry->prev, ("foo"));
 			KASSERT(new_entry->end == stack_entry->start, ("foo"));
 			KASSERT(new_entry->start == addr, ("foo"));
+			KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) !=
+			    0, ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
 			grow_amount = new_entry->end - new_entry->start;
 			new_entry->avail_ssize = stack_entry->avail_ssize -
 			    grow_amount;
 			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
-			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
 		}
 	} else {
 		/*
@@ -3799,10 +3787,10 @@ Retry:
 			rv = KERN_NO_SPACE;
 		/* Grow the underlying object if applicable. */
 		else if (stack_entry->object.vm_object == NULL ||
-			 vm_object_coalesce(stack_entry->object.vm_object,
-			 stack_entry->offset,
-			 (vm_size_t)(stack_entry->end - stack_entry->start),
-			 (vm_size_t)grow_amount, cred != NULL)) {
+		    vm_object_coalesce(stack_entry->object.vm_object,
+		    stack_entry->offset,
+		    (vm_size_t)(stack_entry->end - stack_entry->start),
+		    (vm_size_t)grow_amount, cred != NULL)) {
 			map->size += (addr - stack_entry->end);
 			/* Update the current entry. */
 			stack_entry->end = addr;
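The vm_map.c changes centralize stack-entry bookkeeping: vm_map_insert() now translates MAP_STACK_GROWS_DOWN/UP into MAP_ENTRY_GROWS_DOWN/UP when the entry is created, vm_map_simplify_entry() refuses to merge entries carrying those flags, and the stack-creation/grow paths assert the flags instead of setting them after the fact. The translation step in isolation, as a sketch — the constant values here are invented stand-ins, not the kernel's:

```c
#define	MAP_COPY_ON_WRITE	0x0001
#define	MAP_NOFAULT		0x0002
#define	MAP_STACK_GROWS_DOWN	0x0004
#define	MAP_STACK_GROWS_UP	0x0008

#define	MAP_ENTRY_COW		0x0100
#define	MAP_ENTRY_NEEDS_COPY	0x0200
#define	MAP_ENTRY_NOFAULT	0x0400
#define	MAP_ENTRY_GROWS_DOWN	0x0800
#define	MAP_ENTRY_GROWS_UP	0x1000

/* Map per-call cow request bits onto persistent entry flags. */
static unsigned
proto_eflags(unsigned cow)
{
	unsigned protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	/* New in this change: stack direction recorded at insert time. */
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	return (protoeflags);
}
```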
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 3f8ba38..c60c1fa 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -123,15 +123,10 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 	 */
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
-		if (p->p_flag & P_SYSTEM)
+		if ((p->p_flag & P_SYSTEM) != 0)
 			continue;
 		PROC_LOCK(p);
-		switch (p->p_state) {
-		case PRS_NEW:
-			PROC_UNLOCK(p);
-			continue;
-			break;
-		default:
+		if (p->p_state != PRS_NEW) {
 			FOREACH_THREAD_IN_PROC(p, td) {
 				thread_lock(td);
 				switch (td->td_state) {
@@ -148,15 +143,13 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 						total.t_pw++;
 					}
 					break;
-
 				case TDS_CAN_RUN:
 					total.t_sw++;
 					break;
 				case TDS_RUNQ:
 				case TDS_RUNNING:
 					total.t_rq++;
-					thread_unlock(td);
-					continue;
+					break;
 				default:
 					break;
 				}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index b688b56..106557c 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1175,9 +1175,11 @@ vm_page_next(vm_page_t m)
 	vm_page_t next;
 
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
-	    next->pindex != m->pindex + 1)
-		next = NULL;
+	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
+		MPASS(next->object == m->object);
+		if (next->pindex != m->pindex + 1)
+			next = NULL;
+	}
 	return (next);
 }
 
@@ -1193,9 +1195,11 @@ vm_page_prev(vm_page_t m)
 	vm_page_t prev;
 
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
-	    prev->pindex != m->pindex - 1)
-		prev = NULL;
+	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
+		MPASS(prev->object == m->object);
+		if (prev->pindex != m->pindex - 1)
+			prev = NULL;
+	}
 	return (prev);
 }
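The vm_page.c hunks add MPASS assertions that a page's listq neighbor belongs to the same object before its pindex is consulted. The lookup pattern itself — follow one list link, then check index adjacency to decide whether the neighbor is the resident next page — in a self-contained userland sketch; struct page here is a simplified stand-in, only the shape of the code matches the kernel's:

```c
#include <sys/queue.h>
#include <stddef.h>

struct page {
	TAILQ_ENTRY(page) listq;	/* object's page list, pindex order */
	unsigned long pindex;
};
TAILQ_HEAD(pglist, page);

/* Return the next resident page, or NULL if pindex + 1 is absent. */
static struct page *
page_next(struct page *m)
{
	struct page *next;

	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)
		next = NULL;	/* a neighbor exists, but not the adjacent one */
	return (next);
}
```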
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index c7d038b..6eb34d9 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -107,43 +107,35 @@ static vm_object_t
 dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
     vm_ooffset_t off, struct ucred *cred)
 {
-	return NULL;
+
+	return (NULL);
 }
 
 static void
-dead_pager_putpages(object, m, count, flags, rtvals)
-	vm_object_t object;
-	vm_page_t *m;
-	int count;
-	int flags;
-	int *rtvals;
+dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
+    int flags, int *rtvals)
 {
 	int i;
 
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < count; i++)
 		rtvals[i] = VM_PAGER_AGAIN;
-	}
 }
 
 static int
-dead_pager_haspage(object, pindex, prev, next)
-	vm_object_t object;
-	vm_pindex_t pindex;
-	int *prev;
-	int *next;
+dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
 {
-	if (prev)
+
+	if (prev != NULL)
 		*prev = 0;
-	if (next)
+	if (next != NULL)
 		*next = 0;
-	return FALSE;
+	return (FALSE);
 }
 
 static void
-dead_pager_dealloc(object)
-	vm_object_t object;
+dead_pager_dealloc(vm_object_t object)
 {
-	return;
+
 }
 
 static struct pagerops deadpagerops = {
@@ -181,7 +173,7 @@ static int bswneeded;
 vm_offset_t swapbkva;		/* swap buffers kva */
 
 void
-vm_pager_init()
+vm_pager_init(void)
 {
 	struct pagerops **pgops;
 
@@ -191,11 +183,11 @@ vm_pager_init()
 	 */
 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
 		if ((*pgops)->pgo_init != NULL)
-			(*(*pgops)->pgo_init) ();
+			(*(*pgops)->pgo_init)();
 }
 
 void
-vm_pager_bufferinit()
+vm_pager_bufferinit(void)
 {
 	struct buf *bp;
 	int i;
@@ -231,7 +223,7 @@ vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
 
 	ops = pagertab[type];
 	if (ops)
-		ret = (*ops->pgo_alloc) (handle, size, prot, off, cred);
+		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
 	else
 		ret = NULL;
 	return (ret);
@@ -241,8 +233,7 @@ vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
 *	The object must be locked.
 */
 void
-vm_pager_deallocate(object)
-	vm_object_t object;
+vm_pager_deallocate(vm_object_t object)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
@@ -291,12 +282,13 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
 static void
 initpbuf(struct buf *bp)
 {
 
+	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
 	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
 	bp->b_rcred = NOCRED;
 	bp->b_wcred = NOCRED;
 	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
-	bp->b_saveaddr = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
+	bp->b_saveaddr = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
 	bp->b_data = bp->b_saveaddr;
 	bp->b_kvabase = bp->b_saveaddr;
 	bp->b_kvasize = MAXPHYS;
@@ -329,9 +321,8 @@ getpbuf(int *pfreecnt)
 	struct buf *bp;
 
 	mtx_lock(&pbuf_mtx);
-	for (;;) {
-		if (pfreecnt) {
+	if (pfreecnt != NULL) {
 		while (*pfreecnt == 0) {
 			msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
 		}
 	}
@@ -349,9 +340,8 @@ getpbuf(int *pfreecnt)
 	if (pfreecnt)
 		--*pfreecnt;
 	mtx_unlock(&pbuf_mtx);
-
 	initpbuf(bp);
-	return bp;
+	return (bp);
 }
 
 /*
@@ -371,14 +361,10 @@ trypbuf(int *pfreecnt)
 		return NULL;
 	}
 	TAILQ_REMOVE(&bswlist, bp, b_freelist);
-	--*pfreecnt;
-	mtx_unlock(&pbuf_mtx);
-	initpbuf(bp);
-
-	return bp;
+	return (bp);
 }
 
 /*
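The vm_pager.c diff is almost entirely style(9) modernization: K&R parameter lists become ANSI prototypes, bare return values gain parentheses, and pointer tests compare against NULL explicitly. The two definition styles side by side, on a hypothetical function — old_style()/new_style() are made-up names, not part of this commit:

```c
/* Before: K&R-style definition with a bare return value. */
int
old_style(a, b)
	int a;
	int b;
{
	return a + b;
}

/* After: ANSI definition; style(9) parenthesizes return values. */
int
new_style(int a, int b)
{

	return (a + b);
}
```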