author      kib <kib@FreeBSD.org>    2017-03-28 06:07:59 +0000
committer   kib <kib@FreeBSD.org>    2017-03-28 06:07:59 +0000
commit      2a317bc917832a4b40c4b69aa496672156144bda (patch)
tree        75d92e88cf5389c711dedc7e5fb80466fc294fda /sys/vm
parent      e8aa125d464be1f250b75f43986fc93a25dfad86 (diff)
download    FreeBSD-src-2a317bc917832a4b40c4b69aa496672156144bda.zip
            FreeBSD-src-2a317bc917832a4b40c4b69aa496672156144bda.tar.gz
MFC r315281:
  Use atop() instead of OFF_TO_IDX() for conversion of addresses or
  address offsets, as intended.

MFC r315580 (by alc):
  Simplify the logic for clipping the range returned by the pager to fit
  within the map entry.  Use atop() rather than OFF_TO_IDX() on addresses.
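Both macros shift a byte quantity right by PAGE_SHIFT; the difference is the
operand type each is meant for.  A minimal sketch of the distinction (simplified,
not the verbatim header definitions; the exact casts live in the machine and
VM headers):

    /*
     * atop(x):        "address to page" -- for addresses and address
     *                 differences (vm_offset_t / vm_size_t).
     * OFF_TO_IDX(x):  for VM object byte offsets (vm_ooffset_t, a 64-bit
     *                 type), yielding a page index (vm_pindex_t).
     */
    #define atop(x)         ((unsigned long)(x) >> PAGE_SHIFT)
    #define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))

    /* Usage pattern this change settles on ("entry" is a vm_map_entry): */
    npages = atop(entry->end - entry->start);    /* byte length   -> page count */
    pindex = OFF_TO_IDX(entry->offset);          /* object offset -> page index */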
Diffstat (limited to 'sys/vm')
-rw-r--r--   sys/vm/vm_fault.c    23
-rw-r--r--   sys/vm/vm_kern.c     14
-rw-r--r--   sys/vm/vm_map.c       4
-rw-r--r--   sys/vm/vm_mmap.c      4
-rw-r--r--   sys/vm/vm_object.c   10
5 files changed, 27 insertions, 28 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index d875af9..ba0c775 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -352,8 +352,7 @@ vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
MPASS(fs->lookup_still_valid);
pager_first = OFF_TO_IDX(fs->entry->offset);
- pager_last = OFF_TO_IDX(fs->entry->offset + fs->entry->end -
- fs->entry->start) - 1;
+ pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
unlock_map(fs);
unlock_vp(fs);
@@ -404,18 +403,20 @@ vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
* In case the pager expanded the range, clip it to fit into
* the map entry.
*/
- map_first = MAX(OFF_TO_IDX(fs->entry->offset), pager_first);
- if (map_first > pager_first)
+ map_first = OFF_TO_IDX(fs->entry->offset);
+ if (map_first > pager_first) {
vm_fault_populate_cleanup(fs->first_object, pager_first,
map_first - 1);
- map_last = MIN(OFF_TO_IDX(fs->entry->end - fs->entry->start +
- fs->entry->offset) - 1, pager_last);
- if (map_last < pager_last)
+ pager_first = map_first;
+ }
+ map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
+ if (map_last < pager_last) {
vm_fault_populate_cleanup(fs->first_object, map_last + 1,
pager_last);
-
- for (pidx = map_first, m = vm_page_lookup(fs->first_object, pidx);
- pidx <= map_last; pidx++, m = vm_page_next(m)) {
+ pager_last = map_last;
+ }
+ for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
+ pidx <= pager_last; pidx++, m = vm_page_next(m)) {
vm_fault_populate_check_page(m);
vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags,
true);
@@ -1548,7 +1549,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
* actually shadow anything - we copy the pages directly.)
*/
dst_object = vm_object_allocate(OBJT_DEFAULT,
- OFF_TO_IDX(dst_entry->end - dst_entry->start));
+ atop(dst_entry->end - dst_entry->start));
#if VM_NRESERVLEVEL > 0
dst_object->flags |= OBJ_COLORED;
dst_object->pg_color = atop(dst_entry->start);
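The vm_fault_populate() hunk above (the r315580 part) replaces the MAX()/MIN()
computation with explicit clipping, so the final loop can walk the pages over
[pager_first, pager_last] directly.  A condensed sketch of the resulting flow,
reusing the variable names from the hunk:

    /* Trim the pager-returned run to the map entry's page-index range. */
    map_first = OFF_TO_IDX(fs->entry->offset);
    if (map_first > pager_first) {
            /* release pages before the entry's first page */
            vm_fault_populate_cleanup(fs->first_object, pager_first,
                map_first - 1);
            pager_first = map_first;
    }
    map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
    if (map_last < pager_last) {
            /* release pages past the entry's last page */
            vm_fault_populate_cleanup(fs->first_object, map_last + 1,
                pager_last);
            pager_last = map_last;
    }

Clipping pager_first/pager_last in place also frees any pages the pager
instantiated outside the map entry before the check/dirty loop runs.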
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 1b15f97..af678ac 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -162,8 +162,7 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
- vm_offset_t addr, i;
- vm_ooffset_t offset;
+ vm_offset_t addr, i, offset;
vm_page_t m;
int pflags, tries;
@@ -176,7 +175,7 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
for (i = 0; i < size; i += PAGE_SIZE) {
tries = 0;
retry:
- m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
+ m = vm_page_alloc_contig(object, atop(offset + i),
pflags, 1, low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
@@ -217,8 +216,7 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_memattr_t memattr)
{
vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
- vm_offset_t addr, tmp;
- vm_ooffset_t offset;
+ vm_offset_t addr, offset, tmp;
vm_page_t end_m, m;
u_long npages;
int pflags, tries;
@@ -232,7 +230,7 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
VM_OBJECT_WLOCK(object);
tries = 0;
retry:
- m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
+ m = vm_page_alloc_contig(object, atop(offset), pflags,
npages, low, high, alignment, boundary, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
@@ -343,7 +341,7 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
VM_OBJECT_WLOCK(object);
for (i = 0; i < size; i += PAGE_SIZE) {
retry:
- m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);
+ m = vm_page_alloc(object, atop(offset + i), pflags);
/*
* Ran out of space, free everything up and return. Don't need
@@ -395,7 +393,7 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
offset = addr - VM_MIN_KERNEL_ADDRESS;
VM_OBJECT_WLOCK(object);
for (i = 0; i < size; i += PAGE_SIZE) {
- m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
+ m = vm_page_lookup(object, atop(offset + i));
vm_page_unwire(m, PQ_NONE);
vm_page_free(m);
}
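In the vm_kern.c hunks the offset into kernel_object/kmem_object is an address
difference relative to VM_MIN_KERNEL_ADDRESS, so it fits in vm_offset_t and
atop() is the natural conversion; that is also why the vm_ooffset_t locals
become vm_offset_t.  A minimal sketch of the pattern (variable names follow
the hunks above):

    vm_offset_t offset;
    vm_page_t m;

    offset = addr - VM_MIN_KERNEL_ADDRESS;          /* address difference, not a file offset */
    m = vm_page_lookup(object, atop(offset + i));   /* page index within the kernel object */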
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 4bfe22c..9f49eac 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2904,7 +2904,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
vm_object_t object;
vm_pindex_t offidxstart, offidxend, count, size1;
- vm_ooffset_t size;
+ vm_size_t size;
vm_map_entry_unlink(map, entry);
object = entry->object.vm_object;
@@ -2921,7 +2921,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
KASSERT(entry->cred == NULL || object->cred == NULL ||
(entry->eflags & MAP_ENTRY_NEEDS_COPY),
("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
- count = OFF_TO_IDX(size);
+ count = atop(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
VM_OBJECT_WLOCK(object);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index db0a3bd..9b5766e 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -888,7 +888,7 @@ RestartScan:
/*
* calculate index into user supplied byte vector
*/
- vecindex = OFF_TO_IDX(addr - first_addr);
+ vecindex = atop(addr - first_addr);
/*
* If we have skipped map entries, we need to make sure that
@@ -934,7 +934,7 @@ RestartScan:
/*
* Zero the last entries in the byte vector.
*/
- vecindex = OFF_TO_IDX(end - first_addr);
+ vecindex = atop(end - first_addr);
while ((lastvecindex + 1) < vecindex) {
++lastvecindex;
error = subyte(vec + lastvecindex, 0);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index ee1907b..2e9d16f 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -279,16 +279,16 @@ vm_object_init(void)
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
rw_init(&kernel_object->lock, "kernel vm object");
- _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
- kernel_object);
+ _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
+ VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
kernel_object->flags |= OBJ_COLORED;
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
rw_init(&kmem_object->lock, "kmem vm object");
- _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
- kmem_object);
+ _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
+ VM_MIN_KERNEL_ADDRESS), kmem_object);
#if VM_NRESERVLEVEL > 0
kmem_object->flags |= OBJ_COLORED;
kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
@@ -1036,7 +1036,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (syncio && !invalidate && offset == 0 &&
- OFF_TO_IDX(size) == object->size) {
+ atop(size) == object->size) {
/*
* If syncing the whole mapping of the file,
* it is faster to schedule all the writes in