path: root/sys/vm
author     julian <julian@FreeBSD.org>  1999-04-05 19:38:30 +0000
committer  julian <julian@FreeBSD.org>  1999-04-05 19:38:30 +0000
commit     0ed09d2ad576c0a64797f8ca9bebd32873f770ae (patch)
tree       6de1ee6b7f198b11b20d471fbc1a36de8329d82e /sys/vm
parent     9ac433dd352fdfe7f3038aa0e1a4333686bc07fc (diff)
Catch a case spotted by Tor where mmapped files could leave garbage in the
unallocated parts of the last page when the file ended on a frag but not
a page boundary. Delimited by tags PRE_MATT_MMAP_EOF and POST_MATT_MMAP_EOF,
in files alpha/alpha/pmap.c i386/i386/pmap.c nfs/nfs_bio.c vm/pmap.h
vm/vm_page.c vm/vm_page.h vm/vnode_pager.c miscfs/specfs/spec_vnops.c
ufs/ufs/ufs_readwrite.c kern/vfs_bio.c

Submitted by: Matt Dillon <dillon@freebsd.org>
Reviewed by: Alan Cox <alc@freebsd.org>
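For context, the scenario is easy to reproduce from userland. The sketch
below (not part of this commit; the file name frag.dat is made up) maps a
file whose size is not a multiple of the page size and inspects the tail of
the last page. Those bytes past EOF must read as zero; before this fix they
could contain stale data when the file ended on a frag rather than a page
boundary.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
            long pagesize = sysconf(_SC_PAGESIZE);
            int fd = open("frag.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);
            char buf[100], *p;
            long i;

            /* Write 100 bytes: the file ends mid-frag, mid-page. */
            memset(buf, 'x', sizeof(buf));
            write(fd, buf, sizeof(buf));

            p = mmap(NULL, (size_t)pagesize, PROT_READ, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return (1);

            /* Bytes past EOF but within the last page must read as zero. */
            for (i = sizeof(buf); i < pagesize; i++)
                    if (p[i] != 0)
                            printf("garbage at offset %ld: 0x%02x\n",
                                i, (unsigned char)p[i]);

            munmap(p, (size_t)pagesize);
            close(fd);
            return (0);
    }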
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/pmap.h          3
-rw-r--r--  sys/vm/vm_page.c     156
-rw-r--r--  sys/vm/vm_page.h       3
-rw-r--r--  sys/vm/vnode_pager.c  33
4 files changed, 157 insertions(+), 38 deletions(-)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 1f35a6f..2fe5c18 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: pmap.h,v 1.27 1998/02/01 20:08:39 bde Exp $
+ * $Id: pmap.h,v 1.28 1998/07/26 18:15:20 dfr Exp $
*/
/*
@@ -129,6 +129,7 @@ void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_remove_pages __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_zero_page __P((vm_offset_t));
+void pmap_zero_page_area __P((vm_offset_t, int off, int size));
void pmap_prefault __P((pmap_t, vm_offset_t, vm_map_entry_t));
int pmap_mincore __P((pmap_t pmap, vm_offset_t addr));
void pmap_new_proc __P((struct proc *p));
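The new prototype is the machine-independent face of the fix: rather than
zeroing a whole page, pmap_zero_page_area() zeroes only a byte sub-range of
a physical page. The MD implementations live in alpha/alpha/pmap.c and
i386/i386/pmap.c, outside this diffstat. A userland model of the semantics
only (a plain buffer stands in for the temporarily mapped physical page the
real MD versions operate on):

    #include <string.h>

    /*
     * Model of the contract: zero the byte range [off, off + size)
     * of one page. The kernel versions take a physical address and
     * set up a temporary kernel mapping first.
     */
    static void
    pmap_zero_page_area_model(char *page, int off, int size)
    {
            memset(page + off, 0, size);
    }

    int
    main(void)
    {
            static char page[4096];

            memset(page, 'x', sizeof(page));
            pmap_zero_page_area_model(page, 100, 412);  /* zero [100, 512) */
            return (page[100] == 0 && page[99] == 'x' ? 0 : 1);
    }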
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 1cadc5b..e07ea63 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.127 1999/02/24 21:26:26 dillon Exp $
+ * $Id: vm_page.c,v 1.128 1999/03/19 05:21:03 alc Exp $
*/
/*
@@ -146,15 +146,6 @@ static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count = 0;
-/*
- * map of contiguous valid DEV_BSIZE chunks in a page
- * (this list is valid for page sizes upto 16*DEV_BSIZE)
- */
-static u_short vm_page_dev_bsize_chunks[] = {
- 0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
- 0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
-};
-
static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static void vm_page_free_wakeup __P((void));
@@ -1442,30 +1433,41 @@ retrylookup:
}
/*
- * mapping function for valid bits or for dirty bits in
+ * Mapping function for valid bits or for dirty bits in
* a page. May not block.
+ *
+ * Inputs are required to range within a page.
*/
+
__inline int
vm_page_bits(int base, int size)
{
- u_short chunk;
+ int first_bit;
+ int last_bit;
- if ((base == 0) && (size >= PAGE_SIZE))
- return VM_PAGE_BITS_ALL;
+ KASSERT(
+ base + size <= PAGE_SIZE,
+ ("vm_page_bits: illegal base/size %d/%d", base, size)
+ );
- size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
- base &= PAGE_MASK;
- if (size > PAGE_SIZE - base) {
- size = PAGE_SIZE - base;
- }
+ if (size == 0) /* handle degenerate case */
+ return(0);
- base = base / DEV_BSIZE;
- chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
- return (chunk << base) & VM_PAGE_BITS_ALL;
+ first_bit = base >> DEV_BSHIFT;
+ last_bit = (base + size - 1) >> DEV_BSHIFT;
+
+ return ((2 << last_bit) - (1 << first_bit));
}
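The arithmetic replacing the deleted lookup table is worth unpacking. With
DEV_BSIZE = 512 (DEV_BSHIFT = 9), a call with base = 512 and size = 1024
gives first_bit = 1 and last_bit = 2, so the result is
(2 << 2) - (1 << 1) = 6: bits 1 and 2 set, the same mask the old
vm_page_dev_bsize_chunks[] table produced, but with no 16-chunk limit on
the page size. A standalone restatement (assuming DEV_BSHIFT = 9 and a
4 KB page, as on i386):

    #include <assert.h>

    #define DEV_BSHIFT      9               /* DEV_BSIZE == 512 */
    #define PAGE_SIZE       4096

    static int
    vm_page_bits(int base, int size)
    {
            int first_bit, last_bit;

            if (size == 0)                  /* degenerate case */
                    return (0);
            first_bit = base >> DEV_BSHIFT;
            last_bit = (base + size - 1) >> DEV_BSHIFT;
            return ((2 << last_bit) - (1 << first_bit));
    }

    int
    main(void)
    {
            assert(vm_page_bits(512, 1024) == 0x06);    /* blocks 1-2 */
            assert(vm_page_bits(0, PAGE_SIZE) == 0xff); /* all 8 blocks */
            assert(vm_page_bits(100, 1) == 0x01);       /* block 0 only */
            return (0);
    }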
/*
* set a page valid and clean. May not block.
+ *
+ * In order to maintain consistency due to the DEV_BSIZE granularity
+ * of the valid bits, we have to zero non-DEV_BSIZE aligned portions of
+ * the page at the beginning and end of the valid range when the
+ * associated valid bits are not already set.
+ *
+ * (base + size) must be less than or equal to PAGE_SIZE.
*/
void
vm_page_set_validclean(m, base, size)
@@ -1473,10 +1475,57 @@ vm_page_set_validclean(m, base, size)
int base;
int size;
{
- int pagebits = vm_page_bits(base, size);
+ int pagebits;
+ int frag;
+ int endoff;
+
+ if (size == 0) /* handle degenerate case */
+ return;
+
+ /*
+ * If the base is not DEV_BSIZE aligned and the valid
+ * bit is clear, we have to zero out a portion of the
+ * first block.
+ */
+
+ if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
+ (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
+ ) {
+ pmap_zero_page_area(
+ VM_PAGE_TO_PHYS(m),
+ frag,
+ base - frag
+ );
+ }
+
+ /*
+ * If the ending offset is not DEV_BSIZE aligned and the
+ * valid bit is clear, we have to zero out a portion of
+ * the last block.
+ */
+
+ endoff = base + size;
+
+ if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
+ (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
+ ) {
+ pmap_zero_page_area(
+ VM_PAGE_TO_PHYS(m),
+ endoff,
+ DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
+ );
+ }
+
+ /*
+ * Set valid, clear dirty bits. If validating the entire
+ * page we can safely clear the pmap modify bit.
+ */
+
+ pagebits = vm_page_bits(base, size);
m->valid |= pagebits;
m->dirty &= ~pagebits;
- if( base == 0 && size == PAGE_SIZE)
+
+ if (base == 0 && size == PAGE_SIZE)
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}
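To make the frag arithmetic concrete: with DEV_BSIZE = 512, a call with
base = 100 and size = 900 (endoff = 1000) zeroes [0, 100) at the front,
because frag = 100 & ~511 = 0 differs from base, and [1000, 1024) at the
back, 512 - (1000 & 511) = 24 bytes, so both partially covered blocks are
fully consistent before their valid bits are set. A userland model of the
two fixups (a plain buffer stands in for the physical page):

    #include <stdio.h>
    #include <string.h>

    #define DEV_BSIZE       512
    #define DEV_BSHIFT      9

    /* Stand-in for pmap_zero_page_area(): zero a sub-range of a buffer. */
    static void
    zero_area(char *page, int off, int size)
    {
            memset(page + off, 0, size);
            printf("zeroed [%d, %d)\n", off, off + size);
    }

    int
    main(void)
    {
            static char page[4096];
            int valid = 0;                  /* no blocks valid yet */
            int base = 100, size = 900;
            int endoff = base + size;
            int frag;

            if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
                (valid & (1 << (base >> DEV_BSHIFT))) == 0)
                    zero_area(page, frag, base - frag);     /* [0, 100) */

            if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
                (valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
                    zero_area(page, endoff,
                        DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
                        /* [1000, 1024) */
            return (0);
    }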
@@ -1498,8 +1547,65 @@ vm_page_set_invalid(m, base, size)
}
/*
- * is (partial) page valid? May not block.
+ * vm_page_zero_invalid()
+ *
+ * The kernel assumes that the invalid portions of a page contain
+ * garbage, but such pages can be mapped into memory by user code.
+ * When this occurs, we must zero out the non-valid portions of the
+ * page so user code sees what it expects.
+ *
+ * Pages are most often semi-valid when the end of a file is mapped
+ * into memory and the file's size is not page aligned.
+ */
+
+void
+vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
+{
+ int b;
+ int i;
+
+ /*
+ * Scan the valid bits looking for invalid sections that
+ * must be zeroed. Invalid sub-DEV_BSIZE areas (where the
+ * valid bit may be set) have already been zeroed by
+ * vm_page_set_validclean().
+ */
+
+ for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
+ if (i == (PAGE_SIZE / DEV_BSIZE) ||
+ (m->valid & (1 << i))
+ ) {
+ if (i > b) {
+ pmap_zero_page_area(
+ VM_PAGE_TO_PHYS(m),
+ b << DEV_BSHIFT,
+ (i - b) << DEV_BSHIFT
+ );
+ }
+ b = i + 1;
+ }
+ }
+
+ /*
+ * setvalid is TRUE when we can safely set the zeroed areas
+ * as being valid. We can do this if there are no cache consistency
+ * issues, e.g., it is OK to do with UFS, but not OK to do with NFS.
+ */
+
+ if (setvalid)
+ m->valid = VM_PAGE_BITS_ALL;
+}
+
+/*
+ * vm_page_is_valid:
+ *
+ * Is (partial) page valid? Note that the case where size == 0
+ * will return FALSE in the degenerate case where the page is
+ * entirely invalid, and TRUE otherwise.
+ *
+ * May not block.
*/
+
int
vm_page_is_valid(m, base, size)
vm_page_t m;
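The scan in vm_page_zero_invalid() coalesces runs of invalid DEV_BSIZE
blocks into single pmap_zero_page_area() calls, with i == PAGE_SIZE /
DEV_BSIZE acting as a sentinel that flushes the final run. For example,
with valid = 0x0f on a 4 KB page (blocks 0-3 valid), b settles at 4 and the
sentinel iteration issues one call covering blocks 4-7, i.e. bytes
[2048, 4096). A userland model of the loop (printf in place of the zeroing
call):

    #include <stdio.h>

    #define DEV_BSIZE       512
    #define DEV_BSHIFT      9
    #define PAGE_SIZE       4096

    int
    main(void)
    {
            int valid = 0x0f;       /* blocks 0-3 valid, 4-7 invalid */
            int b, i;

            for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
                    if (i == (PAGE_SIZE / DEV_BSIZE) || (valid & (1 << i))) {
                            if (i > b)      /* flush invalid run [b, i) */
                                    printf("zero [%d, %d)\n",
                                        b << DEV_BSHIFT, i << DEV_BSHIFT);
                            b = i + 1;
                    }
            }
            return (0);             /* prints: zero [2048, 4096) */
    }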
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 78c51f2..8072f66 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.57 1999/03/14 20:40:15 julian Exp $
+ * $Id: vm_page.h,v 1.58 1999/03/15 05:09:48 julian Exp $
*/
/*
@@ -415,6 +415,7 @@ int vm_page_queue_index __P((vm_offset_t, int));
int vm_page_sleep(vm_page_t m, char *msg, char *busy);
int vm_page_asleep(vm_page_t m, char *msg, char *busy);
#endif
+void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
/*
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index b645ee3..39f1c35 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.104 1999/02/27 23:39:28 alc Exp $
+ * $Id: vnode_pager.c,v 1.105 1999/03/27 02:39:01 eivind Exp $
*/
/*
@@ -624,23 +624,21 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
cnt.v_vnodepgsin++;
return vnode_pager_input_smlfs(object, m[reqpage]);
}
+
/*
- * if ANY DEV_BSIZE blocks are valid on a large filesystem block
- * then, the entire page is valid --
- * XXX no it isn't
+ * If we have a completely valid page available to us, we can
+ * clean up and return. Otherwise we have to re-read the
+ * media.
*/
- if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
- m[reqpage]->valid = 0;
-
- if (m[reqpage]->valid) {
- m[reqpage]->valid = VM_PAGE_BITS_ALL;
+ if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
for (i = 0; i < count; i++) {
if (i != reqpage)
vnode_pager_freepage(m[i]);
}
return VM_PAGER_OK;
}
+ m[reqpage]->valid = 0;
/*
* here on direct device I/O
@@ -773,12 +771,25 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
mt = m[i];
if (nextoff <= size) {
+ /*
+ * Read filled up entire page.
+ */
mt->valid = VM_PAGE_BITS_ALL;
mt->dirty = 0;
pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
} else {
- int nvalid = ((size + DEV_BSIZE - 1) - tfoff) & ~(DEV_BSIZE - 1);
- vm_page_set_validclean(mt, 0, nvalid);
+ /*
+ * Read did not fill up entire page. Since this
+ * is getpages, the page may be mapped, so we have
+ * to zero the invalid portions of the page even
+ * though we aren't setting them valid.
+ *
+ * Currently we do not set the entire page valid,
+ * we just try to clear the piece that we couldn't
+ * read.
+ */
+ vm_page_set_validclean(mt, 0, size - tfoff);
+ vm_page_zero_invalid(mt, FALSE);
}
vm_page_flag_clear(mt, PG_ZERO);
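The changed arithmetic in the partial-page path is the heart of the fix.
For a hypothetical 10,340-byte file whose last page starts at tfoff = 8192,
the read fills size - tfoff = 2148 bytes. The old code rounded that up to a
DEV_BSIZE boundary (2560) and marked bytes that were never read
valid-and-clean; the new code validates exactly 2148 bytes, lets
vm_page_set_validclean() zero the tail of the partially covered block, and
leaves vm_page_zero_invalid() to zero the rest of the page without setting
it valid (NFS may still legitimately fill those bytes in later). A quick
comparison of the two computations:

    #include <stdio.h>

    #define DEV_BSIZE       512

    int
    main(void)
    {
            int size = 10340;       /* hypothetical file size */
            int tfoff = 8192;       /* file offset of the last page */

            /* Old: rounded up, wrongly validating unread frag bytes. */
            int nvalid = ((size + DEV_BSIZE - 1) - tfoff) & ~(DEV_BSIZE - 1);

            /* New: exactly what the read filled in. */
            int exact = size - tfoff;

            printf("old nvalid = %d, new = %d\n", nvalid, exact);
            return (0);             /* prints: old nvalid = 2560, new = 2148 */
    }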