summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    alc <alc@FreeBSD.org>  2002-08-25 00:22:31 +0000
committer alc <alc@FreeBSD.org>  2002-08-25 00:22:31 +0000
commit cdcc7b3446c85c853dfe52a068a60cfe203c5afd (patch)
tree 1434aead5b19bb951abb3181cdfe9517740f36b6
parent 883caafe6b42481f1cd62d5448db088af9bc54b7 (diff)
download: FreeBSD-src-cdcc7b3446c85c853dfe52a068a60cfe203c5afd.zip
download: FreeBSD-src-cdcc7b3446c85c853dfe52a068a60cfe203c5afd.tar.gz
o Retire vm_page_zero_fill() and vm_page_zero_fill_area().  Ever since
  pmap_zero_page() and pmap_zero_page_area() were modified to accept a
  struct vm_page * instead of a physical address, vm_page_zero_fill()
  and vm_page_zero_fill_area() have served no purpose.
-rw-r--r--sys/dev/agp/agp.c2
-rw-r--r--sys/dev/agp/agp_i810.c2
-rw-r--r--sys/pci/agp.c2
-rw-r--r--sys/pci/agp_i810.c2
-rw-r--r--sys/ufs/ffs/ffs_vnops.c2
-rw-r--r--sys/vm/phys_pager.c2
-rw-r--r--sys/vm/vm_fault.c2
-rw-r--r--sys/vm/vm_kern.c4
-rw-r--r--sys/vm/vm_page.c25
-rw-r--r--sys/vm/vm_page.h2
-rw-r--r--sys/vm/vnode_pager.c2
11 files changed, 10 insertions, 37 deletions
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index b8e04fa..c475145 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -423,7 +423,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((m->flags & PG_ZERO) == 0)
- vm_page_zero_fill(m);
+ pmap_zero_page(m);
AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
/*
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
index c78cfb1..5f17de0 100644
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -373,7 +373,7 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
m = vm_page_grab(mem->am_obj, 0,
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((m->flags & PG_ZERO) == 0)
- vm_page_zero_fill(m);
+ pmap_zero_page(m);
vm_page_lock_queues();
mem->am_physical = VM_PAGE_TO_PHYS(m);
vm_page_wakeup(m);
diff --git a/sys/pci/agp.c b/sys/pci/agp.c
index b8e04fa..c475145 100644
--- a/sys/pci/agp.c
+++ b/sys/pci/agp.c
@@ -423,7 +423,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((m->flags & PG_ZERO) == 0)
- vm_page_zero_fill(m);
+ pmap_zero_page(m);
AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
/*
diff --git a/sys/pci/agp_i810.c b/sys/pci/agp_i810.c
index c78cfb1..5f17de0 100644
--- a/sys/pci/agp_i810.c
+++ b/sys/pci/agp_i810.c
@@ -373,7 +373,7 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
m = vm_page_grab(mem->am_obj, 0,
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((m->flags & PG_ZERO) == 0)
- vm_page_zero_fill(m);
+ pmap_zero_page(m);
vm_page_lock_queues();
mem->am_physical = VM_PAGE_TO_PHYS(m);
vm_page_wakeup(m);
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index b88be6c..0b36545 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -930,7 +930,7 @@ ffs_getpages(ap)
vm_page_unlock_queues();
if (reqblkno == -1) {
if ((mreq->flags & PG_ZERO) == 0)
- vm_page_zero_fill(mreq);
+ pmap_zero_page(mreq);
vm_page_undirty(mreq);
mreq->valid = VM_PAGE_BITS_ALL;
return VM_PAGER_OK;
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 98c505c..c271959 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -144,7 +144,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
*/
for (i = 0; i < count; i++) {
if ((m[i]->flags & PG_ZERO) == 0)
- vm_page_zero_fill(m[i]);
+ pmap_zero_page(m[i]);
vm_page_flag_set(m[i], PG_ZERO);
/* Switch off pv_entries */
vm_page_lock_queues();
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index e670dc4..a7cec52 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -622,7 +622,7 @@ readrest:
* Zero the page if necessary and mark it valid.
*/
if ((fs.m->flags & PG_ZERO) == 0) {
- vm_page_zero_fill(fs.m);
+ pmap_zero_page(fs.m);
} else {
cnt.v_ozfod++;
}
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index f6fa55a..12608a6 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -199,7 +199,7 @@ kmem_alloc(map, size)
mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((mem->flags & PG_ZERO) == 0)
- vm_page_zero_fill(mem);
+ pmap_zero_page(mem);
mem->valid = VM_PAGE_BITS_ALL;
vm_page_flag_clear(mem, PG_ZERO);
vm_page_wakeup(mem);
@@ -395,7 +395,7 @@ retry:
goto bad;
}
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
- vm_page_zero_fill(m);
+ pmap_zero_page(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index a4b9fc6..637d2c0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -454,31 +454,6 @@ vm_page_protect(vm_page_t mem, int prot)
vm_page_flag_clear(mem, PG_WRITEABLE);
}
}
-/*
- * vm_page_zero_fill:
- *
- * Zero-fill the specified page.
- * Written as a standard pagein routine, to
- * be used by the zero-fill object.
- */
-boolean_t
-vm_page_zero_fill(vm_page_t m)
-{
- pmap_zero_page(m);
- return (TRUE);
-}
-
-/*
- * vm_page_zero_fill_area:
- *
- * Like vm_page_zero_fill but only fill the specified area.
- */
-boolean_t
-vm_page_zero_fill_area(vm_page_t m, int off, int size)
-{
- pmap_zero_page_area(m, off, size);
- return (TRUE);
-}
/*
* vm_page_copy:
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 46de831..d61b748 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -325,8 +325,6 @@ void vm_page_io_finish(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_protect(vm_page_t mem, int prot);
-boolean_t vm_page_zero_fill(vm_page_t m);
-boolean_t vm_page_zero_fill_area(vm_page_t m, int off, int len);
void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 68a47d0..f09f147 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -328,7 +328,7 @@ vnode_pager_setsize(vp, nsize)
* Clear out partial-page garbage in case
* the page has been mapped.
*/
- vm_page_zero_fill_area(m, base, size);
+ pmap_zero_page_area(m, base, size);
/*
* XXX work around SMP data integrity race
OpenPOWER on IntegriCloud