author     alc <alc@FreeBSD.org>  2004-03-18 18:23:37 +0000
committer  alc <alc@FreeBSD.org>  2004-03-18 18:23:37 +0000
commit     6961e315f80c4297413622cc1ab6a16d47831e4f (patch)
tree       062e5e4f22a02665ffa9833000d9046d4f13c800 /sys
parent     aae79f39c1ed4d0a53e13d513424fd6530049a57 (diff)
Utilize sf_buf_alloc() and sf_buf_free() to implement the ephemeral
mappings required by mdstart_swap().  On i386, if the ephemeral mapping
is already in the sf_buf mapping cache, a swap-backed md performs
similarly to a malloc-backed md.  Even if the ephemeral mapping is not
cached, this implementation is still faster.  On 64-bit platforms, this
change has the effect of using the direct virtual-to-physical mapping,
avoiding ephemeral mapping overheads, such as TLB shootdowns on SMPs.

On a 2.4GHz, 400MHz FSB P4 Xeon configured with 64K sf_bufs and
"mdmfs -S -o async -s 128m md /mnt":

before:
	dd if=/dev/md0 of=/dev/null bs=64k
	134217728 bytes transferred in 0.430923 secs (311465697 bytes/sec)

after with cold sf_buf cache:
	dd if=/dev/md0 of=/dev/null bs=64k
	134217728 bytes transferred in 0.367948 secs (364773576 bytes/sec)

after with warm sf_buf cache:
	dd if=/dev/md0 of=/dev/null bs=64k
	134217728 bytes transferred in 0.252826 secs (530870010 bytes/sec)

malloc-backed md:
	dd if=/dev/md0 of=/dev/null bs=64k
	134217728 bytes transferred in 0.253126 secs (530240978 bytes/sec)
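Below is a minimal sketch (not part of the commit) of the ephemeral-mapping
pattern the change adopts: map one vm_page_t into kernel virtual address
space via sf_buf_alloc(), copy through sf_buf_kva(), and release the
mapping with sf_buf_free().  The helper name md_copyout_page is
hypothetical, and VM object locking and paging-error handling are elided;
the diff that follows shows the real usage in mdstart_swap().

/*
 * Sketch only: copy "len" bytes starting at byte offset "offs" within
 * page "m" out to "dst", using an ephemeral sf_buf mapping.  On i386
 * the mapping may be satisfied from the sf_buf cache; on 64-bit
 * platforms it reduces to the direct map.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sf_buf.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static void
md_copyout_page(vm_page_t m, vm_offset_t offs, u_char *dst, size_t len)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m);		/* ephemeral KVA mapping */
	bcopy((void *)(sf_buf_kva(sf) + offs), dst, len);
	sf_buf_free(sf);		/* release; the KVA may stay cached */
}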
Diffstat (limited to 'sys')
-rw-r--r--  sys/dev/md/md.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index 006f07f..8036f19 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -75,6 +75,7 @@
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
+#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
@@ -523,11 +524,11 @@ static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
+ struct sf_buf *sf;
int i, rv;
int offs, len, lastp, lastend;
vm_page_t m;
u_char *p;
- vm_offset_t kva;
p = bp->bio_data;
@@ -542,8 +543,6 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
- kva = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
-
VM_OBJECT_LOCK(sc->object);
vm_object_pip_add(sc->object, 1);
for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
@@ -551,20 +550,22 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
m = vm_page_grab(sc->object, i,
VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
- pmap_qenter(kva, &m, 1);
+ VM_OBJECT_UNLOCK(sc->object);
+ sf = sf_buf_alloc(m);
+ VM_OBJECT_LOCK(sc->object);
if (bp->bio_cmd == BIO_READ) {
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(sc->object,
&m, 1, 0);
}
- bcopy((void *)(kva + offs), p, len);
+ bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
} else if (bp->bio_cmd == BIO_WRITE) {
if (len != PAGE_SIZE && m->valid !=
VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(sc->object,
&m, 1, 0);
}
- bcopy(p, (void *)(kva + offs), len);
+ bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
m->valid = VM_PAGE_BITS_ALL;
#if 0
} else if (bp->bio_cmd == BIO_DELETE) {
@@ -573,12 +574,12 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
rv = vm_pager_get_pages(sc->object,
&m, 1, 0);
}
- bzero((void *)(kva + offs), len);
+ bzero((void *)(sf_buf_kva(sf) + offs), len);
vm_page_dirty(m);
m->valid = VM_PAGE_BITS_ALL;
#endif
}
- pmap_qremove(kva, 1);
+ sf_buf_free(sf);
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_activate(m);
@@ -600,7 +601,6 @@ printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid
vm_object_pip_subtract(sc->object, 1);
vm_object_set_writeable_dirty(sc->object);
VM_OBJECT_UNLOCK(sc->object);
- kmem_free(kernel_map, kva, sc->secsize);
return (0);
}
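A note on the 64-bit claim in the log message: on a platform with a
permanent direct physical-memory map, sf_buf_kva() can reduce to plain
address arithmetic, so no page-table update and no TLB shootdown is
needed at all.  A rough sketch of that idea, assuming an amd64-style
PHYS_TO_DMAP window (the real definitions live in the machine-dependent
sf_buf headers, and may differ):

static inline vm_offset_t
direct_map_kva(vm_page_t m)
{
	/*
	 * Pure arithmetic into the direct map: no pmap_qenter(),
	 * no IPIs to other CPUs.  Illustrative only.
	 */
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}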