author	kmacy <kmacy@FreeBSD.org>	2010-04-30 00:46:43 +0000
committer	kmacy <kmacy@FreeBSD.org>	2010-04-30 00:46:43 +0000
commit	1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree	250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/net/bpf_zerocopy.c
parent	a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks (based
on a patch by Jeff Roberson), I've implemented page lock support in the
MI code and have only moved vm_page's hold_count out from under page
queue mutex to page lock. This changes pmap_extract_and_hold on all
pmaps.

Supported by:	Bitgravity Inc.
Discussed with:	alc, jeffr, and kib
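The note that this changes pmap_extract_and_hold on all pmaps refers to where
the hold on the looked-up page is taken. Below is a minimal, illustrative
sketch of that before/after locking pattern, assuming a simplified pmap;
lookup_pte(), pte_valid(), pte_allows() and pte_to_pa() are hypothetical
placeholders for the architecture-specific page-table walk, not real FreeBSD
functions, and each real pmap differs in detail.

/*
 * Illustrative sketch only: how the hold on the returned page moves
 * from the global page queue lock to the per-page lock.  The table
 * walk is elided; lookup_pte()/pte_valid()/pte_allows()/pte_to_pa()
 * are hypothetical helpers, and real pmaps differ per architecture.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t pte;
	vm_page_t m;

	m = NULL;
	PMAP_LOCK(pmap);
	pte = lookup_pte(pmap, va);		/* hypothetical helper */
	if (pte_valid(pte) && pte_allows(pte, prot)) {
		m = PHYS_TO_VM_PAGE(pte_to_pa(pte));
		/* Before: vm_page_lock_queues(); vm_page_hold(m); vm_page_unlock_queues(); */
		vm_page_lock(m);	/* after: hold_count is protected by the page lock */
		vm_page_hold(m);
		vm_page_unlock(m);
	}
	PMAP_UNLOCK(pmap);
	return (m);
}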
Diffstat (limited to 'sys/net/bpf_zerocopy.c')
-rw-r--r--	sys/net/bpf_zerocopy.c	2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/sys/net/bpf_zerocopy.c b/sys/net/bpf_zerocopy.c
index a1dd923..3cf4b8d 100644
--- a/sys/net/bpf_zerocopy.c
+++ b/sys/net/bpf_zerocopy.c
@@ -168,10 +168,12 @@ zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
 	    VM_PROT_WRITE);
 	if (pp == NULL)
 		return (NULL);
+	vm_page_lock(pp);
 	vm_page_lock_queues();
 	vm_page_wire(pp);
 	vm_page_unhold(pp);
 	vm_page_unlock_queues();
+	vm_page_unlock(pp);
 	sf = sf_buf_alloc(pp, SFB_NOWAIT);
 	if (sf == NULL) {
 		zbuf_page_free(pp);
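The vm_page_lock(pp)/vm_page_unlock(pp) pair added above takes the new
per-page lock around the hold_count manipulation (vm_page_unhold()), while
vm_page_wire() still runs under the page queue lock. Conceptually, the
per-page lock resolves to one mutex out of a hashed array of page locks, as
the commit message describes; the sketch below is illustrative only, with
hypothetical names, sizing and hash rather than the actual MI implementation.

/*
 * Conceptual sketch of a hashed array of page locks (names, sizing and
 * hash are hypothetical, not the real FreeBSD identifiers): each
 * vm_page maps to one mutex in a fixed pool, keyed by its physical
 * address, so unrelated pages rarely contend on the same lock.
 */
#define	PAGE_LOCK_COUNT	256		/* illustrative pool size */

static struct mtx page_locks[PAGE_LOCK_COUNT];

static struct mtx *
page_lockptr(vm_page_t m)
{

	/* Hash the page's physical address down to one slot in the pool. */
	return (&page_locks[(m->phys_addr >> PAGE_SHIFT) % PAGE_LOCK_COUNT]);
}

#define	page_lock(m)	mtx_lock(page_lockptr(m))
#define	page_unlock(m)	mtx_unlock(page_lockptr(m))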