path: root/sys/net/bpf_zerocopy.c
author    rwatson <rwatson@FreeBSD.org> 2009-03-07 10:21:37 +0000
committer rwatson <rwatson@FreeBSD.org> 2009-03-07 10:21:37 +0000
commit    7b4344f11fdcd78f3b2efef84c1eb704edf730b2 (patch)
tree      70b71b1073541481eb57e0cdf7e4ae6e05158de4 /sys/net/bpf_zerocopy.c
parent    6d523cd42af824d128b343edb51f35a859902554 (diff)
Clarify some comments, fix some types, and rename ZBUF_FLAG_IMMUTABLE to
ZBUF_FLAG_ASSIGNED to make it clear why the buffer can't be written to: it is assigned to userspace.
Diffstat (limited to 'sys/net/bpf_zerocopy.c')
-rw-r--r--  sys/net/bpf_zerocopy.c | 36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/sys/net/bpf_zerocopy.c b/sys/net/bpf_zerocopy.c
index de4d49a..35b27b9 100644
--- a/sys/net/bpf_zerocopy.c
+++ b/sys/net/bpf_zerocopy.c
@@ -91,7 +91,7 @@ __FBSDID("$FreeBSD$");
* knows that the space is not available.
*/
struct zbuf {
- vm_offset_t zb_uaddr; /* User address, may be stale. */
+ vm_offset_t zb_uaddr; /* User address at time of setup. */
size_t zb_size; /* Size of buffer, incl. header. */
u_int zb_numpages; /* Number of pages. */
int zb_flags; /* Flags on zbuf. */
@@ -104,7 +104,7 @@ struct zbuf {
* buffer may remain in the store position as a result of the user process
* not yet having acknowledged the buffer in the hold position yet.
*/
-#define ZBUF_FLAG_IMMUTABLE 0x00000001 /* Set when owned by user. */
+#define ZBUF_FLAG_ASSIGNED 0x00000001 /* Set when owned by user. */
/*
* Release a page we've previously wired.
@@ -262,8 +262,8 @@ bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
src_bytes = (u_char *)src;
zb = (struct zbuf *)buf;
- KASSERT((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0,
- ("bpf_zerocopy_append_bytes: ZBUF_FLAG_IMMUTABLE"));
+ KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
+ ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));
/*
* Scatter-gather copy to user pages mapped into kernel address space
@@ -314,8 +314,8 @@ bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
m = (struct mbuf *)src;
zb = (struct zbuf *)buf;
- KASSERT((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0,
- ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_IMMUTABLE"));
+ KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
+ ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));
/*
* Scatter gather both from an mbuf chain and to a user page set
@@ -374,8 +374,8 @@ bpf_zerocopy_buffull(struct bpf_d *d)
zb = (struct zbuf *)d->bd_sbuf;
KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));
- if ((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0) {
- zb->zb_flags |= ZBUF_FLAG_IMMUTABLE;
+ if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
+ zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
zb->zb_header->bzh_kernel_len = d->bd_slen;
atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
}
@@ -384,9 +384,8 @@ bpf_zerocopy_buffull(struct bpf_d *d)
/*
* Notification from the BPF framework that a buffer has moved into the held
* slot on a descriptor. Zero-copy BPF will update the shared page to let
- * the user process know and flag the buffer as immutable if it hasn't
- * already been marked immutable due to filling while it was in the store
- * position.
+ * the user process know and flag the buffer as assigned if it hasn't already
+ * been marked assigned due to filling while it was in the store position.
*
* Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
* on bd_hbuf and bd_hlen.
@@ -402,8 +401,8 @@ bpf_zerocopy_bufheld(struct bpf_d *d)
zb = (struct zbuf *)d->bd_hbuf;
KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));
- if ((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0) {
- zb->zb_flags |= ZBUF_FLAG_IMMUTABLE;
+ if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
+ zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
zb->zb_header->bzh_kernel_len = d->bd_hlen;
atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
}
@@ -411,7 +410,8 @@ bpf_zerocopy_bufheld(struct bpf_d *d)
/*
* Notification from the BPF framework that the free buffer has been been
- * re-assigned. This happens when the user ackknowledges the buffer.
+ * rotated out of the held position to the free position. This happens when
+ * the user acknowledges the held buffer.
*/
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
@@ -422,9 +422,9 @@ bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
("bpf_zerocopy_reclaim_buf: not in zbuf mode"));
KASSERT(d->bd_fbuf != NULL,
- ("bpf_zerocopy_buf_reclaimed: NULL free buff"));
+ ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
zb = (struct zbuf *)d->bd_fbuf;
- zb->zb_flags &= ~ZBUF_FLAG_IMMUTABLE;
+ zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}
/*
@@ -467,7 +467,7 @@ bpf_zerocopy_canwritebuf(struct bpf_d *d)
zb = (struct zbuf *)d->bd_sbuf;
KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));
- if (zb->zb_flags & ZBUF_FLAG_IMMUTABLE)
+ if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
return (0);
return (1);
}
@@ -510,7 +510,7 @@ bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
/*
* Ioctl to force rotation of the two buffers, if there's any data available.
- * This can be used by user space to implement time outs when waiting for a
+ * This can be used by user space to implement timeouts when waiting for a
* buffer to fill.
*/
int
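
For context beyond the diff itself: the renamed flag reflects the ownership handshake that zero-copy BPF carries out through the shared bpf_zbuf_header declared in <net/bpf.h>. Below is a minimal, illustrative sketch of the userspace side of that handshake; it assumes a buffer already mmap()ed and registered with BIOCSETZBUF, and the helper names check_zbuf() and ack_zbuf() are hypothetical, not part of this commit.

    #include <sys/types.h>
    #include <machine/atomic.h>
    #include <net/bpf.h>

    /*
     * Return the number of payload bytes if the kernel has assigned the
     * buffer to userspace, or 0 if the kernel still owns it.  The kernel
     * bumps bzh_kernel_gen (with release semantics) at the same time it
     * sets ZBUF_FLAG_ASSIGNED, so a mismatch against bzh_user_gen means
     * the buffer is ours to read.
     */
    static size_t
    check_zbuf(struct bpf_zbuf_header *bzh)
    {

            if (atomic_load_acq_int(&bzh->bzh_kernel_gen) ==
                bzh->bzh_user_gen)
                    return (0);
            return (bzh->bzh_kernel_len);
    }

    /*
     * Acknowledge the buffer: once the generation numbers match again,
     * the kernel may reclaim it, at which point
     * bpf_zerocopy_buf_reclaimed() clears ZBUF_FLAG_ASSIGNED and the
     * buffer becomes writable by the kernel once more.
     */
    static void
    ack_zbuf(struct bpf_zbuf_header *bzh)
    {

            atomic_store_rel_int(&bzh->bzh_user_gen, bzh->bzh_kernel_gen);
    }

On the kernel side, the hunks above set ZBUF_FLAG_ASSIGNED and advance bzh_kernel_gen in bpf_zerocopy_buffull() and bpf_zerocopy_bufheld(); the rotation ioctl described in the last hunk (BIOCROTZBUF) lets a user process force a partially filled store buffer into the held position, which is how timeouts can be implemented while waiting for a buffer to fill.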