summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorrwatson <rwatson@FreeBSD.org>2004-08-24 05:28:18 +0000
committerrwatson <rwatson@FreeBSD.org>2004-08-24 05:28:18 +0000
commitd168fd36065f1513e36e983cecaed2049d2c232e (patch)
tree91b93ddd9ea0d339695db92f71046508607a02ff /sys
parent9b0c1e7ac1a709a6f41387b5675d2ac50df30239 (diff)
downloadFreeBSD-src-d168fd36065f1513e36e983cecaed2049d2c232e.zip
FreeBSD-src-d168fd36065f1513e36e983cecaed2049d2c232e.tar.gz
Conditional acquisition of socket buffer mutexes when testing socket
buffers with kqueue filters is no longer required: the kqueue framework will guarantee that the mutex is held on entering the filter, either due to a call from the socket code already holding the mutex, or by explicitly acquiring it. This removes the last of the conditional socket locking.
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/uipc_socket.c51
1 file changed, 16 insertions(+), 35 deletions(-)
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 9b6c423..44fd16b 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -2171,31 +2171,22 @@ filt_sordetach(struct knote *kn)
static int
filt_soread(struct knote *kn, long hint)
{
- struct socket *so = kn->kn_fp->f_data;
- int need_lock, result;
+ struct socket *so;
+
+ so = kn->kn_fp->f_data;
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- /*
- * XXXRW: Conditional locking because filt_soread() can be called
- * either from KNOTE() in the socket context where the socket buffer
- * lock is already held, or from kqueue() itself.
- */
- need_lock = !SOCKBUF_OWNED(&so->so_rcv);
- if (need_lock)
- SOCKBUF_LOCK(&so->so_rcv);
kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
kn->kn_flags |= EV_EOF;
kn->kn_fflags = so->so_error;
- result = 1;
+ return (1);
} else if (so->so_error) /* temporary udp error */
- result = 1;
+ return (1);
else if (kn->kn_sfflags & NOTE_LOWAT)
- result = (kn->kn_data >= kn->kn_sdata);
+ return (kn->kn_data >= kn->kn_sdata);
else
- result = (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
- if (need_lock)
- SOCKBUF_UNLOCK(&so->so_rcv);
- return (result);
+ return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
}
static void
@@ -2214,34 +2205,24 @@ filt_sowdetach(struct knote *kn)
static int
filt_sowrite(struct knote *kn, long hint)
{
- struct socket *so = kn->kn_fp->f_data;
- int need_lock, result;
+ struct socket *so;
- /*
- * XXXRW: Conditional locking because filt_soread() can be called
- * either from KNOTE() in the socket context where the socket buffer
- * lock is already held, or from kqueue() itself.
- */
- need_lock = !SOCKBUF_OWNED(&so->so_snd);
- if (need_lock)
- SOCKBUF_LOCK(&so->so_snd);
+ so = kn->kn_fp->f_data;
+ SOCKBUF_LOCK_ASSERT(&so->so_snd);
kn->kn_data = sbspace(&so->so_snd);
if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
kn->kn_flags |= EV_EOF;
kn->kn_fflags = so->so_error;
- result = 1;
+ return (1);
} else if (so->so_error) /* temporary udp error */
- result = 1;
+ return (1);
else if (((so->so_state & SS_ISCONNECTED) == 0) &&
(so->so_proto->pr_flags & PR_CONNREQUIRED))
- result = 0;
+ return (0);
else if (kn->kn_sfflags & NOTE_LOWAT)
- result = (kn->kn_data >= kn->kn_sdata);
+ return (kn->kn_data >= kn->kn_sdata);
else
- result = (kn->kn_data >= so->so_snd.sb_lowat);
- if (need_lock)
- SOCKBUF_UNLOCK(&so->so_snd);
- return (result);
+ return (kn->kn_data >= so->so_snd.sb_lowat);
}
/*ARGSUSED*/
OpenPOWER on IntegriCloud