summaryrefslogtreecommitdiffstats
path: root/sys/kern/uipc_socket.c
diff options
context:
space:
mode:
author: rwatson <rwatson@FreeBSD.org> 2004-06-18 02:57:55 +0000
committer: rwatson <rwatson@FreeBSD.org> 2004-06-18 02:57:55 +0000
commitd87fad9f086c5e1c8af4ba8e1373bcc73f21dce5 (patch)
treef8e2158ca72e6e07a6df949c4ae028022ce2f5d0 /sys/kern/uipc_socket.c
parent48317d5cbf6fc60d08d669d945662e451113a578 (diff)
downloadFreeBSD-src-d87fad9f086c5e1c8af4ba8e1373bcc73f21dce5.zip
FreeBSD-src-d87fad9f086c5e1c8af4ba8e1373bcc73f21dce5.tar.gz
Merge some additional leaf node socket buffer locking from
rwatson_netperf: Introduce conditional locking of the socket buffer in fifofs kqueue filters; KNOTE() will be called holding the socket buffer locks in fifofs, but sometimes the kqueue() system call will poll using the same entry point without holding the socket buffer lock. Introduce conditional locking of the socket buffer in the socket kqueue filters; KNOTE() will be called holding the socket buffer locks in the socket code, but sometimes the kqueue() system call will poll using the same entry points without holding the socket buffer lock. Simplify the logic in sodisconnect() since we no longer need spls. NOTE: To remove conditional locking in the kqueue filters, it would make sense to use a separate kqueue API entry into the socket/fifo code when calling from the kqueue() system call.
Diffstat (limited to 'sys/kern/uipc_socket.c')
-rw-r--r--  sys/kern/uipc_socket.c  39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 6606138..364f2db 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -506,20 +506,13 @@ int
sodisconnect(so)
struct socket *so;
{
- int s = splnet();
int error;
- if ((so->so_state & SS_ISCONNECTED) == 0) {
- error = ENOTCONN;
- goto bad;
- }
- if (so->so_state & SS_ISDISCONNECTING) {
- error = EALREADY;
- goto bad;
- }
+ if ((so->so_state & SS_ISCONNECTED) == 0)
+ return (ENOTCONN);
+ if (so->so_state & SS_ISDISCONNECTING)
+ return (EALREADY);
error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
-bad:
- splx(s);
return (error);
}
@@ -1913,8 +1906,16 @@ static int
filt_soread(struct knote *kn, long hint)
{
struct socket *so = kn->kn_fp->f_data;
- int result;
+ int need_lock, result;
+ /*
+ * XXXRW: Conditional locking because filt_soread() can be called
+ * either from KNOTE() in the socket context where the socket buffer
+ * lock is already held, or from kqueue() itself.
+ */
+ need_lock = !SOCKBUF_OWNED(&so->so_rcv);
+ if (need_lock)
+ SOCKBUF_LOCK(&so->so_rcv);
kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
kn->kn_flags |= EV_EOF;
@@ -1926,6 +1927,8 @@ filt_soread(struct knote *kn, long hint)
result = (kn->kn_data >= kn->kn_sdata);
else
result = (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
+ if (need_lock)
+ SOCKBUF_UNLOCK(&so->so_rcv);
return (result);
}
@@ -1946,8 +1949,16 @@ static int
filt_sowrite(struct knote *kn, long hint)
{
struct socket *so = kn->kn_fp->f_data;
- int result;
+ int need_lock, result;
+ /*
* XXXRW: Conditional locking because filt_sowrite() can be called
+ * either from KNOTE() in the socket context where the socket buffer
+ * lock is already held, or from kqueue() itself.
+ */
+ need_lock = !SOCKBUF_OWNED(&so->so_snd);
+ if (need_lock)
+ SOCKBUF_LOCK(&so->so_snd);
kn->kn_data = sbspace(&so->so_snd);
if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
kn->kn_flags |= EV_EOF;
@@ -1962,6 +1973,8 @@ filt_sowrite(struct knote *kn, long hint)
result = (kn->kn_data >= kn->kn_sdata);
else
result = (kn->kn_data >= so->so_snd.sb_lowat);
+ if (need_lock)
+ SOCKBUF_UNLOCK(&so->so_snd);
return (result);
}
OpenPOWER on IntegriCloud