author    rwatson <rwatson@FreeBSD.org>    2004-06-24 01:37:04 +0000
committer rwatson <rwatson@FreeBSD.org>    2004-06-24 01:37:04 +0000
commit    caac080ec9db76e581f66abcc30718b1968b0d1c (patch)
tree      e5101348b9f0100f7efbc938d8137c3daf54ec62    /sys/kern/uipc_sockbuf.c
parent    e71609f557ffc08ae313e1f890e4083c67d7f797 (diff)
Introduce sbreserve_locked(), which asserts the socket buffer lock on
the socket buffer having its limits adjusted.  sbreserve() now acquires the
lock before calling sbreserve_locked().

In soreserve(), acquire socket buffer locks across read-modify-writes of
socket buffer fields and calls into sbreserve()/sbrelease(); make sure to
acquire them in keeping with the socket buffer lock order.

In tcp_mss(), acquire the socket buffer lock in the calling context so that
we have an atomic read-modify-write on buffer sizes.
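As an illustration of the calling pattern described above, here is a minimal
sketch of a tcp_mss()-style caller: the socket buffer lock is taken in the
calling context so that the check of sb_hiwat and the call into
sbreserve_locked() form a single atomic read-modify-write.  The function
grow_send_buffer() and its bufsize parameter are hypothetical and not part of
this commit; only SOCKBUF_LOCK()/SOCKBUF_UNLOCK() and the sbreserve_locked()
signature come from the change itself.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/proc.h>

/*
 * Hypothetical caller: grow a socket's send buffer to "bufsize" while
 * holding the socket buffer lock, so that reading sb_hiwat and resizing
 * the buffer happen atomically with respect to other lock holders.
 */
static int
grow_send_buffer(struct socket *so, u_long bufsize, struct thread *td)
{
	struct sockbuf *sb = &so->so_snd;
	int error = 0;

	SOCKBUF_LOCK(sb);
	/*
	 * sbreserve_locked() asserts that the lock is held and, like
	 * sbreserve(), returns 0 on failure.
	 */
	if (bufsize > sb->sb_hiwat &&
	    sbreserve_locked(sb, bufsize, so, td) == 0)
		error = ENOBUFS;
	SOCKBUF_UNLOCK(sb);
	return (error);
}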
Diffstat (limited to 'sys/kern/uipc_sockbuf.c')
-rw-r--r--    sys/kern/uipc_sockbuf.c    33
1 file changed, 26 insertions, 7 deletions
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
index 7f18ba8..e366c3c 100644
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -456,24 +456,26 @@ soreserve(so, sndcc, rcvcc)
{
struct thread *td = curthread;
- if (sbreserve(&so->so_snd, sndcc, so, td) == 0)
+ SOCKBUF_LOCK(&so->so_snd);
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
goto bad;
- if (sbreserve(&so->so_rcv, rcvcc, so, td) == 0)
+ if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
goto bad2;
- SOCKBUF_LOCK(&so->so_rcv);
if (so->so_rcv.sb_lowat == 0)
so->so_rcv.sb_lowat = 1;
- SOCKBUF_UNLOCK(&so->so_rcv);
- SOCKBUF_LOCK(&so->so_snd);
if (so->so_snd.sb_lowat == 0)
so->so_snd.sb_lowat = MCLBYTES;
if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
+ SOCKBUF_UNLOCK(&so->so_rcv);
SOCKBUF_UNLOCK(&so->so_snd);
return (0);
bad2:
- sbrelease(&so->so_snd, so);
+ sbrelease_locked(&so->so_snd, so);
bad:
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_snd);
return (ENOBUFS);
}
@@ -503,7 +505,7 @@ sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
* if buffering efficiency is near the normal case.
*/
int
-sbreserve(sb, cc, so, td)
+sbreserve_locked(sb, cc, so, td)
struct sockbuf *sb;
u_long cc;
struct socket *so;
@@ -511,6 +513,8 @@ sbreserve(sb, cc, so, td)
{
rlim_t sbsize_limit;
+ SOCKBUF_LOCK_ASSERT(sb);
+
/*
* td will only be NULL when we're in an interrupt
* (e.g. in tcp_input())
@@ -532,6 +536,21 @@ sbreserve(sb, cc, so, td)
return (1);
}
+int
+sbreserve(sb, cc, so, td)
+ struct sockbuf *sb;
+ u_long cc;
+ struct socket *so;
+ struct thread *td;
+{
+ int error;
+
+ SOCKBUF_LOCK(sb);
+ error = sbreserve_locked(sb, cc, so, td);
+ SOCKBUF_UNLOCK(sb);
+ return (error);
+}
+
/*
* Free mbufs held by a socket, and reserved mbuf space.
*/