-rw-r--r--  sys/kern/uipc_sockbuf.c    |  8
-rw-r--r--  sys/kern/uipc_socket.c     | 16
-rw-r--r--  sys/kern/uipc_syscalls.c   |  9
-rw-r--r--  sys/netinet/sctp_input.c   |  3
-rw-r--r--  sys/netinet/sctp_peeloff.c |  4
-rw-r--r--  sys/netinet/sctputil.c     |  2
-rw-r--r--  sys/sys/socketvar.h        |  7
7 files changed, 36 insertions, 13 deletions
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
index d8c0cab..16923cf 100644
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -137,8 +137,12 @@ int
sblock(struct sockbuf *sb, int flags)
{
- if (flags == M_WAITOK) {
- if (sb->sb_flags & SB_NOINTR) {
+ KASSERT((flags & SBL_VALID) == flags,
+ ("sblock: flags invalid (0x%x)", flags));
+
+ if (flags & SBL_WAIT) {
+ if ((sb->sb_flags & SB_NOINTR) ||
+ (flags & SBL_NOINTR)) {
sx_xlock(&sb->sb_sx);
return (0);
}
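
The hunk above shows only the non-interruptible branch of the new flag dispatch in sblock(). As a rough sketch (not the verbatim patched function), the full logic implied by the SBL_* flags might read as below; the sx_xlock_sig() and sx_try_xlock() branches are assumptions about the remainder of the function, not text taken from this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/socketvar.h>

/*
 * Sketch of the complete flag dispatch in sblock().  The diff above shows
 * only the SB_NOINTR/SBL_NOINTR case; the interruptible and non-blocking
 * paths below are assumed, not quoted from the commit.
 */
int
sblock(struct sockbuf *sb, int flags)
{

	KASSERT((flags & SBL_VALID) == flags,
	    ("sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		if ((sb->sb_flags & SB_NOINTR) || (flags & SBL_NOINTR)) {
			/* Sleep for the lock; ignore signals entirely. */
			sx_xlock(&sb->sb_sx);
			return (0);
		}
		/* Interruptible sleep; may return EINTR or ERESTART. */
		return (sx_xlock_sig(&sb->sb_sx));
	}
	/* No SBL_WAIT: try once and fail rather than sleeping. */
	if (sx_try_xlock(&sb->sb_sx) == 0)
		return (EWOULDBLOCK);
	return (0);
}
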
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index d89b435..0de6d29 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -916,7 +916,7 @@ out:
}
#endif /*ZERO_COPY_SOCKETS*/
-#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
+#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
@@ -1884,10 +1884,16 @@ sorflush(struct socket *so)
* however, we have to initialize and destroy the mutex in the copy
* so that dom_dispose() and sbrelease() can lock it as needed.
*/
- (void) sblock(sb, M_WAITOK);
- SOCKBUF_LOCK(sb);
- sb->sb_flags |= SB_NOINTR;
- socantrcvmore_locked(so);
+
+ /*
+ * Dislodge threads currently blocked in receive and wait to acquire
+ * a lock against other simultaneous readers before clearing the
+ * socket buffer. Don't let our acquire be interrupted by a signal
+ * despite any existing socket disposition on interruptible waiting.
+ */
+ socantrcvmore(so);
+ (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
+
/*
* Invalidate/clear most of the sockbuf structure, but leave selinfo
* and mutex data unchanged.
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index acda9ae..d0daa82 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1863,8 +1863,13 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
}
}
- /* Protect against multiple writers to the socket. */
- (void) sblock(&so->so_snd, M_WAITOK);
+ /*
+ * Protect against multiple writers to the socket.
+ *
+ * XXXRW: Historically this has assumed non-interruptibility, so now
+ * we implement that, but possibly shouldn't.
+ */
+ (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
/*
* Loop through the pages of the file, starting with the requested
diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c
index ea7456f..5bad746 100644
--- a/sys/netinet/sctp_input.c
+++ b/sys/netinet/sctp_input.c
@@ -2509,7 +2509,8 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
atomic_add_int(&(*stcb)->asoc.refcnt, 1);
SCTP_TCB_UNLOCK((*stcb));
- sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
+ sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
+ 0);
SCTP_TCB_LOCK((*stcb));
atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
diff --git a/sys/netinet/sctp_peeloff.c b/sys/netinet/sctp_peeloff.c
index 424de2c..d49688e 100644
--- a/sys/netinet/sctp_peeloff.c
+++ b/sys/netinet/sctp_peeloff.c
@@ -134,7 +134,7 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
- sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
return (0);
@@ -230,7 +230,7 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
* And now the final hack. We move data in the pending side i.e.
* head to the new socket buffer. Let the GRUBBING begin :-0
*/
- sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
return (newso);
}
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
index 89b5e4f..25b2f92 100644
--- a/sys/netinet/sctputil.c
+++ b/sys/netinet/sctputil.c
@@ -4993,7 +4993,7 @@ sctp_sorecvmsg(struct socket *so,
sctp_misc_ints(SCTP_SORECV_ENTERPL,
rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
}
- error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
+ error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
sockbuf_lock = 1;
if (error) {
goto release_unlocked;
diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h
index a8163f3..09e58ab 100644
--- a/sys/sys/socketvar.h
+++ b/sys/sys/socketvar.h
@@ -273,6 +273,13 @@ struct xsocket {
*/
/*
+ * Flags to sblock().
+ */
+#define SBL_WAIT 0x00000001 /* Wait if not immediately available. */
+#define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */
+#define SBL_VALID (SBL_WAIT | SBL_NOINTR)
+
+/*
* Do we need to notify the other side when I/O is possible?
*/
#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
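
Taken together, the new bits give callers an idiom roughly like the following. The helper below is hypothetical and only illustrates the intended caller-side use of SBL_WAIT and SBL_NOINTR around sblock()/sbunlock(); it is not part of this commit.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

/*
 * Hypothetical helper showing the new caller-side idiom: map MSG_DONTWAIT
 * onto the SBL_* flags, take the receive socket-buffer lock, and release
 * it with sbunlock() when done.
 */
static int
example_lock_rcv(struct socket *so, int msg_flags)
{
	int error, sbl_flags;

	/* Non-blocking receives try the lock once; blocking ones sleep. */
	sbl_flags = (msg_flags & MSG_DONTWAIT) ? 0 : SBL_WAIT;
	error = sblock(&so->so_rcv, sbl_flags);
	if (error)
		return (error);	/* EWOULDBLOCK, EINTR, or ERESTART. */

	/* ... examine or drain so->so_rcv here ... */

	sbunlock(&so->so_rcv);
	return (0);
}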