| | | |
|---|---|---|
| author | rrs <rrs@FreeBSD.org> | 2007-09-08 11:35:11 +0000 |
| committer | rrs <rrs@FreeBSD.org> | 2007-09-08 11:35:11 +0000 |
| commit | 4dd82bd675126ae3087b47d4425b57c8c44aa790 (patch) | |
| tree | 33156f38f2aa559546f6d475b9777be0710945b3 /sys/netinet/sctp_output.c | |
| parent | 8c4e364ee08a6259e006283ec6d3d38f50f37d5f (diff) | |
- Locking compatibility changes. This involves adding
additional flags to many function calls. The flags only
get used in BSD when we compile with lock testing. These
flags allow Apple to escape the "giant" lock it holds on
the socket and have more fine-grained locking in the NKE.
It also allows us to test (with witness) the locking used
by Apple via a compile switch (manually applied).

Approved by: re@freebsd.org (B Mah)
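The mechanical pattern applied throughout the diff below is a trailing `so_locked` argument that is annotated as unused unless the build actually exercises socket-lock testing. Here is a minimal, self-contained sketch of that shape; the function `example_send`, its `printf` body, and the stand-in definitions of `SCTP_UNUSED`, `SCTP_SO_LOCKED`, and `SCTP_SO_NOT_LOCKED` are invented for illustration (the real definitions live in the SCTP headers):

```c
#include <stdio.h>

/* Stand-ins for the kernel-side definitions assumed by this sketch. */
#define SCTP_SO_NOT_LOCKED 0
#define SCTP_SO_LOCKED     1
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
#define SCTP_UNUSED __attribute__((unused))	/* placeholder for the real macro */
#endif

/*
 * The diff adds a trailing so_locked argument of this form to
 * sctp_send_initiate(), sctp_chunk_output(), sctp_send_abort_tcb(),
 * sctp_lowlevel_chunk_output(), and friends.
 */
static void
example_send(int data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Only these builds consult the flag: if the caller does not
	 * already hold the socket lock, the callee would take it here.
	 */
	if (so_locked == SCTP_SO_NOT_LOCKED) {
		/* lock the socket */
	}
#endif
	printf("sending chunk %d\n", data);
}

int
main(void)
{
	example_send(1, SCTP_SO_LOCKED);	/* caller holds the socket lock */
	example_send(2, SCTP_SO_NOT_LOCKED);	/* caller does not */
	return (0);
}
```

On a stock FreeBSD build the preprocessor arms compile the flag down to an ignored parameter, which is what lets the same call sites serve both the BSD and the Apple/NKE locking models.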
Diffstat (limited to 'sys/netinet/sctp_output.c')
-rw-r--r-- | sys/netinet/sctp_output.c | 110 |
1 file changed, 70 insertions, 40 deletions
```diff
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index 28b39d1..3b60f85 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -3270,7 +3270,12 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
 int nofragment_flag,
 int ecn_ok,
 struct sctp_tmit_chunk *chk,
- int out_of_asoc_ok)
+ int out_of_asoc_ok,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
 {
 /*
@@ -3456,7 +3461,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
 stcb,
 SCTP_FAILED_THRESHOLD,
- (void *)net);
+ (void *)net,
+ so_locked);
 net->dest_state &= ~SCTP_ADDR_REACHABLE;
 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
 /*
@@ -3840,7 +3846,11 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
 void
-sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
 {
 struct mbuf *m, *m_at, *mp_last;
 struct sctp_nets *net;
@@ -4108,7 +4118,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
 (struct sockaddr *)&net->ro._l_addr,
- m, 0, NULL, 0, 0, NULL, 0);
+ m, 0, NULL, 0, 0, NULL, 0, so_locked);
 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
@@ -5169,7 +5179,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
 p_len += padval;
 }
 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
- NULL, 0);
+ NULL, 0, SCTP_SO_NOT_LOCKED);
 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
 }
@@ -5279,7 +5289,7 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
 cause,
- &asoc->sent_queue);
+ &asoc->sent_queue, SCTP_SO_LOCKED);
 freed_spc += ret_spc;
 if (freed_spc >= dataout) {
 return;
@@ -5304,7 +5314,7 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
 SCTP_RESPONSE_TO_USER_REQ |
 SCTP_NOTIFY_DATAGRAM_UNSENT,
- &asoc->send_queue);
+ &asoc->send_queue, SCTP_SO_LOCKED);
 freed_spc += ret_spc;
 if (freed_spc >= dataout) {
@@ -5670,7 +5680,11 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
 int *num_out,
 int *reason_code,
 int control_only, int *cwnd_full, int from_where,
- struct timeval *now, int *now_filled, int frag_point);
+ struct timeval *now, int *now_filled, int frag_point, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
 static void
 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
@@ -5720,7 +5734,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
 atomic_add_int(&stcb->asoc.refcnt, 1);
 sctp_abort_an_association(inp, stcb,
 SCTP_RESPONSE_TO_USER_REQ,
- m);
+ m, SCTP_SO_NOT_LOCKED);
 /*
 * sctp_abort_an_association calls sctp_free_asoc()
 * free association will NOT free it since we
@@ -5814,7 +5828,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
 atomic_add_int(&stcb->asoc.refcnt, 1);
 sctp_abort_an_association(stcb->sctp_ep, stcb,
 SCTP_RESPONSE_TO_USER_REQ,
- NULL);
+ NULL, SCTP_SO_NOT_LOCKED);
 atomic_add_int(&stcb->asoc.refcnt, -1);
 goto no_chunk_output;
 }
@@ -5835,7 +5849,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
 do_chunk_output = 0;
 }
 if (do_chunk_output)
- sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
 else if (added_control) {
 int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0;
 struct timeval now;
@@ -5843,7 +5857,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
- &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
+ &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
 }
 no_chunk_output:
 if (ret) {
@@ -6858,7 +6872,11 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
 int *num_out,
 int *reason_code,
 int control_only, int *cwnd_full, int from_where,
- struct timeval *now, int *now_filled, int frag_point)
+ struct timeval *now, int *now_filled, int frag_point, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
 {
 /*
 * Ok this is the generic chunk service queue. we must do the
@@ -7268,7 +7286,7 @@ again_one_more_time:
 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
 (struct sockaddr *)&net->ro._l_addr,
 outchain, auth_offset, auth,
- no_fragmentflg, 0, NULL, asconf))) {
+ no_fragmentflg, 0, NULL, asconf, so_locked))) {
 if (error == ENOBUFS) {
 asoc->ifp_had_enobuf = 1;
 SCTP_STAT_INCR(sctps_lowlevelerr);
@@ -7548,7 +7566,7 @@ again_one_more_time:
 no_fragmentflg,
 bundle_at,
 data_list[0],
- asconf))) {
+ asconf, so_locked))) {
 /* error, we could not output */
 if (error == ENOBUFS) {
 SCTP_STAT_INCR(sctps_lowlevelerr);
@@ -8110,7 +8128,11 @@ static int
 sctp_chunk_retransmission(struct sctp_inpcb *inp,
 struct sctp_tcb *stcb,
 struct sctp_association *asoc,
- int *cnt_out, struct timeval *now, int *now_filled, int *fr_done)
+ int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
 {
 /*-
 * send out one MTU of retransmission. If fast_retransmit is
@@ -8223,7 +8245,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
 (struct sockaddr *)&chk->whoTo->ro._l_addr,
 m, auth_offset,
- auth, no_fragmentflg, 0, NULL, asconf))) {
+ auth, no_fragmentflg, 0, NULL, asconf, so_locked))) {
 SCTP_STAT_INCR(sctps_lowlevelerr);
 return (error);
 }
@@ -8272,7 +8294,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
 chk->snd_count,
 sctp_max_retran_chunk);
 atomic_add_int(&stcb->asoc.refcnt, 1);
- sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
 SCTP_TCB_LOCK(stcb);
 atomic_subtract_int(&stcb->asoc.refcnt, 1);
 return (SCTP_RETRAN_EXIT);
@@ -8461,7 +8483,7 @@ one_chunk_around:
 /* Now lets send it, if there is anything to send :> */
 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
 (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
- auth, no_fragmentflg, 0, NULL, asconf))) {
+ auth, no_fragmentflg, 0, NULL, asconf, so_locked))) {
 /* error, we could not output */
 SCTP_STAT_INCR(sctps_lowlevelerr);
 return (error);
@@ -8620,7 +8642,12 @@ sctp_timer_validation(struct sctp_inpcb *inp,
 void
 sctp_chunk_output(struct sctp_inpcb *inp,
 struct sctp_tcb *stcb,
- int from_where)
+ int from_where,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
 {
 /*-
 * Ok this is the generic chunk service queue. we must do the
@@ -8686,12 +8713,12 @@ sctp_chunk_output(struct sctp_inpcb *inp,
 */
 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out,
 &reason_code, 1, &cwnd_full, from_where,
- &now, &now_filled, frag_point);
+ &now, &now_filled, frag_point, so_locked);
 return;
 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
 /* if its not from a HB then do it */
 fr_done = 0;
- ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done);
+ ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
 if (fr_done) {
 tot_frs++;
 }
@@ -8711,7 +8738,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
 */
 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out,
 &reason_code, 1, &cwnd_full, from_where,
- &now, &now_filled, frag_point);
+ &now, &now_filled, frag_point, so_locked);
 #ifdef SCTP_AUDITING_ENABLED
 sctp_auditing(8, inp, stcb, NULL);
 #endif
@@ -8738,7 +8765,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
 #endif
 /* Push out any control */
 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
- &now, &now_filled, frag_point);
+ &now, &now_filled, frag_point, so_locked);
 return;
 }
 if (tot_frs > asoc->max_burst) {
@@ -8811,7 +8838,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
 do {
 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
 &reason_code, 0, &cwnd_full, from_where,
- &now, &now_filled, frag_point);
+ &now, &now_filled, frag_point, so_locked);
 if (error) {
 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
 if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
@@ -9092,7 +9119,6 @@ sctp_send_sack(struct sctp_tcb *stcb)
 int num_dups = 0;
 int space_req;
- a_chk = NULL;
 asoc = &stcb->asoc;
 SCTP_TCB_LOCK_ASSERT(stcb);
@@ -9354,7 +9380,11 @@
 void
-sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
+sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
 {
 struct mbuf *m_abort;
 struct mbuf *m_out = NULL, *m_end = NULL;
@@ -9409,7 +9439,6 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
 abort->ch.chunk_flags = 0;
 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
- /* prepend and fill in the SCTP header */
 SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT);
 if (m_out == NULL) {
@@ -9426,7 +9455,7 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
 stcb->asoc.primary_destination,
 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
- m_out, auth_offset, auth, 1, 0, NULL, 0);
+ m_out, auth_offset, auth, 1, 0, NULL, 0, so_locked);
 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
 }
@@ -9455,7 +9484,7 @@ sctp_send_shutdown_complete(struct sctp_tcb *stcb,
 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
 (struct sockaddr *)&net->ro._l_addr,
- m_shutdown_comp, 0, NULL, 1, 0, NULL, 0);
+ m_shutdown_comp, 0, NULL, 1, 0, NULL, 0, SCTP_SO_NOT_LOCKED);
 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
 return;
 }
@@ -10877,6 +10906,7 @@ sctp_lower_sosend(struct socket *so,
 net = NULL;
 stcb = NULL;
 asoc = NULL;
+ t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
 if (inp == NULL) {
 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
@@ -11422,7 +11452,7 @@ sctp_lower_sosend(struct socket *so,
 /* release this lock, otherwise we hang on ourselves */
 sctp_abort_an_association(stcb->sctp_ep, stcb,
 SCTP_RESPONSE_TO_USER_REQ,
- mm);
+ mm, SCTP_SO_LOCKED);
 /* now relock the stcb so everything is sane */
 hold_tcblock = 0;
 stcb = NULL;
@@ -11777,7 +11807,7 @@ sctp_lower_sosend(struct socket *so,
 queue_only_for_init = 0;
 queue_only = 0;
 } else {
- sctp_send_initiate(inp, stcb);
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
 queue_only_for_init = 0;
 queue_only = 1;
@@ -11797,12 +11827,12 @@ sctp_lower_sosend(struct socket *so,
 hold_tcblock = 1;
 sctp_chunk_output(inp, stcb,
- SCTP_OUTPUT_FROM_USR_SEND);
+ SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
 }
 } else {
 sctp_chunk_output(inp, stcb,
- SCTP_OUTPUT_FROM_USR_SEND);
+ SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
 }
 if (hold_tcblock == 1) {
 SCTP_TCB_UNLOCK(stcb);
@@ -11960,7 +11990,7 @@ dataless_eof:
 }
 sctp_abort_an_association(stcb->sctp_ep, stcb,
 SCTP_RESPONSE_TO_USER_REQ,
- NULL);
+ NULL, SCTP_SO_LOCKED);
 /*
 * now relock the stcb so everything
 * is sane
@@ -12039,7 +12069,7 @@ skip_out_eof:
 queue_only_for_init = 0;
 queue_only = 0;
 } else {
- sctp_send_initiate(inp, stcb);
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
 queue_only_for_init = 0;
 queue_only = 1;
@@ -12053,11 +12083,11 @@ skip_out_eof:
 * send
 */
 if (SCTP_TCB_TRYLOCK(stcb)) {
- sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
 hold_tcblock = 1;
 }
 } else {
- sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
 }
 } else if ((queue_only == 0) &&
 (stcb->asoc.peers_rwnd == 0) &&
@@ -12067,7 +12097,7 @@ skip_out_eof:
 hold_tcblock = 1;
 SCTP_TCB_LOCK(stcb);
 }
- sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
 } else if (some_on_control) {
 int num_out, reason, cwnd_full, frag_point;
@@ -12078,7 +12108,7 @@ skip_out_eof:
 }
 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
- &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
+ &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
 }
 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d",
 queue_only, stcb->asoc.peers_rwnd, un_sent,
```
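Two calling conventions are visible in the hunks above: paths where the caller already holds the socket lock (for example the sends issued from sctp_lower_sosend()) pass SCTP_SO_LOCKED, standalone control sends such as those in sctp_sendall_iterator(), sctp_send_initiate_ack(), and sctp_send_shutdown_complete() pass SCTP_SO_NOT_LOCKED, and routines that themselves take so_locked (sctp_chunk_output(), sctp_med_chunk_output(), sctp_chunk_retransmission(), sctp_lowlevel_chunk_output(), sctp_send_abort_tcb()) simply forward the caller's value. A sketch of that forwarding, with invented function names and stand-in constants in place of the real call chain:

```c
/* Stand-ins for the real constants defined in the SCTP headers. */
#define SCTP_SO_NOT_LOCKED 0
#define SCTP_SO_LOCKED     1

/* Bottom of the chain: only Apple / lock-testing builds consult the flag. */
static int
example_lowlevel_output(int so_locked)
{
	(void)so_locked;
	return (0);
}

/* Mid-level routine: hands the caller's lock state straight down. */
static int
example_med_output(int so_locked)
{
	return (example_lowlevel_output(so_locked));
}

/* Top-level entry: only the outermost caller decides the lock state. */
int
example_chunk_output(int so_locked)
{
	return (example_med_output(so_locked));
}
```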