author     Tsutomu Fujii <t-fujii@nb.jp.nec.com>    2006-06-17 22:58:28 -0700
committer  David S. Miller <davem@davemloft.net>    2006-06-17 22:58:28 -0700
commit     d7c2c9e3977e4312d093ac092761798d4d47c9e0 (patch)
tree       1228ed1e6729f8dd6069698e9221ac35790cf0c5
parent     503b55fd77d11381b1950d1651d3bc782c0cc2cd (diff)
download   op-kernel-dev-d7c2c9e3977e4312d093ac092761798d4d47c9e0.zip
           op-kernel-dev-d7c2c9e3977e4312d093ac092761798d4d47c9e0.tar.gz
[SCTP]: Send only 1 window update SACK per message.
Right now, every time we increase our rwnd by more than MTU bytes, we
trigger a SACK. When processing large messages, this will generate a
SACK for almost every other SCTP fragment. However, since we are
freeing the entire message at the same time, we might as well collapse
the SACK generation to 1.

Signed-off-by: Tsutomu Fujii <t-fujii@nb.jp.nec.com>
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
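[Editorial illustration, not part of the patch.] The effect described above can be modeled with a small user-space C program: a window-update SACK fires whenever the advertised receive window has grown by more than one MTU since the last SACK. The MTU value, the fragment sizes, and the rwnd_increase() helper below are simplifying assumptions, deliberately cruder than the real sctp_assoc_rwnd_increase(); the sketch only shows why per-fragment window increases trigger a SACK roughly every other fragment, while a single per-message increase triggers at most one.

    /*
     * Hypothetical model of the window-update SACK trigger (not kernel
     * code): generate a SACK when the advertised window has grown by
     * more than one MTU since the last SACK was sent.
     */
    #include <stdio.h>

    #define MTU 1500

    struct rcv_state {
            unsigned int rwnd;         /* current advertised window      */
            unsigned int rwnd_at_sack; /* window advertised in last SACK */
            unsigned int sacks;        /* window-update SACKs generated  */
    };

    /* Simplified stand-in for the rwnd-increase path: grow the window
     * and decide whether a window-update SACK is needed. */
    static void rwnd_increase(struct rcv_state *s, unsigned int len)
    {
            s->rwnd += len;
            if (s->rwnd - s->rwnd_at_sack > MTU) {
                    s->sacks++;
                    s->rwnd_at_sack = s->rwnd;
            }
    }

    int main(void)
    {
            unsigned int frag_len = 1452; /* assumed SCTP fragment payload */
            unsigned int nfrags = 8;      /* one large reassembled message */
            unsigned int i;
            struct rcv_state per_frag = { 0, 0, 0 };
            struct rcv_state per_msg  = { 0, 0, 0 };

            /* Old behaviour: one rwnd increase per freed fragment. */
            for (i = 0; i < nfrags; i++)
                    rwnd_increase(&per_frag, frag_len);

            /* New behaviour: one rwnd increase for the whole message. */
            rwnd_increase(&per_msg, frag_len * nfrags);

            printf("per-fragment updates: %u SACKs\n", per_frag.sacks);
            printf("per-message update:   %u SACKs\n", per_msg.sacks);
            return 0;
    }

With these illustrative numbers the per-fragment loop reports 4 SACKs for 8 fragments, while the collapsed per-message update reports 1.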
-rw-r--r--  net/sctp/ulpevent.c | 30
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index ba97f97..ee236784 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -51,6 +51,8 @@
 static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
                                        struct sctp_association *asoc);
 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
+static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
+
 
 /* Initialize an ULP event from an given skb. */
 SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
@@ -883,6 +885,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
 {
         struct sk_buff *skb, *frag;
+        unsigned int len;
 
         /* Current stack structures assume that the rcv buffer is
          * per socket. For UDP style sockets this is not true as
@@ -892,7 +895,30 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
          */
 
         skb = sctp_event2skb(event);
-        sctp_assoc_rwnd_increase(event->asoc, skb_headlen(skb));
+        len = skb->len;
+
+        if (!skb->data_len)
+                goto done;
+
+        /* Don't forget the fragments. */
+        for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
+                /* NOTE: skb_shinfos are recursive. Although IP returns
+                 * skb's with only 1 level of fragments, SCTP reassembly can
+                 * increase the levels.
+                 */
+                sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
+        }
+
+done:
+        sctp_assoc_rwnd_increase(event->asoc, len);
+        sctp_ulpevent_release_owner(event);
+}
+
+static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
+{
+        struct sk_buff *skb, *frag;
+
+        skb = sctp_event2skb(event);
 
         if (!skb->data_len)
                 goto done;
@@ -903,7 +929,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
                  * skb's with only 1 level of fragments, SCTP reassembly can
                  * increase the levels.
                  */
-                sctp_ulpevent_release_data(sctp_skb2event(frag));
+                sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
         }
 
 done:
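[Editorial reading aid, not kernel code.] After the patch, the control flow splits in two: the top-level release walks the fragment list and performs a single window update for the whole message, while the per-fragment helper only recurses into nested fragment lists and never touches the window. The struct buf type and its fields below are simplified stand-ins for struct sk_buff, used only to sketch that shape.

    /*
     * Simplified model of the split: release_data() accounts the full
     * message length once; release_frag_data() recurses into nested
     * fragment lists without updating the window.
     */
    #include <stdio.h>

    struct buf {
            unsigned int len;       /* total length, fragments included */
            unsigned int data_len;  /* bytes held in fragments          */
            struct buf *frag_list;  /* first fragment, if any           */
            struct buf *next;       /* next fragment at this level      */
    };

    static void release_frag_data(struct buf *b)
    {
            struct buf *frag;

            if (!b->data_len)
                    return;
            /* Fragment lists may nest; recurse one level down. */
            for (frag = b->frag_list; frag; frag = frag->next)
                    release_frag_data(frag);
    }

    static void release_data(struct buf *b, unsigned int *rwnd)
    {
            struct buf *frag;
            unsigned int len = b->len;

            if (b->data_len) {
                    for (frag = b->frag_list; frag; frag = frag->next)
                            release_frag_data(frag);
            }
            *rwnd += len; /* one window update per message */
    }

    int main(void)
    {
            struct buf f2  = { 1452, 0, NULL, NULL };
            struct buf f1  = { 1452, 0, NULL, &f2 };
            struct buf msg = { 4356, 2904, &f1, NULL };
            unsigned int rwnd = 0;

            release_data(&msg, &rwnd);
            printf("rwnd increased by %u in a single update\n", rwnd);
            return 0;
    }

Keeping the window update out of the per-fragment path is what collapses the SACK generation to one per message.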