author		rwatson <rwatson@FreeBSD.org>	2008-06-30 18:26:43 +0000
committer	rwatson <rwatson@FreeBSD.org>	2008-06-30 18:26:43 +0000
commit		77ca9e76cd21adf0e8dbac842a5329139ff51a70 (patch)
tree		8889f29afde8e61171066e7c614ff205c478c041 /sys/netinet
parent		5cb2efe53094bce90bb19aca0b587af22d76f520 (diff)
download	FreeBSD-src-77ca9e76cd21adf0e8dbac842a5329139ff51a70.zip
		FreeBSD-src-77ca9e76cd21adf0e8dbac842a5329139ff51a70.tar.gz
In udp_append() and udp_input(), make use of read locking on inpcbs
rather than write locking: while we need to maintain a valid reference
to the inpcb and keep its state stable, no protocol layer state is
modified during an IPv4 UDP receive -- there are only changes at the
socket layer, which is separately protected by socket locking.

While parallel, concurrent receive on a single UDP socket is currently
relatively unusual, introducing read locking in the transmit path,
allowing concurrent receive and transmit, will significantly improve
performance for loads such as BIND, memcached, etc.

MFC after:	2 months
Tested by:	gnn, kris, ps
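The locking split described above is easier to see outside the kernel. The sketch below is a user-space analogue, not the kernel code itself: a pcb-like object guarded by an rwlock that the delivery path only takes shared (the analogue of INP_RLOCK()), while the one piece of state delivery does modify, the socket buffer, carries its own mutex, mirroring the separate socket-layer locking the message refers to. All names here (struct pcb, struct sockbuf, deliver(), set_minttl()) are invented for illustration and do not come from sys/netinet.

/*
 * User-space analogue of the pattern in this commit: the pcb is
 * protected by an rwlock that the receive path only acquires shared,
 * because delivery reads protocol state but never modifies it; the
 * only mutation happens in the socket buffer, under its own mutex.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct sockbuf {
	pthread_mutex_t	 sb_mtx;	/* socket-layer lock */
	char		 sb_buf[4096];
	size_t		 sb_len;
};

struct pcb {
	pthread_rwlock_t pcb_lock;	/* protocol-layer (inpcb-like) lock */
	int		 pcb_minttl;	/* read-only state checked on receive */
	struct sockbuf	 pcb_so;
};

/* Receive path: shared lock on the pcb, exclusive lock on the sockbuf. */
static void
deliver(struct pcb *p, const char *data, size_t len, int ttl)
{
	pthread_rwlock_rdlock(&p->pcb_lock);	/* INP_RLOCK analogue */
	if (p->pcb_minttl == 0 || ttl >= p->pcb_minttl) {
		pthread_mutex_lock(&p->pcb_so.sb_mtx);
		if (p->pcb_so.sb_len + len <= sizeof(p->pcb_so.sb_buf)) {
			memcpy(p->pcb_so.sb_buf + p->pcb_so.sb_len, data, len);
			p->pcb_so.sb_len += len;
		}
		pthread_mutex_unlock(&p->pcb_so.sb_mtx);
	}
	pthread_rwlock_unlock(&p->pcb_lock);	/* INP_RUNLOCK analogue */
}

/* A path that rewrites pcb state still takes the lock exclusively. */
static void
set_minttl(struct pcb *p, int minttl)
{
	pthread_rwlock_wrlock(&p->pcb_lock);	/* INP_WLOCK analogue */
	p->pcb_minttl = minttl;
	pthread_rwlock_unlock(&p->pcb_lock);
}

int
main(void)
{
	struct pcb p = { .pcb_lock = PTHREAD_RWLOCK_INITIALIZER,
	    .pcb_so = { .sb_mtx = PTHREAD_MUTEX_INITIALIZER } };

	set_minttl(&p, 64);
	deliver(&p, "datagram", 8, 255);
	printf("%zu bytes buffered\n", p.pcb_so.sb_len);
	return (0);
}

Because pthread_rwlock_rdlock() admits any number of concurrent readers, several threads can sit in deliver() on the same pcb at once; only a path that actually rewrites pcb state, such as the hypothetical set_minttl() above (or bind/connect in the real stack), needs the exclusive lock. That is the same division of labour the diff introduces for the IPv4 UDP receive path.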
Diffstat (limited to 'sys/netinet')
-rw-r--r--	sys/netinet/udp_usrreq.c	|  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index ca4c83a..99f40aa 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -195,7 +195,7 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
 	struct sockaddr_in6 udp_in6;
 #endif
 
-	INP_WLOCK_ASSERT(inp);
+	INP_RLOCK_ASSERT(inp);
 
 #ifdef IPSEC
 	/* Check AH/ESP integrity. */
@@ -407,7 +407,7 @@ udp_input(struct mbuf *m, int off)
 			    inp->inp_fport != uh->uh_sport)
 				continue;
 
-			INP_WLOCK(inp);
+			INP_RLOCK(inp);
 
 			/*
 			 * Handle socket delivery policy for any-source
@@ -464,7 +464,7 @@ udp_input(struct mbuf *m, int off)
 					}
 				}
 				if (blocked != 0) {
-					INP_WUNLOCK(inp);
+					INP_RUNLOCK(inp);
 					continue;
 				}
 			}
@@ -475,7 +475,7 @@ udp_input(struct mbuf *m, int off)
 				if (n != NULL)
 					udp_append(last, ip, n, iphlen +
 					    sizeof(struct udphdr), &udp_in);
-				INP_WUNLOCK(last);
+				INP_RUNLOCK(last);
 			}
 			last = inp;
 			/*
@@ -502,7 +502,7 @@ udp_input(struct mbuf *m, int off)
 		}
 		udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
 		    &udp_in);
-		INP_WUNLOCK(last);
+		INP_RUNLOCK(last);
 		INP_INFO_RUNLOCK(&udbinfo);
 		return;
 	}
@@ -541,17 +541,17 @@ udp_input(struct mbuf *m, int off)
 	/*
 	 * Check the minimum TTL for socket.
 	 */
-	INP_WLOCK(inp);
+	INP_RLOCK(inp);
 	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl)
 		goto badheadlocked;
 	udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in);
-	INP_WUNLOCK(inp);
+	INP_RUNLOCK(inp);
 	INP_INFO_RUNLOCK(&udbinfo);
 	return;
 
 badheadlocked:
 	if (inp)
-		INP_WUNLOCK(inp);
+		INP_RUNLOCK(inp);
 	INP_INFO_RUNLOCK(&udbinfo);
 badunlocked:
 	m_freem(m);