author	bp <bp@FreeBSD.org>	2002-09-18 07:38:10 +0000
committer	bp <bp@FreeBSD.org>	2002-09-18 07:38:10 +0000
commit	549a16a759511d5c6bcff7ab98c0a91f7810dfc8 (patch)
tree	4dcea3a71c3340936664264d8fc34728040f43f3
parent	c30c4f6198157709ed26516fd8d8a998c54c3627 (diff)
Increase send/receive queue to accommodate large readx/writex requests.
Receive packets in small pieces (NB_SORECEIVE_CHUNK), so TCP slow start will get its ACKs faster.

Obtained from:	Darwin
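The receive-side change can be sketched in user space for illustration: pull a known-length payload from a TCP socket in bounded chunks, so the local stack keeps acknowledging data while the sender is still in slow start, rather than issuing one MSG_WAITALL read for the whole payload. The helper name, buffer handling, and CHUNK constant below are illustrative only; they are not part of this commit.

#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>
#include <stddef.h>

#define CHUNK	(8 * 1024)	/* mirrors the spirit of NB_SORECEIVE_CHUNK */

/* Read exactly len bytes from a connected TCP socket, CHUNK bytes at a time. */
static int
read_payload(int fd, char *buf, size_t len)
{
	size_t resid = len;

	while (resid > 0) {
		size_t want = resid < CHUNK ? resid : CHUNK;
		ssize_t n = recv(fd, buf + (len - resid), want, MSG_WAITALL);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* retry interrupted receives */
			return (-1);		/* hard socket error */
		}
		if (n == 0)
			return (-1);		/* peer closed: short packet */
		resid -= (size_t)n;
	}
	return (0);
}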
-rw-r--r--	sys/netsmb/smb_trantcp.c	88
-rw-r--r--	sys/netsmb/smb_trantcp.h	12
2 files changed, 78 insertions(+), 22 deletions(-)
diff --git a/sys/netsmb/smb_trantcp.c b/sys/netsmb/smb_trantcp.c
index bb017bf..497d95c 100644
--- a/sys/netsmb/smb_trantcp.c
+++ b/sys/netsmb/smb_trantcp.c
@@ -66,8 +66,8 @@
#define M_NBDATA M_PCB
-static int smb_tcpsndbuf = 10 * 1024;
-static int smb_tcprcvbuf = 10 * 1024;
+static int smb_tcpsndbuf = NB_SNDQ - 1;
+static int smb_tcprcvbuf = NB_RCVQ - 1;
SYSCTL_DECL(_net_smb);
SYSCTL_INT(_net_smb, OID_AUTO, tcpsndbuf, CTLFLAG_RW, &smb_tcpsndbuf, 0, "");
@@ -395,9 +395,9 @@ nbssn_recv(struct nbpcb *nbp, struct mbuf **mpp, int *lenp,
{
struct socket *so = nbp->nbp_tso;
struct uio auio;
- struct mbuf *m;
+ struct mbuf *m, *tm, *im;
u_int8_t rpcode;
- int len;
+ int len, resid;
int error, rcvflg;
if (so == NULL)
@@ -405,8 +405,12 @@ nbssn_recv(struct nbpcb *nbp, struct mbuf **mpp, int *lenp,
if (mpp)
*mpp = NULL;
+ m = NULL;
for(;;) {
- m = NULL;
+ /*
+ * Poll for a response header.
+ * If we don't have one waiting, return.
+ */
error = nbssn_recvhdr(nbp, &len, &rpcode, MSG_DONTWAIT, td);
if (so->so_state &
(SS_ISDISCONNECTING | SS_ISDISCONNECTED | SS_CANTRCVMORE)) {
@@ -418,32 +422,76 @@ nbssn_recv(struct nbpcb *nbp, struct mbuf **mpp, int *lenp,
return error;
if (len == 0 && nbp->nbp_state != NBST_SESSION)
break;
+ /* no data, try again */
if (rpcode == NB_SSN_KEEPALIVE)
continue;
- bzero(&auio, sizeof(auio));
- auio.uio_resid = len;
- auio.uio_td = td;
- do {
+
+ /*
+ * Loop, blocking, for data following the response header.
+ *
+ * Note that we can't simply block here with MSG_WAITALL for the
+ * entire response size, as it may be larger than the TCP
+ * slow-start window that the sender employs. This will result
+ * in the sender stalling until the delayed ACK is sent, then
+ * resuming slow-start, resulting in very poor performance.
+ *
+ * Instead, we never request more than NB_SORECEIVE_CHUNK
+ * bytes at a time, resulting in an ack being pushed by
+ * the TCP code at the completion of each call.
+ */
+ resid = len;
+ while (resid > 0) {
+ tm = NULL;
rcvflg = MSG_WAITALL;
- error = so->so_proto->pr_usrreqs->pru_soreceive
- (so, (struct sockaddr **)NULL,
- &auio, &m, (struct mbuf **)NULL, &rcvflg);
- } while (error == EWOULDBLOCK || error == EINTR ||
+ bzero(&auio, sizeof(auio));
+ auio.uio_resid = min(resid, NB_SORECEIVE_CHUNK);
+ auio.uio_td = td;
+ resid -= auio.uio_resid;
+ /*
+ * Spin until we have collected everything in
+ * this chunk.
+ */
+ do {
+ rcvflg = MSG_WAITALL;
+ error = so->so_proto->pr_usrreqs->pru_soreceive
+ (so, (struct sockaddr **)NULL,
+ &auio, &tm, (struct mbuf **)NULL, &rcvflg);
+ } while (error == EWOULDBLOCK || error == EINTR ||
error == ERESTART);
- if (error)
- break;
- if (auio.uio_resid > 0) {
- SMBERROR("packet is shorter than expected\n");
- error = EPIPE;
- break;
+ if (error)
+ goto out;
+ /* short return guarantees unhappiness */
+ if (auio.uio_resid > 0) {
+ SMBERROR("packet is shorter than expected\n");
+ error = EPIPE;
+ goto out;
+ }
+ /* append received chunk to previous chunk(s) */
+ if (m == NULL) {
+ m = tm;
+ } else {
+ /*
+ * Just glue the new chain on the end.
+ * Consumer will pullup as required.
+ */
+ for (im = m; im->m_next != NULL; im = im->m_next)
+ ;
+ im->m_next = tm;
+ }
}
+ /* got a session/message packet? */
if (nbp->nbp_state == NBST_SESSION &&
rpcode == NB_SSN_MESSAGE)
break;
+ /* drop packet and try for another */
NBDEBUG("non-session packet %x\n", rpcode);
- if (m)
+ if (m) {
m_freem(m);
+ m = NULL;
+ }
}
+
+out:
if (error) {
if (m)
m_freem(m);
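The chunk-appending step above open-codes a walk to the tail of the accumulated mbuf chain. A kernel-style sketch of the same step using m_cat(9), which performs that tail walk internally; the helper name is illustrative, not code from this commit:

#include <sys/param.h>
#include <sys/mbuf.h>

/* Glue a freshly received chunk onto the end of the accumulated chain. */
static void
append_chunk(struct mbuf **headp, struct mbuf *chunk)
{
	if (*headp == NULL)
		*headp = chunk;		/* first chunk becomes the head */
	else
		m_cat(*headp, chunk);	/* walks to the tail and links (or copies) */
}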
diff --git a/sys/netsmb/smb_trantcp.h b/sys/netsmb/smb_trantcp.h
index d160fa4..482d77e 100644
--- a/sys/netsmb/smb_trantcp.h
+++ b/sys/netsmb/smb_trantcp.h
@@ -78,8 +78,16 @@ struct nbpcb {
/*
* Nominal space allocated per a NETBIOS socket.
*/
-#define NB_SNDQ (10 * 1024)
-#define NB_RCVQ (20 * 1024)
+#define NB_SNDQ (64 * 1024)
+#define NB_RCVQ (64 * 1024)
+
+/*
+ * TCP slowstart presents a problem in conjunction with large
+ * reads. To ensure a steady stream of ACKs while reading using
+ * large transaction sizes, we call soreceive() with a smaller
+ * buffer size. See nbssn_recv().
+ */
+#define NB_SORECEIVE_CHUNK (8 * 1024)
extern struct smb_tran_desc smb_tran_nbtcp_desc;
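The buffer-size half of the change has a rough user-space analogue: requesting about 64 KB of send and receive space on a TCP socket so a single readx/writex-sized transfer fits without throttling the sender. Only the 64 * 1024 figure comes from the new NB_SNDQ/NB_RCVQ values; the helper below is otherwise illustrative.

#include <sys/types.h>
#include <sys/socket.h>

/* Request roughly 64 KB of send and receive buffer space. */
static int
reserve_bufs(int fd)
{
	int sz = 64 * 1024;	/* cf. NB_SNDQ / NB_RCVQ above */

	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz)) == -1)
		return (-1);
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz)) == -1)
		return (-1);
	return (0);
}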