author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-24 11:45:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-24 11:45:00 -0700
commit     10c993a6b5418cb1026775765ba4c70ffb70853d (patch)
tree       717deba79b938c2f3f786ff6fe908d30582f06f8 /net
parent     c328d54cd4ad120d76284e46dcca6c6cf996154a (diff)
parent     ca456252db0521e5e88024fa2b67535e9739e030 (diff)
Merge branch 'for-linus' of git://linux-nfs.org/~bfields/linux
* 'for-linus' of git://linux-nfs.org/~bfields/linux: (52 commits)
knfsd: clear both setuid and setgid whenever a chown is done
knfsd: get rid of imode variable in nfsd_setattr
SUNRPC: Use unsigned loop and array index in svc_init_buffer()
SUNRPC: Use unsigned index when looping over arrays
SUNRPC: Update RPC server's TCP record marker decoder
SUNRPC: RPC server still uses 2.4 method for disabling TCP Nagle
NLM: don't let lockd exit on unexpected svc_recv errors (try #2)
NFS: don't let nfs_callback_svc exit on unexpected svc_recv errors (try #2)
Use a zero sized array for raw field in struct fid
nfsd: use static memory for callback program and stats
SUNRPC: remove svc_create_thread()
nfsd: fix comment
lockd: Fix stale nlmsvc_unlink_block comment
NFSD: Strip __KERNEL__ testing from unexported header files.
sunrpc: make token header values less confusing
gss_krb5: consistently use unsigned for seqnum
NFSD: Remove NFSv4 dependency on NFSv3
SUNRPC: Remove PROC_FS dependency
NFSD: Use "depends on" for PROC_FS dependency
nfsd: move most of fh_verify to separate function
...
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/auth_gss/gss_generic_token.c   |   4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c     |   6
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c       |   9
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c     |   4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_unseal.c     |   2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c       |   8
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_seal.c      |   4
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c         |   9
-rw-r--r--  net/sunrpc/cache.c                        |   1
-rw-r--r--  net/sunrpc/svc.c                          |  25
-rw-r--r--  net/sunrpc/svc_xprt.c                     |  30
-rw-r--r--  net/sunrpc/svcauth_unix.c                 | 118
-rw-r--r--  net/sunrpc/svcsock.c                      |  29
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  |   2
14 files changed, 154 insertions, 97 deletions
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index ea8c92e..d83b881 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -148,7 +148,7 @@ int
 g_token_size(struct xdr_netobj *mech, unsigned int body_size)
 {
 	/* set body_size to sequence contents size */
-	body_size += 4 + (int) mech->len;         /* NEED overflow check */
+	body_size += 2 + (int) mech->len;         /* NEED overflow check */
 	return(1 + der_length_size(body_size) + body_size);
 }
 
@@ -161,7 +161,7 @@ void
 g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
 {
 	*(*buf)++ = 0x60;
-	der_write_length(buf, 4 + mech->len + body_size);
+	der_write_length(buf, 2 + mech->len + body_size);
 	*(*buf)++ = 0x06;
 	*(*buf)++ = (unsigned char) mech->len;
 	TWRITE_STR(*buf, mech->data, ((int) mech->len));
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0dd7923..1d52308 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -66,8 +66,8 @@ krb5_encrypt(
 		goto out;
 
 	if (crypto_blkcipher_ivsize(tfm) > 16) {
-		dprintk("RPC:       gss_k5encrypt: tfm iv size to large %d\n",
-			 crypto_blkcipher_ivsize(tfm));
+		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
+			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
 
@@ -102,7 +102,7 @@ krb5_decrypt(
 		goto out;
 
 	if (crypto_blkcipher_ivsize(tfm) > 16) {
-		dprintk("RPC:       gss_k5decrypt: tfm iv size to large %d\n",
+		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
 			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index dedcbd61..5f1d36d 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -87,10 +87,10 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 
 	now = get_seconds();
 
-	token->len = g_token_size(&ctx->mech_used, 22);
+	token->len = g_token_size(&ctx->mech_used, 24);
 
 	ptr = token->data;
-	g_make_token_header(&ctx->mech_used, 22, &ptr);
+	g_make_token_header(&ctx->mech_used, 24, &ptr);
 
 	*ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
 	*ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
@@ -109,15 +109,14 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 			  md5cksum.data, md5cksum.len))
 		return GSS_S_FAILURE;
 
-	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-	       KRB5_CKSUM_LENGTH);
+	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = ctx->seq_send++;
 	spin_unlock(&krb5_seq_lock);
 
 	if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
-			      ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))
+			      seq_send, krb5_hdr + 16, krb5_hdr + 8))
 		return GSS_S_FAILURE;
 
 	return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 43f3421..f160be6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -43,7 +43,7 @@ s32
 krb5_make_seq_num(struct crypto_blkcipher *key,
 		int direction,
-		s32 seqnum,
+		u32 seqnum,
 		unsigned char *cksum, unsigned char *buf)
 {
 	unsigned char plain[8];
 
@@ -65,7 +65,7 @@ s32
 krb5_get_seq_num(struct crypto_blkcipher *key,
 	       unsigned char *cksum,
 	       unsigned char *buf,
-	       int *direction, s32 * seqnum)
+	       int *direction, u32 *seqnum)
 {
 	s32 code;
 	unsigned char plain[8];
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index e30a993..d91a5d0 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -82,7 +82,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
 	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
 	s32 now;
 	int direction;
-	s32 seqnum;
+	u32 seqnum;
 	unsigned char *ptr = (unsigned char *)read_token->data;
 	int bodysize;
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 3bdc527..b00b1b4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -137,7 +137,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	BUG_ON((buf->len - offset) % blocksize);
 	plainlen = blocksize + buf->len - offset;
 
-	headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
+	headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
 		(buf->len - offset);
 
 	ptr = buf->head[0].iov_base + offset;
@@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	buf->len += headlen;
 	BUG_ON((buf->len - offset - headlen) % blocksize);
 
-	g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);
+	g_make_token_header(&kctx->mech_used, 24 + plainlen, &ptr);
 
 
 	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
@@ -176,9 +176,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
 			  md5cksum.data, md5cksum.len))
 		return GSS_S_FAILURE;
-	memcpy(krb5_hdr + 16,
-	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-	       KRB5_CKSUM_LENGTH);
+	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = kctx->seq_send++;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index abf17ce..c832712 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -107,10 +107,10 @@ spkm3_make_token(struct spkm3_ctx *ctx,
 		tokenlen = 10 + ctxelen + 1 + md5elen + 1;
 
 		/* Create token header using generic routines */
-		token->len = g_token_size(&ctx->mech_used, tokenlen);
+		token->len = g_token_size(&ctx->mech_used, tokenlen + 2);
 
 		ptr = token->data;
-		g_make_token_header(&ctx->mech_used, tokenlen, &ptr);
+		g_make_token_header(&ctx->mech_used, tokenlen + 2, &ptr);
 
 		spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
 	} else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 481f984..5905d567 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1146,7 +1146,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 		case RPC_GSS_SVC_INTEGRITY:
 			if (unwrap_integ_data(&rqstp->rq_arg,
 					gc->gc_seq, rsci->mechctx))
-				goto auth_err;
+				goto garbage_args;
 			/* placeholders for length and seq. number: */
 			svc_putnl(resv, 0);
 			svc_putnl(resv, 0);
@@ -1154,7 +1154,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 		case RPC_GSS_SVC_PRIVACY:
 			if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
 					gc->gc_seq, rsci->mechctx))
-				goto auth_err;
+				goto garbage_args;
 			/* placeholders for length and seq. number: */
 			svc_putnl(resv, 0);
 			svc_putnl(resv, 0);
@@ -1169,6 +1169,11 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 		ret = SVC_OK;
 		goto out;
 	}
+garbage_args:
+	/* Restore write pointer to its original value: */
+	xdr_ressize_check(rqstp, reject_stat);
+	ret = SVC_GARBAGE;
+	goto out;
 auth_err:
 	/* Restore write pointer to its original value: */
 	xdr_ressize_check(rqstp, reject_stat);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index b5f2786..d75530f 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -571,7 +571,6 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		return -ETIMEDOUT;
 
 	dreq->item = item;
-	dreq->recv_time = get_seconds();
 
 	spin_lock(&cache_defer_lock);
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 090af78..d74c2d2 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -510,8 +510,7 @@ EXPORT_SYMBOL(svc_destroy);
 static int
 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 {
-	int pages;
-	int arghi;
+	unsigned int pages, arghi;
 
 	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
 				       * We assume one is at most one page
@@ -525,7 +524,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 		rqstp->rq_pages[arghi++] = p;
 		pages--;
 	}
-	return ! pages;
+	return pages == 0;
 }
 
 /*
@@ -534,8 +533,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 static void
 svc_release_buffer(struct svc_rqst *rqstp)
 {
-	int i;
-	for (i=0; i<ARRAY_SIZE(rqstp->rq_pages); i++)
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
 		if (rqstp->rq_pages[i])
 			put_page(rqstp->rq_pages[i]);
 }
@@ -590,7 +590,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
 	struct svc_rqst	*rqstp;
 	int		error = -ENOMEM;
 	int		have_oldmask = 0;
-	cpumask_t	oldmask;
+	cpumask_t	uninitialized_var(oldmask);
 
 	rqstp = svc_prepare_thread(serv, pool);
 	if (IS_ERR(rqstp)) {
@@ -619,16 +619,6 @@ out_thread:
 }
 
 /*
- * Create a thread in the default pool. Caller must hold BKL.
- */
-int
-svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
-{
-	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
-}
-EXPORT_SYMBOL(svc_create_thread);
-
-/*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
  */
 static inline struct svc_pool *
@@ -921,8 +911,7 @@ svc_process(struct svc_rqst *rqstp)
 	case SVC_OK:
 		break;
 	case SVC_GARBAGE:
-		rpc_stat = rpc_garbage_args;
-		goto err_bad;
+		goto err_garbage;
 	case SVC_SYSERR:
 		rpc_stat = rpc_system_err;
 		goto err_bad;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 332eb47..d8e8d79 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -586,8 +587,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		while (rqstp->rq_pages[i] == NULL) {
 			struct page *p = alloc_page(GFP_KERNEL);
 			if (!p) {
-				int j = msecs_to_jiffies(500);
-				schedule_timeout_uninterruptible(j);
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (signalled() || kthread_should_stop()) {
+					set_current_state(TASK_RUNNING);
+					return -EINTR;
+				}
+				schedule_timeout(msecs_to_jiffies(500));
 			}
 			rqstp->rq_pages[i] = p;
 		}
@@ -607,7 +612,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	try_to_freeze();
 	cond_resched();
 
-	if (signalled())
+	if (signalled() || kthread_should_stop())
 		return -EINTR;
 
 	spin_lock_bh(&pool->sp_lock);
@@ -626,6 +631,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		 * to bring down the daemons ...
 		 */
 		set_current_state(TASK_INTERRUPTIBLE);
+
+		/*
+		 * checking kthread_should_stop() here allows us to avoid
+		 * locking and signalling when stopping kthreads that call
+		 * svc_recv. If the thread has already been woken up, then
+		 * we can exit here without sleeping. If not, then it
+		 * it'll be woken up quickly during the schedule_timeout
+		 */
+		if (kthread_should_stop()) {
+			set_current_state(TASK_RUNNING);
+			spin_unlock_bh(&pool->sp_lock);
+			return -EINTR;
+		}
+
 		add_wait_queue(&rqstp->rq_wait, &wait);
 		spin_unlock_bh(&pool->sp_lock);
 
@@ -641,7 +660,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			svc_thread_dequeue(pool, rqstp);
 			spin_unlock_bh(&pool->sp_lock);
 			dprintk("svc: server %p, no data yet\n", rqstp);
-			return signalled()? -EINTR : -EAGAIN;
+			if (signalled() || kthread_should_stop())
+				return -EINTR;
+			else
+				return -EAGAIN;
 		}
 	}
 	spin_unlock_bh(&pool->sp_lock);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 3c64051..3f30ee6 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -11,7 +11,8 @@
 #include <linux/hash.h>
 #include <linux/string.h>
 #include <net/sock.h>
-
+#include <net/ipv6.h>
+#include <linux/kernel.h>
 #define RPCDBG_FACILITY RPCDBG_AUTH
 
@@ -85,7 +86,7 @@ static void svcauth_unix_domain_release(struct auth_domain *dom)
 struct ip_map {
 	struct cache_head	h;
 	char			m_class[8]; /* e.g. "nfsd" */
-	struct in_addr		m_addr;
+	struct in6_addr		m_addr;
 	struct unix_domain	*m_client;
 	int			m_add_change;
 };
@@ -113,12 +114,19 @@ static inline int hash_ip(__be32 ip)
 	return (hash ^ (hash>>8)) & 0xff;
 }
 #endif
+static inline int hash_ip6(struct in6_addr ip)
+{
+	return (hash_ip(ip.s6_addr32[0]) ^
+		hash_ip(ip.s6_addr32[1]) ^
+		hash_ip(ip.s6_addr32[2]) ^
+		hash_ip(ip.s6_addr32[3]));
+}
 static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
 {
 	struct ip_map *orig = container_of(corig, struct ip_map, h);
 	struct ip_map *new = container_of(cnew, struct ip_map, h);
 	return strcmp(orig->m_class, new->m_class) == 0
-		&& orig->m_addr.s_addr == new->m_addr.s_addr;
+		&& ipv6_addr_equal(&orig->m_addr, &new->m_addr);
 }
 static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
 {
@@ -126,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
 	struct ip_map *item = container_of(citem, struct ip_map, h);
 
 	strcpy(new->m_class, item->m_class);
-	new->m_addr.s_addr = item->m_addr.s_addr;
+	ipv6_addr_copy(&new->m_addr, &item->m_addr);
 }
 static void update(struct cache_head *cnew, struct cache_head *citem)
 {
@@ -150,22 +158,24 @@ static void ip_map_request(struct cache_detail *cd,
 				  struct cache_head *h,
 				  char **bpp, int *blen)
 {
-	char text_addr[20];
+	char text_addr[40];
 	struct ip_map *im = container_of(h, struct ip_map, h);
-	__be32 addr = im->m_addr.s_addr;
-
-	snprintf(text_addr, 20, "%u.%u.%u.%u",
-		 ntohl(addr) >> 24 & 0xff,
-		 ntohl(addr) >> 16 & 0xff,
-		 ntohl(addr) >>  8 & 0xff,
-		 ntohl(addr) >>  0 & 0xff);
+	if (ipv6_addr_v4mapped(&(im->m_addr))) {
+		snprintf(text_addr, 20, NIPQUAD_FMT,
+			ntohl(im->m_addr.s6_addr32[3]) >> 24 & 0xff,
+			ntohl(im->m_addr.s6_addr32[3]) >> 16 & 0xff,
+			ntohl(im->m_addr.s6_addr32[3]) >>  8 & 0xff,
+			ntohl(im->m_addr.s6_addr32[3]) >>  0 & 0xff);
+	} else {
+		snprintf(text_addr, 40, NIP6_FMT, NIP6(im->m_addr));
+	}
 	qword_add(bpp, blen, im->m_class);
 	qword_add(bpp, blen, text_addr);
 	(*bpp)[-1] = '\n';
 }
 
-static struct ip_map *ip_map_lookup(char *class, struct in_addr addr);
+static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
 static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
 
 static int ip_map_parse(struct cache_detail *cd,
@@ -176,10 +186,10 @@ static int ip_map_parse(struct cache_detail *cd,
 	 * for scratch: */
 	char *buf = mesg;
 	int len;
-	int b1,b2,b3,b4;
+	int b1, b2, b3, b4, b5, b6, b7, b8;
 	char c;
 	char class[8];
-	struct in_addr addr;
+	struct in6_addr addr;
 	int err;
 	struct ip_map *ipmp;
 
@@ -198,7 +208,23 @@ static int ip_map_parse(struct cache_detail *cd,
 	len = qword_get(&mesg, buf, mlen);
 	if (len <= 0) return -EINVAL;
 
-	if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
+	if (sscanf(buf, NIPQUAD_FMT "%c", &b1, &b2, &b3, &b4, &c) == 4) {
+		addr.s6_addr32[0] = 0;
+		addr.s6_addr32[1] = 0;
+		addr.s6_addr32[2] = htonl(0xffff);
+		addr.s6_addr32[3] =
+			htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
+	} else if (sscanf(buf, NIP6_FMT "%c",
+			&b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) {
+		addr.s6_addr16[0] = htons(b1);
+		addr.s6_addr16[1] = htons(b2);
+		addr.s6_addr16[2] = htons(b3);
+		addr.s6_addr16[3] = htons(b4);
+		addr.s6_addr16[4] = htons(b5);
+		addr.s6_addr16[5] = htons(b6);
+		addr.s6_addr16[6] = htons(b7);
+		addr.s6_addr16[7] = htons(b8);
+	} else
 		return -EINVAL;
 
 	expiry = get_expiry(&mesg);
@@ -216,10 +242,7 @@ static int ip_map_parse(struct cache_detail *cd,
 	} else
 		dom = NULL;
 
-	addr.s_addr =
-		htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
-
-	ipmp = ip_map_lookup(class,addr);
+	ipmp = ip_map_lookup(class, &addr);
 	if (ipmp) {
 		err = ip_map_update(ipmp,
 			     container_of(dom, struct unix_domain, h),
@@ -239,7 +262,7 @@ static int ip_map_show(struct seq_file *m,
 		       struct cache_head *h)
 {
 	struct ip_map *im;
-	struct in_addr addr;
+	struct in6_addr addr;
 	char *dom = "-no-domain-";
 
 	if (h == NULL) {
@@ -248,20 +271,24 @@ static int ip_map_show(struct seq_file *m,
 	}
 	im = container_of(h, struct ip_map, h);
 	/* class addr domain */
-	addr = im->m_addr;
+	ipv6_addr_copy(&addr, &im->m_addr);
 
 	if (test_bit(CACHE_VALID, &h->flags) &&
 	    !test_bit(CACHE_NEGATIVE, &h->flags))
 		dom = im->m_client->h.name;
 
-	seq_printf(m, "%s %d.%d.%d.%d %s\n",
-		   im->m_class,
-		   ntohl(addr.s_addr) >> 24 & 0xff,
-		   ntohl(addr.s_addr) >> 16 & 0xff,
-		   ntohl(addr.s_addr) >>  8 & 0xff,
-		   ntohl(addr.s_addr) >>  0 & 0xff,
-		   dom
-		   );
+	if (ipv6_addr_v4mapped(&addr)) {
+		seq_printf(m, "%s" NIPQUAD_FMT "%s\n",
+			im->m_class,
+			ntohl(addr.s6_addr32[3]) >> 24 & 0xff,
+			ntohl(addr.s6_addr32[3]) >> 16 & 0xff,
+			ntohl(addr.s6_addr32[3]) >>  8 & 0xff,
+			ntohl(addr.s6_addr32[3]) >>  0 & 0xff,
+			dom);
+	} else {
+		seq_printf(m, "%s" NIP6_FMT "%s\n",
+			im->m_class, NIP6(addr), dom);
+	}
 	return 0;
 }
 
@@ -281,16 +308,16 @@ struct cache_detail ip_map_cache = {
 	.alloc		= ip_map_alloc,
 };
 
-static struct ip_map *ip_map_lookup(char *class, struct in_addr addr)
+static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
 {
 	struct ip_map ip;
 	struct cache_head *ch;
 
 	strcpy(ip.m_class, class);
-	ip.m_addr = addr;
+	ipv6_addr_copy(&ip.m_addr, addr);
 	ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
 				 hash_str(class, IP_HASHBITS) ^
-				 hash_ip(addr.s_addr));
+				 hash_ip6(*addr));
 
 	if (ch)
 		return container_of(ch, struct ip_map, h);
@@ -319,14 +346,14 @@ static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t ex
 	ch = sunrpc_cache_update(&ip_map_cache,
 				 &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
-				 hash_ip(ipm->m_addr.s_addr));
+				 hash_ip6(ipm->m_addr));
 	if (!ch)
 		return -ENOMEM;
 	cache_put(ch, &ip_map_cache);
 	return 0;
 }
 
-int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
+int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
 {
 	struct unix_domain *udom;
 	struct ip_map *ipmp;
@@ -355,7 +382,7 @@ int auth_unix_forget_old(struct auth_domain *dom)
 }
 EXPORT_SYMBOL(auth_unix_forget_old);
 
-struct auth_domain *auth_unix_lookup(struct in_addr addr)
+struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
 {
 	struct ip_map *ipm;
 	struct auth_domain *rv;
@@ -650,9 +677,24 @@ static int unix_gid_find(uid_t uid, struct group_info **gip,
 int
 svcauth_unix_set_client(struct svc_rqst *rqstp)
 {
-	struct sockaddr_in *sin = svc_addr_in(rqstp);
+	struct sockaddr_in *sin;
+	struct sockaddr_in6 *sin6, sin6_storage;
 	struct ip_map *ipm;
 
+	switch (rqstp->rq_addr.ss_family) {
+	case AF_INET:
+		sin = svc_addr_in(rqstp);
+		sin6 = &sin6_storage;
+		ipv6_addr_set(&sin6->sin6_addr, 0, 0,
+				htonl(0x0000FFFF), sin->sin_addr.s_addr);
+		break;
+	case AF_INET6:
+		sin6 = svc_addr_in6(rqstp);
+		break;
+	default:
+		BUG();
+	}
+
 	rqstp->rq_client = NULL;
 	if (rqstp->rq_proc == 0)
 		return SVC_OK;
@@ -660,7 +702,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	ipm = ip_map_cached_get(rqstp);
 	if (ipm == NULL)
 		ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
-				    sin->sin_addr);
+				    &sin6->sin6_addr);
 
 	if (ipm == NULL)
 		return SVC_DENIED;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c475977..3e65719 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -38,6 +38,7 @@
 #include <net/checksum.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/tcp.h>
 #include <net/tcp_states.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
@@ -45,6 +46,7 @@
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/stats.h>
 
@@ -822,8 +824,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	 * the next four bytes. Otherwise try to gobble up as much as
 	 * possible up to the complete record length.
 	 */
-	if (svsk->sk_tcplen < 4) {
-		unsigned long want = 4 - svsk->sk_tcplen;
+	if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
+		int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
 		struct kvec	iov;
 
 		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
@@ -833,32 +835,31 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		svsk->sk_tcplen += len;
 
 		if (len < want) {
-			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
-				len, want);
+			dprintk("svc: short recvfrom while reading record "
+				"length (%d of %d)\n", len, want);
 			svc_xprt_received(&svsk->sk_xprt);
 			return -EAGAIN; /* record header not complete */
 		}
 
 		svsk->sk_reclen = ntohl(svsk->sk_reclen);
-		if (!(svsk->sk_reclen & 0x80000000)) {
+		if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) {
 			/* FIXME: technically, a record can be fragmented,
 			 *  and non-terminal fragments will not have the top
 			 *  bit set in the fragment length header.
 			 *  But apparently no known nfs clients send fragmented
 			 *  records. */
 			if (net_ratelimit())
-				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
-				       " (non-terminal)\n",
-				       (unsigned long) svsk->sk_reclen);
+				printk(KERN_NOTICE "RPC: multiple fragments "
+				       "per record not supported\n");
 			goto err_delete;
 		}
-		svsk->sk_reclen &= 0x7fffffff;
+		svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
 		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
 		if (svsk->sk_reclen > serv->sv_max_mesg) {
 			if (net_ratelimit())
-				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
-				       " (large)\n",
-				       (unsigned long) svsk->sk_reclen);
+				printk(KERN_NOTICE "RPC: "
+				       "fragment too large: 0x%08lx\n",
+				       (unsigned long)svsk->sk_reclen);
 			goto err_delete;
 		}
 	}
@@ -1045,7 +1046,6 @@ void svc_cleanup_xprt_sock(void)
 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 {
 	struct sock	*sk = svsk->sk_sk;
-	struct tcp_sock *tp = tcp_sk(sk);
 
 	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);
 	set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
@@ -1063,7 +1063,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 		svsk->sk_reclen = 0;
 		svsk->sk_tcplen = 0;
 
-		tp->nonagle = 1;        /* disable Nagle's algorithm */
+		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
 		/* initialise setting must have enough space to
 		 * receive and respond to one request.
@@ -1101,6 +1101,7 @@ void svc_sock_update_bufs(struct svc_serv *serv)
 	}
 	spin_unlock_bh(&serv->sv_lock);
 }
+EXPORT_SYMBOL(svc_sock_update_bufs);
 
 /*
  * Initialize socket for RPC use and create svc_sock struct
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 16fd3f6..af408fc 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1036,6 +1036,8 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 			wait_event(xprt->sc_send_wait,
 				   atomic_read(&xprt->sc_sq_count) <
 				   xprt->sc_sq_depth);
+			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
+				return 0;
 			continue;
 		}
 		/* Bumped used SQ WR count and post */