Diffstat (limited to 'net')
 57 files changed, 676 insertions(+), 368 deletions(-)
diff --git a/net/802/tr.c b/net/802/tr.c index 158150f..f47ae28 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -668,3 +668,5 @@ module_init(rif_init); EXPORT_SYMBOL(tr_type_trans); EXPORT_SYMBOL(alloc_trdev); + +MODULE_LICENSE("GPL"); diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index e9db889..2886d2f 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -1,12 +1,16 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> +#include <linux/netpoll.h> #include "vlan.h" /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, u16 vlan_tci, int polling) { + if (netpoll_rx(skb)) + return NET_RX_DROP; + if (skb_bond_should_drop(skb)) goto drop; @@ -100,6 +104,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, { int err = NET_RX_SUCCESS; + if (netpoll_receive_skb(skb)) + return NET_RX_DROP; + switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { case -1: return netif_receive_skb(skb); @@ -126,6 +133,9 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, if (!skb) goto out; + if (netpoll_receive_skb(skb)) + goto out; + err = NET_RX_SUCCESS; switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 4a19acd..1b34135 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -553,7 +553,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) int err = 0; if (netif_device_present(real_dev) && ops->ndo_neigh_setup) - err = ops->ndo_neigh_setup(dev, pa); + err = ops->ndo_neigh_setup(real_dev, pa); return err; } @@ -639,6 +639,7 @@ static int vlan_dev_init(struct net_device *dev) dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; dev->netdev_ops = &vlan_netdev_ops; } + netdev_resync_ops(dev); if (is_vlan_dev(real_dev)) subclass = 1; diff --git a/net/9p/protocol.c b/net/9p/protocol.c index dcd7666..fc70147 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -29,6 +29,7 @@ #include <linux/errno.h> #include <linux/uaccess.h> #include <linux/sched.h> +#include <linux/types.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "protocol.h" @@ -160,29 +161,32 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) break; case 'w':{ int16_t *val = va_arg(ap, int16_t *); - if (pdu_read(pdu, val, sizeof(*val))) { + __le16 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } - *val = cpu_to_le16(*val); + *val = le16_to_cpu(le_val); } break; case 'd':{ int32_t *val = va_arg(ap, int32_t *); - if (pdu_read(pdu, val, sizeof(*val))) { + __le32 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } - *val = cpu_to_le32(*val); + *val = le32_to_cpu(le_val); } break; case 'q':{ int64_t *val = va_arg(ap, int64_t *); - if (pdu_read(pdu, val, sizeof(*val))) { + __le64 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { errcode = -EFAULT; break; } - *val = cpu_to_le64(*val); + *val = le64_to_cpu(le_val); } break; case 's':{ @@ -362,19 +366,19 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) } break; case 'w':{ - int16_t val = va_arg(ap, int); + __le16 val = cpu_to_le16(va_arg(ap, int)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } break; case 'd':{ - int32_t val = va_arg(ap, int32_t); + __le32 val = cpu_to_le32(va_arg(ap, int32_t)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } 
break; case 'q':{ - int64_t val = va_arg(ap, int64_t); + __le64 val = cpu_to_le64(va_arg(ap, int64_t)); if (pdu_write(pdu, &val, sizeof(val))) errcode = -EFAULT; } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index bdd9cce..d2c27c8 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -67,6 +67,11 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) { struct net_device *indev; + if (skb_warn_if_lro(skb)) { + kfree_skb(skb); + return; + } + indev = skb->dev; skb->dev = to->dev; skb_forward_csum(skb); @@ -89,7 +94,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) /* called with rcu_read_lock */ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) { - if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) { + if (should_deliver(to, skb)) { __br_forward(to, skb); return; } diff --git a/net/core/dev.c b/net/core/dev.c index 5379b0c..e3fe5c7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1090,7 +1090,7 @@ int dev_open(struct net_device *dev) /* * Enable NET_DMA */ - dmaengine_get(); + net_dmaengine_get(); /* * Initialize multicasting status @@ -1172,7 +1172,7 @@ int dev_close(struct net_device *dev) /* * Shutdown NET_DMA */ - dmaengine_put(); + net_dmaengine_put(); return 0; } @@ -2267,12 +2267,6 @@ int netif_receive_skb(struct sk_buff *skb) rcu_read_lock(); - /* Don't receive packets in an exiting network namespace */ - if (!net_alive(dev_net(skb->dev))) { - kfree_skb(skb); - goto out; - } - #ifdef CONFIG_NET_CLS_ACT if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); @@ -2488,6 +2482,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { + if (netpoll_receive_skb(skb)) + return NET_RX_DROP; + switch (__napi_gro_receive(napi, skb)) { case -1: return netif_receive_skb(skb); @@ -2558,6 +2555,9 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) if (!skb) goto out; + if (netpoll_receive_skb(skb)) + goto out; + err = NET_RX_SUCCESS; switch (__napi_gro_receive(napi, skb)) { @@ -2588,9 +2588,9 @@ static int process_backlog(struct napi_struct *napi, int quota) local_irq_disable(); skb = __skb_dequeue(&queue->input_pkt_queue); if (!skb) { - __napi_complete(napi); local_irq_enable(); - break; + napi_complete(napi); + goto out; } local_irq_enable(); @@ -2599,6 +2599,7 @@ static int process_backlog(struct napi_struct *napi, int quota) napi_gro_flush(napi); +out: return work; } @@ -2671,7 +2672,7 @@ void netif_napi_del(struct napi_struct *napi) struct sk_buff *skb, *next; list_del_init(&napi->dev_list); - kfree(napi->skb); + kfree_skb(napi->skb); for (skb = napi->gro_list; skb; skb = next) { next = skb->next; @@ -4282,6 +4283,39 @@ unsigned long netdev_fix_features(unsigned long features, const char *name) } EXPORT_SYMBOL(netdev_fix_features); +/* Some devices need to (re-)set their netdev_ops inside + * ->init() or similar. If that happens, we have to setup + * the compat pointers again. 
+ */ +void netdev_resync_ops(struct net_device *dev) +{ +#ifdef CONFIG_COMPAT_NET_DEV_OPS + const struct net_device_ops *ops = dev->netdev_ops; + + dev->init = ops->ndo_init; + dev->uninit = ops->ndo_uninit; + dev->open = ops->ndo_open; + dev->change_rx_flags = ops->ndo_change_rx_flags; + dev->set_rx_mode = ops->ndo_set_rx_mode; + dev->set_multicast_list = ops->ndo_set_multicast_list; + dev->set_mac_address = ops->ndo_set_mac_address; + dev->validate_addr = ops->ndo_validate_addr; + dev->do_ioctl = ops->ndo_do_ioctl; + dev->set_config = ops->ndo_set_config; + dev->change_mtu = ops->ndo_change_mtu; + dev->neigh_setup = ops->ndo_neigh_setup; + dev->tx_timeout = ops->ndo_tx_timeout; + dev->get_stats = ops->ndo_get_stats; + dev->vlan_rx_register = ops->ndo_vlan_rx_register; + dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = ops->ndo_poll_controller; +#endif +#endif +} +EXPORT_SYMBOL(netdev_resync_ops); + /** * register_netdevice - register a network device * @dev: device to register @@ -4326,27 +4360,7 @@ int register_netdevice(struct net_device *dev) * This is temporary until all network devices are converted. */ if (dev->netdev_ops) { - const struct net_device_ops *ops = dev->netdev_ops; - - dev->init = ops->ndo_init; - dev->uninit = ops->ndo_uninit; - dev->open = ops->ndo_open; - dev->change_rx_flags = ops->ndo_change_rx_flags; - dev->set_rx_mode = ops->ndo_set_rx_mode; - dev->set_multicast_list = ops->ndo_set_multicast_list; - dev->set_mac_address = ops->ndo_set_mac_address; - dev->validate_addr = ops->ndo_validate_addr; - dev->do_ioctl = ops->ndo_do_ioctl; - dev->set_config = ops->ndo_set_config; - dev->change_mtu = ops->ndo_change_mtu; - dev->tx_timeout = ops->ndo_tx_timeout; - dev->get_stats = ops->ndo_get_stats; - dev->vlan_rx_register = ops->ndo_vlan_rx_register; - dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid; - dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = ops->ndo_poll_controller; -#endif + netdev_resync_ops(dev); } else { char drivername[64]; pr_info("%s (%s): not using net_device_ops yet\n", diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f66c58d..278a142 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1994,8 +1994,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) if (!net_eq(neigh_parms_net(p), net)) continue; - if (nidx++ < neigh_skip) - continue; + if (nidx < neigh_skip) + goto next; if (neightbl_fill_param_info(skb, tbl, p, NETLINK_CB(cb->skb).pid, @@ -2003,6 +2003,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) RTM_NEWNEIGHTBL, NLM_F_MULTI) <= 0) goto out; + next: + nidx++; } neigh_skip = 0; @@ -2082,12 +2084,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, if (h > s_h) s_idx = 0; for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) { - int lidx; if (dev_net(n->dev) != net) continue; - lidx = idx++; - if (lidx < s_idx) - continue; + if (idx < s_idx) + goto next; if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, @@ -2096,6 +2096,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, rc = -1; goto out; } + next: + idx++; } } read_unlock_bh(&tbl->lock); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 6ac29a4..484f587 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -77,7 +77,9 
@@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, if (endp == buf) goto err; - rtnl_lock(); + if (!rtnl_trylock()) + return -ERESTARTSYS; + if (dev_isalive(net)) { if ((ret = (*set)(net, new)) == 0) ret = len; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 55151fa..e3bebd3 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net) { /* Must be called with net_mutex held */ struct pernet_operations *ops; - int error; - struct net_generic *ng; + int error = 0; atomic_set(&net->count, 1); + #ifdef NETNS_REFCNT_DEBUG atomic_set(&net->use_count, 0); #endif - error = -ENOMEM; - ng = kzalloc(sizeof(struct net_generic) + - INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL); - if (ng == NULL) - goto out; - - ng->len = INITIAL_NET_GEN_PTRS; - rcu_assign_pointer(net->gen, ng); - - error = 0; list_for_each_entry(ops, &pernet_list, list) { if (ops->init) { error = ops->init(net); @@ -70,24 +60,50 @@ out_undo: } rcu_barrier(); - kfree(ng); goto out; } +static struct net_generic *net_alloc_generic(void) +{ + struct net_generic *ng; + size_t generic_size = sizeof(struct net_generic) + + INITIAL_NET_GEN_PTRS * sizeof(void *); + + ng = kzalloc(generic_size, GFP_KERNEL); + if (ng) + ng->len = INITIAL_NET_GEN_PTRS; + + return ng; +} + #ifdef CONFIG_NET_NS static struct kmem_cache *net_cachep; static struct workqueue_struct *netns_wq; static struct net *net_alloc(void) { - return kmem_cache_zalloc(net_cachep, GFP_KERNEL); + struct net *net = NULL; + struct net_generic *ng; + + ng = net_alloc_generic(); + if (!ng) + goto out; + + net = kmem_cache_zalloc(net_cachep, GFP_KERNEL); + if (!net) + goto out_free; + + rcu_assign_pointer(net->gen, ng); +out: + return net; + +out_free: + kfree(ng); + goto out; } static void net_free(struct net *net) { - if (!net) - return; - #ifdef NETNS_REFCNT_DEBUG if (unlikely(atomic_read(&net->use_count) != 0)) { printk(KERN_EMERG "network namespace not free! 
Usage: %d\n", @@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) err = -ENOMEM; new_net = net_alloc(); if (!new_net) - goto out; + goto out_err; mutex_lock(&net_mutex); err = setup_net(new_net); - if (err) - goto out_unlock; - - rtnl_lock(); - list_add_tail(&new_net->list, &net_namespace_list); - rtnl_unlock(); - - -out_unlock: + if (!err) { + rtnl_lock(); + list_add_tail(&new_net->list, &net_namespace_list); + rtnl_unlock(); + } mutex_unlock(&net_mutex); + + if (err) + goto out_free; out: put_net(old_net); - if (err) { - net_free(new_net); - new_net = ERR_PTR(err); - } return new_net; + +out_free: + net_free(new_net); +out_err: + new_net = ERR_PTR(err); + goto out; } static void cleanup_net(struct work_struct *work) @@ -140,9 +157,6 @@ static void cleanup_net(struct work_struct *work) struct pernet_operations *ops; struct net *net; - /* Be very certain incoming network packets will not find us */ - rcu_barrier(); - net = container_of(work, struct net, work); mutex_lock(&net_mutex); @@ -188,6 +202,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) static int __init net_ns_init(void) { + struct net_generic *ng; int err; printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); @@ -202,6 +217,12 @@ static int __init net_ns_init(void) panic("Could not create netns workq"); #endif + ng = net_alloc_generic(); + if (!ng) + panic("Could not allocate generic netns"); + + rcu_assign_pointer(init_net.gen, ng); + mutex_lock(&net_mutex); err = setup_net(&init_net); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index da74b84..c6a6b16 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -143,14 +143,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) BUG(); } -void skb_truesize_bug(struct sk_buff *skb) -{ - WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) " - "len=%u, sizeof(sk_buff)=%Zd\n", - skb->truesize, skb->len, sizeof(struct sk_buff)); -} -EXPORT_SYMBOL(skb_truesize_bug); - /* Allocate a new skbuff. We do this ourselves so we can fill in a few * 'private' fields and also do memory statistics to find all the * [BEEP] leaks. diff --git a/net/core/sock.c b/net/core/sock.c index f3a0d08..5f97caa 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -696,6 +696,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname, if (len < 0) return -EINVAL; + memset(&v, 0, sizeof(v)); + switch(optname) { case SO_DEBUG: v.val = sock_flag(sk, SOCK_DBG); @@ -1135,7 +1137,6 @@ void sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; - skb_truesize_check(skb); atomic_sub(skb->truesize, &sk->sk_rmem_alloc); sk_mem_uncharge(skb->sk, skb->truesize); } diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 6bb2635..7bc9929 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -3,11 +3,16 @@ * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in - * FIPS-188, copies of both documents can be found in the Documentation - * directory. While CIPSO never became a full IETF RFC standard many vendors + * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. 
* + * The CIPSO draft specification can be found in the kernel's Documentation + * directory as well as the following URL: + * http://netlabel.sourceforge.net/files/draft-ietf-cipso-ipsecurity-01.txt + * The FIPS-188 specification can be found at the following URL: + * http://www.itl.nist.gov/fipspubs/fip188.htm + * * Author: Paul Moore <paul.moore@hp.com> * */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 705b33b..fc562d2 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1205,7 +1205,7 @@ static struct pernet_operations __net_initdata icmp_sk_ops = { int __init icmp_init(void) { - return register_pernet_device(&icmp_sk_ops); + return register_pernet_subsys(&icmp_sk_ops); } EXPORT_SYMBOL(icmp_err_convert); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 6659ac0..7985346 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -463,6 +463,7 @@ err: static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, struct net_device *dev) { + struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; struct sk_buff *fp, *head = qp->q.fragments; int len; @@ -548,7 +549,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, iph = ip_hdr(head); iph->frag_off = 0; iph->tot_len = htons(len); - IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMOKS); + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; return 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a6961d7..c28976a7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, - unsigned int pcount, int shifted, int mss) + unsigned int pcount, int shifted, int mss, + int dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); @@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, } /* We discard results */ - tcp_sacktag_one(skb, sk, state, 0, pcount); + tcp_sacktag_one(skb, sk, state, dup_sack, pcount); /* Difference in this won't matter, both ACKed by the same cumul. 
ACK */ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); @@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, if (!skb_shift(prev, skb, len)) goto fallback; - if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss)) + if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) goto out; /* Hole filled allows collapsing with the next as well, this is very @@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, len = skb->len; if (skb_shift(prev, skb, len)) { pcount += tcp_skb_pcount(skb); - tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss); + tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); } out: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 19d7b42..cf74c41 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2443,7 +2443,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = { void __init tcp_v4_init(void) { inet_hashinfo_init(&tcp_hashinfo); - if (register_pernet_device(&tcp_sk_ops)) + if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 557fe16..da2c3b8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -663,14 +663,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, th->urg_ptr = 0; /* The urg_mode check is necessary during a below snd_una win probe */ - if (unlikely(tcp_urg_mode(tp))) { - if (between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF)) { - th->urg_ptr = htons(tp->snd_up - tcb->seq); - th->urg = 1; - } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { - th->urg_ptr = 0xFFFF; - th->urg = 1; - } + if (unlikely(tcp_urg_mode(tp) && + between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) { + th->urg_ptr = htons(tp->snd_up - tcb->seq); + th->urg = 1; } tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); @@ -2027,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk) last_lost = tp->snd_una; } - /* First pass: retransmit lost packets. */ tcp_for_write_queue_from(skb, sk) { __u8 sacked = TCP_SKB_CB(skb)->sacked; diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index 2747ec7..4660b08 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -1,6 +1,6 @@ /* Tom Kelly's Scalable TCP * - * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/ + * See http://www.deneholme.net/tom/scalable/ * * John Heffner <jheffner@sc.edu> */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1ab180b..c47c989 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1231,11 +1231,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct sock *sk; - struct udphdr *uh = udp_hdr(skb); + struct udphdr *uh; unsigned short ulen; struct rtable *rt = (struct rtable*)skb->dst; - __be32 saddr = ip_hdr(skb)->saddr; - __be32 daddr = ip_hdr(skb)->daddr; + __be32 saddr, daddr; struct net *net = dev_net(skb->dev); /* @@ -1244,6 +1243,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto drop; /* No space for header. 
*/ + uh = udp_hdr(skb); ulen = ntohs(uh->len); if (ulen > skb->len) goto short_packet; @@ -1258,6 +1258,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (udp4_csum_init(skb, uh, proto)) goto csum_error; + saddr = ip_hdr(skb)->saddr; + daddr = ip_hdr(skb)->daddr; + if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) return __udp4_lib_mcast_deliver(net, skb, uh, saddr, daddr, udptable); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f9afb45..1220e2c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -493,15 +493,17 @@ static void addrconf_forward_change(struct net *net, __s32 newf) read_unlock(&dev_base_lock); } -static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) +static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) { struct net *net; net = (struct net *)table->extra2; if (p == &net->ipv6.devconf_dflt->forwarding) - return; + return 0; + + if (!rtnl_trylock()) + return -ERESTARTSYS; - rtnl_lock(); if (p == &net->ipv6.devconf_all->forwarding) { __s32 newf = net->ipv6.devconf_all->forwarding; net->ipv6.devconf_dflt->forwarding = newf; @@ -512,6 +514,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) if (*p) rt6_purge_dflt_routers(net); + return 1; } #endif @@ -2608,9 +2611,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) ASSERT_RTNL(); - if ((dev->flags & IFF_LOOPBACK) && how == 1) - how = 0; - rt6_ifdown(net, dev); neigh_ifdown(&nd_tbl, dev); @@ -3983,7 +3983,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp, ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); if (write) - addrconf_fixup_forwarding(ctl, valp, val); + ret = addrconf_fixup_forwarding(ctl, valp, val); return ret; } @@ -4019,8 +4019,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table, } *valp = new; - addrconf_fixup_forwarding(table, valp, val); - return 1; + return addrconf_fixup_forwarding(table, valp, val); } static struct addrconf_sysctl_table @@ -4446,25 +4445,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb) EXPORT_SYMBOL(unregister_inet6addr_notifier); -static void addrconf_net_exit(struct net *net) -{ - struct net_device *dev; - - rtnl_lock(); - /* clean dev list */ - for_each_netdev(net, dev) { - if (__in6_dev_get(dev) == NULL) - continue; - addrconf_ifdown(dev, 1); - } - addrconf_ifdown(net->loopback_dev, 2); - rtnl_unlock(); -} - -static struct pernet_operations addrconf_net_ops = { - .exit = addrconf_net_exit, -}; - /* * Init / cleanup code */ @@ -4506,10 +4486,6 @@ int __init addrconf_init(void) if (err) goto errlo; - err = register_pernet_device(&addrconf_net_ops); - if (err) - return err; - register_netdevice_notifier(&ipv6_dev_notf); addrconf_verify(0); @@ -4539,15 +4515,22 @@ errlo: void addrconf_cleanup(void) { struct inet6_ifaddr *ifa; + struct net_device *dev; int i; unregister_netdevice_notifier(&ipv6_dev_notf); - unregister_pernet_device(&addrconf_net_ops); - unregister_pernet_subsys(&addrconf_ops); rtnl_lock(); + /* clean dev list */ + for_each_netdev(&init_net, dev) { + if (__in6_dev_get(dev) == NULL) + continue; + addrconf_ifdown(dev, 1); + } + addrconf_ifdown(init_net.loopback_dev, 2); + /* * Check hash table. 
*/ @@ -4568,6 +4551,4 @@ void addrconf_cleanup(void) del_timer(&addr_chk_timer); rtnl_unlock(); - - unregister_pernet_subsys(&addrconf_net_ops); } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index c802bc1..9c8309e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -72,6 +72,10 @@ MODULE_LICENSE("GPL"); static struct list_head inetsw6[SOCK_MAX]; static DEFINE_SPINLOCK(inetsw6_lock); +static int disable_ipv6 = 0; +module_param_named(disable, disable_ipv6, int, 0); +MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional"); + static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) { const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); @@ -991,10 +995,21 @@ static int __init inet6_init(void) { struct sk_buff *dummy_skb; struct list_head *r; - int err; + int err = 0; BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); + /* Register the socket-side information for inet6_create. */ + for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) + INIT_LIST_HEAD(r); + + if (disable_ipv6) { + printk(KERN_INFO + "IPv6: Loaded, but administratively disabled, " + "reboot required to enable\n"); + goto out; + } + err = proto_register(&tcpv6_prot, 1); if (err) goto out; @@ -1012,10 +1027,6 @@ static int __init inet6_init(void) goto out_unregister_udplite_proto; - /* Register the socket-side information for inet6_create. */ - for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) - INIT_LIST_HEAD(r); - /* We MUST register RAW sockets before we create the ICMP6, * IGMP6, or NDISC control sockets. */ @@ -1181,6 +1192,9 @@ module_init(inet6_init); static void __exit inet6_exit(void) { + if (disable_ipv6) + return; + /* First of all disallow new sockets creation. */ sock_unregister(PF_INET6); /* Disallow any further netlink messages */ diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 8fe267f..1bcc343 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -258,11 +258,11 @@ unique: if (twp != NULL) { *twp = tw; - NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); + NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); } else if (tw != NULL) { /* Silly. Should hash-dance instead... */ inet_twsk_deschedule(tw, death_row); - NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); + NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); inet_twsk_put(tw); } diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index c62dd24..7712578 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -323,17 +323,21 @@ static struct ip6_flowlabel * fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, int optlen, int *err_p) { - struct ip6_flowlabel *fl; + struct ip6_flowlabel *fl = NULL; int olen; int addr_type; int err; + olen = optlen - CMSG_ALIGN(sizeof(*freq)); + err = -EINVAL; + if (olen > 64 * 1024) + goto done; + err = -ENOMEM; fl = kzalloc(sizeof(*fl), GFP_KERNEL); if (fl == NULL) goto done; - olen = optlen - CMSG_ALIGN(sizeof(*freq)); if (olen > 0) { struct msghdr msg; struct flowi flowi; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 4b15938..9fb49c3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1105,6 +1105,18 @@ static inline int ip6_ufo_append_data(struct sock *sk, return err; } +static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, + gfp_t gfp) +{ + return src ? 
kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; +} + +static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src, + gfp_t gfp) +{ + return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; +} + int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, @@ -1130,17 +1142,37 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, * setup for corking */ if (opt) { - if (np->cork.opt == NULL) { - np->cork.opt = kmalloc(opt->tot_len, - sk->sk_allocation); - if (unlikely(np->cork.opt == NULL)) - return -ENOBUFS; - } else if (np->cork.opt->tot_len < opt->tot_len) { - printk(KERN_DEBUG "ip6_append_data: invalid option length\n"); + if (WARN_ON(np->cork.opt)) return -EINVAL; - } - memcpy(np->cork.opt, opt, opt->tot_len); - inet->cork.flags |= IPCORK_OPT; + + np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation); + if (unlikely(np->cork.opt == NULL)) + return -ENOBUFS; + + np->cork.opt->tot_len = opt->tot_len; + np->cork.opt->opt_flen = opt->opt_flen; + np->cork.opt->opt_nflen = opt->opt_nflen; + + np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt, + sk->sk_allocation); + if (opt->dst0opt && !np->cork.opt->dst0opt) + return -ENOBUFS; + + np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt, + sk->sk_allocation); + if (opt->dst1opt && !np->cork.opt->dst1opt) + return -ENOBUFS; + + np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt, + sk->sk_allocation); + if (opt->hopopt && !np->cork.opt->hopopt) + return -ENOBUFS; + + np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt, + sk->sk_allocation); + if (opt->srcrt && !np->cork.opt->srcrt) + return -ENOBUFS; + /* need source address above miyazawa*/ } dst_hold(&rt->u.dst); @@ -1167,8 +1199,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, } else { rt = (struct rt6_info *)inet->cork.dst; fl = &inet->cork.fl; - if (inet->cork.flags & IPCORK_OPT) - opt = np->cork.opt; + opt = np->cork.opt; transhdrlen = 0; exthdrlen = 0; mtu = inet->cork.fragsize; @@ -1407,9 +1438,15 @@ error: static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) { - inet->cork.flags &= ~IPCORK_OPT; - kfree(np->cork.opt); - np->cork.opt = NULL; + if (np->cork.opt) { + kfree(np->cork.opt->dst0opt); + kfree(np->cork.opt->dst1opt); + kfree(np->cork.opt->hopopt); + kfree(np->cork.opt->srcrt); + kfree(np->cork.opt); + np->cork.opt = NULL; + } + if (inet->cork.dst) { dst_release(inet->cork.dst); inet->cork.dst = NULL; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 58e2b0d..d994c55 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -249,8 +249,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p) } t = netdev_priv(dev); - ip6_tnl_dev_init(dev); t->parms = *p; + ip6_tnl_dev_init(dev); if ((err = register_netdevice(dev)) < 0) goto failed_free; diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index c455cf4..72dbb6d 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -49,8 +49,19 @@ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, static const u_int8_t invmap[] = { [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1, [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1, - [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1, - [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1 + [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_REPLY + 1, + [ICMPV6_NI_REPLY - 
128] = ICMPV6_NI_QUERY +1 +}; + +static const u_int8_t noct_valid_new[] = { + [ICMPV6_MGM_QUERY - 130] = 1, + [ICMPV6_MGM_REPORT -130] = 1, + [ICMPV6_MGM_REDUCTION - 130] = 1, + [NDISC_ROUTER_SOLICITATION - 130] = 1, + [NDISC_ROUTER_ADVERTISEMENT - 130] = 1, + [NDISC_NEIGHBOUR_SOLICITATION - 130] = 1, + [NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1, + [ICMPV6_MLD2_REPORT - 130] = 1 }; static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple, @@ -178,6 +189,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, { const struct icmp6hdr *icmp6h; struct icmp6hdr _ih; + int type; icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); if (icmp6h == NULL) { @@ -189,11 +201,21 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { - nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, - "nf_ct_icmpv6: ICMPv6 checksum failed\n"); + if (LOG_INVALID(net, IPPROTO_ICMPV6)) + nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, + "nf_ct_icmpv6: ICMPv6 checksum failed "); return -NF_ACCEPT; } + type = icmp6h->icmp6_type - 130; + if (type >= 0 && type < sizeof(noct_valid_new) && + noct_valid_new[type]) { + skb->nfct = &nf_conntrack_untracked.ct_general; + skb->nfctinfo = IP_CT_NEW; + nf_conntrack_get(skb->nfct); + return NF_ACCEPT; + } + /* is not error message ? */ if (icmp6h->icmp6_type >= 128) return NF_ACCEPT; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index ed4d79a..058a5e4 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -528,14 +528,14 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) if (!ipv6_ext_hdr(nexthdr)) { return -1; } - if (len < (int)sizeof(struct ipv6_opt_hdr)) { - pr_debug("too short\n"); - return -1; - } if (nexthdr == NEXTHDR_NONE) { pr_debug("next header is none\n"); return -1; } + if (len < (int)sizeof(struct ipv6_opt_hdr)) { + pr_debug("too short\n"); + return -1; + } if (skb_copy_bits(skb, start, &hdr, sizeof(hdr))) BUG(); if (nexthdr == NEXTHDR_AUTH) diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 3c57511..e9ac7a1 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -452,6 +452,7 @@ err: static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { + struct net *net = container_of(fq->q.net, struct net, ipv6.frags); struct sk_buff *fp, *head = fq->q.fragments; int payload_len; unsigned int nhoff; @@ -551,8 +552,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, head->csum); rcu_read_lock(); - IP6_INC_STATS_BH(dev_net(dev), - __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); + IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; return 1; @@ -566,8 +566,7 @@ out_oom: printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); - IP6_INC_STATS_BH(dev_net(dev), - __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); + IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); return -1; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index d3467e5..5cee2bc 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -188,9 +188,9 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, } nt = netdev_priv(dev); - ipip6_tunnel_init(dev); nt->parms = *parms; + ipip6_tunnel_init(dev); if (parms->i_flags 
& SIT_ISATAP) dev->priv_flags |= IFF_ISATAP; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 4278e54..37e3d5ef 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -752,6 +752,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) skb_copy_queue_mapping(frag, first); frag->do_not_encrypt = first->do_not_encrypt; + frag->dev = first->dev; + frag->iif = first->iif; pos += copylen; left -= copylen; @@ -1343,6 +1345,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) list) { if (!netif_running(sdata->dev)) continue; + if (sdata->vif.type != NL80211_IFTYPE_AP) + continue; if (compare_ether_addr(sdata->dev->dev_addr, hdr->addr2)) { dev_hold(sdata->dev); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 90ce9dd..f4935e3 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -726,7 +726,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, NF_CT_ASSERT(skb->nfct); ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); - if (ret < 0) { + if (ret <= 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index c32a7e8..ed6d873 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -434,7 +434,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, } else return NOTIFY_DONE; - if (!nfnetlink_has_listeners(group)) + if (!item->report && !nfnetlink_has_listeners(group)) return NOTIFY_DONE; skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); @@ -1215,6 +1215,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[], } } +#ifdef CONFIG_NF_NAT_NEEDED + if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) { + err = ctnetlink_change_nat_seq_adj(ct, cda); + if (err < 0) { + rcu_read_unlock(); + goto err; + } + } +#endif + if (cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); if (err < 0) { @@ -1492,7 +1502,8 @@ static int ctnetlink_expect_event(struct notifier_block *this, } else return NOTIFY_DONE; - if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) + if (!item->report && + !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) return NOTIFY_DONE; skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); @@ -1769,6 +1780,7 @@ ctnetlink_create_expect(struct nlattr *cda[], u_int8_t u3, u32 pid, int report) goto out; } + exp->class = 0; exp->expectfn = NULL; exp->flags = 0; exp->master = ct; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index a1edb9c..f3fd154 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -859,7 +859,7 @@ static int tcp_packet(struct nf_conn *ct, */ if (nf_ct_kill(ct)) return -NF_REPEAT; - return -NF_DROP; + return NF_DROP; } /* Fall through */ case TCP_CONNTRACK_IGNORE: @@ -892,7 +892,7 @@ static int tcp_packet(struct nf_conn *ct, nf_log_packet(pf, 0, skb, NULL, NULL, NULL, "nf_ct_tcp: killing out of sync session "); nf_ct_kill(ct); - return -NF_DROP; + return NF_DROP; } ct->proto.tcp.last_index = index; ct->proto.tcp.last_dir = dir; diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index fa49dc7..c712e9f 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -39,7 +39,7 @@ #endif #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE -#define NFULNL_TIMEOUT_DEFAULT HZ 
/* every second */ +#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ #define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ @@ -590,8 +590,10 @@ nfulnl_log_packet(u_int8_t pf, qthreshold = inst->qthreshold; /* per-rule qthreshold overrides per-instance */ - if (qthreshold > li->u.ulog.qthreshold) - qthreshold = li->u.ulog.qthreshold; + if (li->u.ulog.qthreshold) + if (qthreshold > li->u.ulog.qthreshold) + qthreshold = li->u.ulog.qthreshold; + switch (inst->copy_mode) { case NFULNL_COPY_META: diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index bfbf521..5baccfa 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -827,59 +827,143 @@ static const struct file_operations xt_table_ops = { .release = seq_release_net, }; -static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) +/* + * Traverse state for ip{,6}_{tables,matches} for helping crossing + * the multi-AF mutexes. + */ +struct nf_mttg_trav { + struct list_head *head, *curr; + uint8_t class, nfproto; +}; + +enum { + MTTG_TRAV_INIT, + MTTG_TRAV_NFP_UNSPEC, + MTTG_TRAV_NFP_SPEC, + MTTG_TRAV_DONE, +}; + +static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, + bool is_target) { - struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; - u_int16_t af = (unsigned long)pde->data; + static const uint8_t next_class[] = { + [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, + [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, + }; + struct nf_mttg_trav *trav = seq->private; + + switch (trav->class) { + case MTTG_TRAV_INIT: + trav->class = MTTG_TRAV_NFP_UNSPEC; + mutex_lock(&xt[NFPROTO_UNSPEC].mutex); + trav->head = trav->curr = is_target ? + &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; + break; + case MTTG_TRAV_NFP_UNSPEC: + trav->curr = trav->curr->next; + if (trav->curr != trav->head) + break; + mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); + mutex_lock(&xt[trav->nfproto].mutex); + trav->head = trav->curr = is_target ? 
+ &xt[trav->nfproto].target : &xt[trav->nfproto].match; + trav->class = next_class[trav->class]; + break; + case MTTG_TRAV_NFP_SPEC: + trav->curr = trav->curr->next; + if (trav->curr != trav->head) + break; + /* fallthru, _stop will unlock */ + default: + return NULL; + } - mutex_lock(&xt[af].mutex); - return seq_list_start(&xt[af].match, *pos); + if (ppos != NULL) + ++*ppos; + return trav; } -static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos) +static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, + bool is_target) { - struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; - u_int16_t af = (unsigned long)pde->data; + struct nf_mttg_trav *trav = seq->private; + unsigned int j; - return seq_list_next(v, &xt[af].match, pos); + trav->class = MTTG_TRAV_INIT; + for (j = 0; j < *pos; ++j) + if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) + return NULL; + return trav; } -static void xt_match_seq_stop(struct seq_file *seq, void *v) +static void xt_mttg_seq_stop(struct seq_file *seq, void *v) { - struct proc_dir_entry *pde = seq->private; - u_int16_t af = (unsigned long)pde->data; + struct nf_mttg_trav *trav = seq->private; + + switch (trav->class) { + case MTTG_TRAV_NFP_UNSPEC: + mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); + break; + case MTTG_TRAV_NFP_SPEC: + mutex_unlock(&xt[trav->nfproto].mutex); + break; + } +} - mutex_unlock(&xt[af].mutex); +static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) +{ + return xt_mttg_seq_start(seq, pos, false); } -static int xt_match_seq_show(struct seq_file *seq, void *v) +static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) { - struct xt_match *match = list_entry(v, struct xt_match, list); + return xt_mttg_seq_next(seq, v, ppos, false); +} - if (strlen(match->name)) - return seq_printf(seq, "%s\n", match->name); - else - return 0; +static int xt_match_seq_show(struct seq_file *seq, void *v) +{ + const struct nf_mttg_trav *trav = seq->private; + const struct xt_match *match; + + switch (trav->class) { + case MTTG_TRAV_NFP_UNSPEC: + case MTTG_TRAV_NFP_SPEC: + if (trav->curr == trav->head) + return 0; + match = list_entry(trav->curr, struct xt_match, list); + return (*match->name == '\0') ? 
0 : + seq_printf(seq, "%s\n", match->name); + } + return 0; } static const struct seq_operations xt_match_seq_ops = { .start = xt_match_seq_start, .next = xt_match_seq_next, - .stop = xt_match_seq_stop, + .stop = xt_mttg_seq_stop, .show = xt_match_seq_show, }; static int xt_match_open(struct inode *inode, struct file *file) { + struct seq_file *seq; + struct nf_mttg_trav *trav; int ret; - ret = seq_open(file, &xt_match_seq_ops); - if (!ret) { - struct seq_file *seq = file->private_data; + trav = kmalloc(sizeof(*trav), GFP_KERNEL); + if (trav == NULL) + return -ENOMEM; - seq->private = PDE(inode); + ret = seq_open(file, &xt_match_seq_ops); + if (ret < 0) { + kfree(trav); + return ret; } - return ret; + + seq = file->private_data; + seq->private = trav; + trav->nfproto = (unsigned long)PDE(inode)->data; + return 0; } static const struct file_operations xt_match_ops = { @@ -887,62 +971,63 @@ static const struct file_operations xt_match_ops = { .open = xt_match_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_private, }; static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) { - struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; - u_int16_t af = (unsigned long)pde->data; - - mutex_lock(&xt[af].mutex); - return seq_list_start(&xt[af].target, *pos); + return xt_mttg_seq_start(seq, pos, true); } -static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos) +static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) { - struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; - u_int16_t af = (unsigned long)pde->data; - - return seq_list_next(v, &xt[af].target, pos); -} - -static void xt_target_seq_stop(struct seq_file *seq, void *v) -{ - struct proc_dir_entry *pde = seq->private; - u_int16_t af = (unsigned long)pde->data; - - mutex_unlock(&xt[af].mutex); + return xt_mttg_seq_next(seq, v, ppos, true); } static int xt_target_seq_show(struct seq_file *seq, void *v) { - struct xt_target *target = list_entry(v, struct xt_target, list); - - if (strlen(target->name)) - return seq_printf(seq, "%s\n", target->name); - else - return 0; + const struct nf_mttg_trav *trav = seq->private; + const struct xt_target *target; + + switch (trav->class) { + case MTTG_TRAV_NFP_UNSPEC: + case MTTG_TRAV_NFP_SPEC: + if (trav->curr == trav->head) + return 0; + target = list_entry(trav->curr, struct xt_target, list); + return (*target->name == '\0') ? 
0 : + seq_printf(seq, "%s\n", target->name); + } + return 0; } static const struct seq_operations xt_target_seq_ops = { .start = xt_target_seq_start, .next = xt_target_seq_next, - .stop = xt_target_seq_stop, + .stop = xt_mttg_seq_stop, .show = xt_target_seq_show, }; static int xt_target_open(struct inode *inode, struct file *file) { + struct seq_file *seq; + struct nf_mttg_trav *trav; int ret; - ret = seq_open(file, &xt_target_seq_ops); - if (!ret) { - struct seq_file *seq = file->private_data; + trav = kmalloc(sizeof(*trav), GFP_KERNEL); + if (trav == NULL) + return -ENOMEM; - seq->private = PDE(inode); + ret = seq_open(file, &xt_target_seq_ops); + if (ret < 0) { + kfree(trav); + return ret; } - return ret; + + seq = file->private_data; + seq->private = trav; + trav->nfproto = (unsigned long)PDE(inode)->data; + return 0; } static const struct file_operations xt_target_ops = { @@ -950,7 +1035,7 @@ static const struct file_operations xt_target_ops = { .open = xt_target_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_private, }; #define FORMAT_TABLES "_tables_names" diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index fe80b61..791e030 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -542,7 +542,7 @@ recent_mt_proc_write(struct file *file, const char __user *input, struct recent_entry *e; char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; const char *c = buf; - union nf_inet_addr addr; + union nf_inet_addr addr = {}; u_int16_t family; bool add, succ; diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c index e223cb4..a189ada9 100644 --- a/net/netfilter/xt_sctp.c +++ b/net/netfilter/xt_sctp.c @@ -105,7 +105,7 @@ match_packet(const struct sk_buff *skb, switch (chunk_match_type) { case SCTP_CHUNK_MATCH_ALL: - return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap); + return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy); case SCTP_CHUNK_MATCH_ANY: return false; case SCTP_CHUNK_MATCH_ONLY: diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 9eb895c..3ae3cb8 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1084,6 +1084,13 @@ out: return 0; } +/** + * netlink_set_err - report error to broadcast listeners + * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() + * @pid: the PID of a process that we want to skip (if any) + * @groups: the broadcast group that will notice the error + * @code: error code, must be negative (as usual in kernelspace) + */ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) { struct netlink_set_err_data info; @@ -1093,7 +1100,8 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) info.exclude_sk = ssk; info.pid = pid; info.group = group; - info.code = code; + /* sk->sk_err wants a positive error value */ + info.code = -code; read_lock(&nl_table_lock); diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index 6a91a32..4aa8885 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c @@ -207,7 +207,6 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev) dev->name, err); dev->stats.tx_aborted_errors++; dev->stats.tx_errors++; - dev_kfree_skb(skb); } else { dev->stats.tx_packets++; dev->stats.tx_bytes += len; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index bb3e678..8ad2b53 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -553,7 +553,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct sock 
*sknode; - struct pnpipehdr *hdr = pnp_hdr(skb); + struct pnpipehdr *hdr; struct sockaddr_pn dst; int err = NET_RX_SUCCESS; u8 pipe_handle; diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index d7d2bed..eac5e7b 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -284,13 +284,13 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, if (IS_ERR(trans)) { call = ERR_CAST(trans); trans = NULL; - goto out; + goto out_notrans; } } else { trans = rx->trans; if (!trans) { call = ERR_PTR(-ENOTCONN); - goto out; + goto out_notrans; } atomic_inc(&trans->usage); } @@ -315,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, rxrpc_put_bundle(trans, bundle); out: rxrpc_put_transport(trans); +out_notrans: release_sock(&rx->sk); _leave(" = %p", call); return call; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 5c72a11..f8f047b6 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -183,13 +183,6 @@ override: if (R_tab == NULL) goto failure; - if (!est && (ret == ACT_P_CREATED || - !gen_estimator_active(&police->tcf_bstats, - &police->tcf_rate_est))) { - err = -EINVAL; - goto failure; - } - if (parm->peakrate.rate) { P_tab = qdisc_get_rtab(&parm->peakrate, tb[TCA_POLICE_PEAKRATE]); @@ -205,6 +198,12 @@ override: &police->tcf_lock, est); if (err) goto failure_unlock; + } else if (tb[TCA_POLICE_AVRATE] && + (ret == ACT_P_CREATED || + !gen_estimator_active(&police->tcf_bstats, + &police->tcf_rate_est))) { + err = -EINVAL; + goto failure_unlock; } /* No failure allowed after this point */ diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index f6b4fa9..e36e94a 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -66,11 +66,15 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl = (struct drr_class *)*arg; + struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_DRR_MAX + 1]; u32 quantum; int err; - err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy); + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy); if (err < 0) return err; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 4c8d9f4..905fda5 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -111,7 +111,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, if (sctp_addip_enable) { auth_chunks->chunks[0] = SCTP_CID_ASCONF; auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; - auth_chunks->param_hdr.length += htons(2); + auth_chunks->param_hdr.length = + htons(sizeof(sctp_paramhdr_t) + 2); } } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index b78e3be..c4986d0 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -717,15 +717,20 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, static int sctp_ctl_sock_init(void) { int err; - sa_family_t family; + sa_family_t family = PF_INET; if (sctp_get_pf_specific(PF_INET6)) family = PF_INET6; - else - family = PF_INET; err = inet_ctl_sock_create(&sctp_ctl_sock, family, SOCK_SEQPACKET, IPPROTO_SCTP, &init_net); + + /* If IPv6 socket could not be created, try the IPv4 socket */ + if (err < 0 && family == PF_INET6) + err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET, + SOCK_SEQPACKET, IPPROTO_SCTP, + &init_net); + if (err < 0) { printk(KERN_ERR "SCTP: Failed to create the SCTP control socket.\n"); @@ -1322,9 +1327,8 @@ SCTP_STATIC __init int sctp_init(void) out: return status; 
err_v6_add_protocol: - sctp_v6_del_protocol(); -err_add_protocol: sctp_v4_del_protocol(); +err_add_protocol: inet_ctl_sock_destroy(sctp_ctl_sock); err_ctl_sock_init: sctp_v6_protosw_exit(); @@ -1335,7 +1339,6 @@ err_protosw_init: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); - list_del(&sctp_af_inet.list); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); @@ -1383,7 +1386,6 @@ SCTP_STATIC __exit void sctp_exit(void) sctp_v4_pf_exit(); sctp_sysctl_unregister(); - list_del(&sctp_af_inet.list); free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index e1d6076..b5495ae 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -787,36 +787,48 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { - struct sctp_operr_chunk *operr_chunk; struct sctp_errhdr *err_hdr; + struct sctp_ulpevent *ev; - operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr; - err_hdr = &operr_chunk->err_hdr; + while (chunk->chunk_end > chunk->skb->data) { + err_hdr = (struct sctp_errhdr *)(chunk->skb->data); - switch (err_hdr->cause) { - case SCTP_ERROR_UNKNOWN_CHUNK: - { - struct sctp_chunkhdr *unk_chunk_hdr; + ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, + GFP_ATOMIC); + if (!ev) + return; - unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable; - switch (unk_chunk_hdr->type) { - /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an - * ERROR chunk reporting that it did not recognized the ASCONF - * chunk type, the sender of the ASCONF MUST NOT send any - * further ASCONF chunks and MUST stop its T-4 timer. - */ - case SCTP_CID_ASCONF: - asoc->peer.asconf_capable = 0; - sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, + sctp_ulpq_tail_event(&asoc->ulpq, ev); + + switch (err_hdr->cause) { + case SCTP_ERROR_UNKNOWN_CHUNK: + { + sctp_chunkhdr_t *unk_chunk_hdr; + + unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable; + switch (unk_chunk_hdr->type) { + /* ADDIP 4.1 A9) If the peer responds to an ASCONF with + * an ERROR chunk reporting that it did not recognized + * the ASCONF chunk type, the sender of the ASCONF MUST + * NOT send any further ASCONF chunks and MUST stop its + * T-4 timer. 
+ */ + case SCTP_CID_ASCONF: + if (asoc->peer.asconf_capable == 0) + break; + + asoc->peer.asconf_capable = 0; + sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); + break; + default: + break; + } break; + } default: break; } - break; - } - default: - break; } } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 3a0cd07..f88dfde 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3163,7 +3163,6 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - struct sctp_ulpevent *ev; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); @@ -3173,21 +3172,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep, return sctp_sf_violation_chunklen(ep, asoc, type, arg, commands); - while (chunk->chunk_end > chunk->skb->data) { - ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, - GFP_ATOMIC); - if (!ev) - goto nomem; + sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, + SCTP_CHUNK(chunk)); - sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, - SCTP_ULPEVENT(ev)); - sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, - SCTP_CHUNK(chunk)); - } return SCTP_DISPOSITION_CONSUME; - -nomem: - return SCTP_DISPOSITION_NOMEM; } /* diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 385f427..ff50a05 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -293,11 +293,6 @@ static void rpc_make_runnable(struct rpc_task *task) rpc_clear_queued(task); if (rpc_test_and_set_running(task)) return; - /* We might have raced */ - if (RPC_IS_QUEUED(task)) { - rpc_clear_running(task); - return; - } if (RPC_IS_ASYNC(task)) { int status; @@ -607,7 +602,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) */ static void __rpc_execute(struct rpc_task *task) { - int status = 0; + struct rpc_wait_queue *queue; + int task_is_async = RPC_IS_ASYNC(task); + int status = 0; dprintk("RPC: %5u __rpc_execute flags=0x%x\n", task->tk_pid, task->tk_flags); @@ -647,15 +644,25 @@ static void __rpc_execute(struct rpc_task *task) */ if (!RPC_IS_QUEUED(task)) continue; - rpc_clear_running(task); - if (RPC_IS_ASYNC(task)) { - /* Careful! we may have raced... */ - if (RPC_IS_QUEUED(task)) - return; - if (rpc_test_and_set_running(task)) - return; + /* + * The queue->lock protects against races with + * rpc_make_runnable(). + * + * Note that once we clear RPC_TASK_RUNNING on an asynchronous + * rpc_task, rpc_make_runnable() can assign it to a + * different workqueue. We therefore cannot assume that the + * rpc_task pointer may still be dereferenced. + */ + queue = task->tk_waitqueue; + spin_lock_bh(&queue->lock); + if (!RPC_IS_QUEUED(task)) { + spin_unlock_bh(&queue->lock); continue; } + rpc_clear_running(task); + spin_unlock_bh(&queue->lock); + if (task_is_async) + return; /* sync task: sleep here */ dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 29e401b..62098d1 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -663,7 +663,7 @@ void xprt_connect(struct rpc_task *task) xprt, (xprt_connected(xprt) ? 
"is" : "is not")); if (!xprt_bound(xprt)) { - task->tk_status = -EIO; + task->tk_status = -EAGAIN; return; } if (!xprt_lock_write(xprt, task)) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 5cbb404..29c71e6 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -467,7 +467,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, int err, sent = 0; if (unlikely(!sock)) - return -ENOTCONN; + return -ENOTSOCK; clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); if (base != 0) { @@ -577,6 +577,8 @@ static int xs_udp_send_request(struct rpc_task *task) req->rq_svec->iov_base, req->rq_svec->iov_len); + if (!xprt_bound(xprt)) + return -ENOTCONN; status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen, xdr, @@ -594,6 +596,10 @@ static int xs_udp_send_request(struct rpc_task *task) } switch (status) { + case -ENOTSOCK: + status = -ENOTCONN; + /* Should we call xs_close() here? */ + break; case -EAGAIN: xs_nospace(task); break; @@ -693,6 +699,10 @@ static int xs_tcp_send_request(struct rpc_task *task) } switch (status) { + case -ENOTSOCK: + status = -ENOTCONN; + /* Should we call xs_close() here? */ + break; case -EAGAIN: xs_nospace(task); break; @@ -1523,7 +1533,7 @@ static void xs_udp_connect_worker4(struct work_struct *work) struct socket *sock = transport->sock; int err, status = -EIO; - if (xprt->shutdown || !xprt_bound(xprt)) + if (xprt->shutdown) goto out; /* Start by resetting any existing state */ @@ -1564,7 +1574,7 @@ static void xs_udp_connect_worker6(struct work_struct *work) struct socket *sock = transport->sock; int err, status = -EIO; - if (xprt->shutdown || !xprt_bound(xprt)) + if (xprt->shutdown) goto out; /* Start by resetting any existing state */ @@ -1648,6 +1658,9 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) write_unlock_bh(&sk->sk_callback_lock); } + if (!xprt_bound(xprt)) + return -ENOTCONN; + /* Tell the socket layer to start connecting... 
*/ xprt->stat.connect_count++; xprt->stat.connect_start = jiffies; @@ -1668,7 +1681,7 @@ static void xs_tcp_connect_worker4(struct work_struct *work) struct socket *sock = transport->sock; int err, status = -EIO; - if (xprt->shutdown || !xprt_bound(xprt)) + if (xprt->shutdown) goto out; if (!sock) { @@ -1728,7 +1741,7 @@ static void xs_tcp_connect_worker6(struct work_struct *work) struct socket *sock = transport->sock; int err, status = -EIO; - if (xprt->shutdown || !xprt_bound(xprt)) + if (xprt->shutdown) goto out; if (!sock) { diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c index 5e685f7..72273ab 100644 --- a/net/wimax/id-table.c +++ b/net/wimax/id-table.c @@ -94,12 +94,13 @@ struct wimax_dev *wimax_dev_get_by_genl_info( list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { if (wimax_dev->net_dev->ifindex == ifindex) { dev_hold(wimax_dev->net_dev); - break; + goto found; } } - if (wimax_dev == NULL) - d_printf(1, NULL, "wimax: no devices found with ifindex %d\n", - ifindex); + wimax_dev = NULL; + d_printf(1, NULL, "wimax: no devices found with ifindex %d\n", + ifindex); +found: spin_unlock(&wimax_id_table_lock); d_fnend(3, NULL, "(info %p ifindex %d) = %p\n", info, ifindex, wimax_dev); diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index e28e2b8..092ae6f 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -102,3 +102,13 @@ config LIB80211_CRYPT_CCMP config LIB80211_CRYPT_TKIP tristate + +config LIB80211_DEBUG + bool "lib80211 debugging messages" + depends on LIB80211 + default n + ---help--- + You can enable this if you want verbose debugging messages + from lib80211. + + If unsure, say N. diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c index db42819..2301dc1 100644 --- a/net/wireless/lib80211_crypt_ccmp.c +++ b/net/wireless/lib80211_crypt_ccmp.c @@ -337,6 +337,7 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += 8; if (ccmp_replay_check(pn, key->rx_pn)) { +#ifdef CONFIG_LIB80211_DEBUG if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: replay detected: STA=%pM " "previous PN %02x%02x%02x%02x%02x%02x " @@ -346,6 +347,7 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); } +#endif key->dot11RSNAStatsCCMPReplays++; return -4; } diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index 7e8e22b..c362873 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c @@ -465,12 +465,14 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += 8; if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { +#ifdef CONFIG_LIB80211_DEBUG if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: replay detected: STA=%pM" " previous TSC %08x%04x received TSC " "%08x%04x\n", hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); } +#endif tkey->dot11RSNAStatsTKIPReplays++; return -4; } @@ -505,10 +507,12 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) * it needs to be recalculated for the next packet. 
*/ tkey->rx_phase1_done = 0; } +#ifdef CONFIG_LIB80211_DEBUG if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: ICV error detected: STA=" "%pM\n", hdr->addr2); } +#endif tkey->dot11RSNAStatsTKIPICVErrors++; return -5; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 1e728ff..31b807a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1908,6 +1908,11 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, if (err) return err; + if (!drv->ops->get_mesh_params) { + err = -EOPNOTSUPP; + goto out; + } + /* Get the mesh params */ rtnl_lock(); err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params); @@ -2017,6 +2022,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) if (err) return err; + if (!drv->ops->set_mesh_params) { + err = -EOPNOTSUPP; + goto out; + } + /* This makes sure that there aren't more than 32 mesh config * parameters (otherwise our bitfield scheme would not work.) */ BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32); @@ -2061,6 +2071,7 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask); rtnl_unlock(); + out: /* cleanup */ cfg80211_put_dev(drv); dev_put(dev); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 85c9034..bd0a16c 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -380,7 +380,8 @@ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; - if (freq_diff <= 0 || freq_range->max_bandwidth_khz > freq_diff) + if (freq_range->end_freq_khz <= freq_range->start_freq_khz || + freq_range->max_bandwidth_khz > freq_diff) return false; return true; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index e25ff62..62a5425 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -748,12 +748,51 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision) schedule_work(&net->xfrm.state_hash_work); } +static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, + struct flowi *fl, unsigned short family, + xfrm_address_t *daddr, xfrm_address_t *saddr, + struct xfrm_state **best, int *acq_in_progress, + int *error) +{ + /* Resolution logic: + * 1. There is a valid state with matching selector. Done. + * 2. Valid state with inappropriate selector. Skip. + * + * Entering area of "sysdeps". + * + * 3. If state is not valid, selector is temporary, it selects + * only session which triggered previous resolution. Key + * manager will do something to install a state with proper + * selector. 
+ */ + if (x->km.state == XFRM_STATE_VALID) { + if ((x->sel.family && + !xfrm_selector_match(&x->sel, fl, x->sel.family)) || + !security_xfrm_state_pol_flow_match(x, pol, fl)) + return; + + if (!*best || + (*best)->km.dying > x->km.dying || + ((*best)->km.dying == x->km.dying && + (*best)->curlft.add_time < x->curlft.add_time)) + *best = x; + } else if (x->km.state == XFRM_STATE_ACQ) { + *acq_in_progress = 1; + } else if (x->km.state == XFRM_STATE_ERROR || + x->km.state == XFRM_STATE_EXPIRED) { + if (xfrm_selector_match(&x->sel, fl, x->sel.family) && + security_xfrm_state_pol_flow_match(x, pol, fl)) + *error = -ESRCH; + } +} + struct xfrm_state * xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, struct flowi *fl, struct xfrm_tmpl *tmpl, struct xfrm_policy *pol, int *err, unsigned short family) { + static xfrm_address_t saddr_wildcard = { }; struct net *net = xp_net(pol); unsigned int h; struct hlist_node *entry; @@ -773,40 +812,27 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, xfrm_state_addr_check(x, daddr, saddr, family) && tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && - (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) { - /* Resolution logic: - 1. There is a valid state with matching selector. - Done. - 2. Valid state with inappropriate selector. Skip. - - Entering area of "sysdeps". - - 3. If state is not valid, selector is temporary, - it selects only session which triggered - previous resolution. Key manager will do - something to install a state with proper - selector. - */ - if (x->km.state == XFRM_STATE_VALID) { - if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) || - !security_xfrm_state_pol_flow_match(x, pol, fl)) - continue; - if (!best || - best->km.dying > x->km.dying || - (best->km.dying == x->km.dying && - best->curlft.add_time < x->curlft.add_time)) - best = x; - } else if (x->km.state == XFRM_STATE_ACQ) { - acquire_in_progress = 1; - } else if (x->km.state == XFRM_STATE_ERROR || - x->km.state == XFRM_STATE_EXPIRED) { - if (xfrm_selector_match(&x->sel, fl, x->sel.family) && - security_xfrm_state_pol_flow_match(x, pol, fl)) - error = -ESRCH; - } - } + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) + xfrm_state_look_at(pol, x, fl, family, daddr, saddr, + &best, &acquire_in_progress, &error); + } + if (best) + goto found; + + h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); + hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { + if (x->props.family == family && + x->props.reqid == tmpl->reqid && + !(x->props.flags & XFRM_STATE_WILDRECV) && + xfrm_state_addr_check(x, daddr, saddr, family) && + tmpl->mode == x->props.mode && + tmpl->id.proto == x->id.proto && + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) + xfrm_state_look_at(pol, x, fl, family, daddr, saddr, + &best, &acquire_in_progress, &error); } +found: x = best; if (!x && !error && !acquire_in_progress) { if (tmpl->id.spi && |
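The hunk above factors the candidate-selection logic of xfrm_state_find() into a helper (xfrm_state_look_at()) and adds a second hash walk keyed by a zeroed, wildcard source address when the first walk finds nothing. A minimal userspace sketch of that two-pass pattern follows; it is illustrative only, and every name in it (addr, state, look_at, scan) is a hypothetical stand-in rather than kernel API.

/*
 * Illustrative sketch only -- not part of the patch above.  It mimics the
 * shape of the xfrm_state_find() change: per-candidate scoring lives in a
 * helper, and the lookup is retried with a wildcard source address when the
 * first hash pass finds no usable state.  Types are simplified stand-ins.
 */
#include <stdio.h>

struct addr { unsigned int v; };        /* stand-in for xfrm_address_t */

struct state {
    struct addr daddr, saddr;
    int valid;                          /* stand-in for km.state == VALID */
    long add_time;                      /* stand-in for curlft.add_time   */
    struct state *next;
};

/* stand-in for xfrm_state_look_at(): remember the best valid candidate */
static void look_at(struct state *x, struct state **best)
{
    if (!x->valid)
        return;
    if (!*best || (*best)->add_time < x->add_time)
        *best = x;
}

/* one hash-chain walk; in the kernel this is an hlist_for_each_entry() loop */
static struct state *scan(struct state *chain, struct addr daddr,
                          struct addr saddr)
{
    struct state *x, *best = NULL;

    for (x = chain; x; x = x->next)
        if (x->daddr.v == daddr.v && x->saddr.v == saddr.v)
            look_at(x, &best);
    return best;
}

int main(void)
{
    static struct addr wildcard;        /* zeroed, like saddr_wildcard */
    struct state b = { {10}, {0},  1, 200, NULL };  /* wildcard-saddr state */
    struct state a = { {10}, {99}, 0, 100, &b };    /* not valid, skipped   */
    struct addr daddr = {10}, saddr = {1};
    struct state *x;

    /* first pass: exact source address */
    x = scan(&a, daddr, saddr);
    /* second pass: fall back to a wildcard source, as the patch adds */
    if (!x)
        x = scan(&a, daddr, wildcard);

    printf("found: %s\n", x ? "wildcard-saddr state" : "none");
    return 0;
}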