Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c            |  83
-rw-r--r--  net/core/fib_rules.c      |   4
-rw-r--r--  net/core/filter.c         |   5
-rw-r--r--  net/core/neighbour.c      |  14
-rw-r--r--  net/core/net-sysfs.c      |   7
-rw-r--r--  net/core/net-sysfs.h      |   2
-rw-r--r--  net/core/net_namespace.c  |   3
-rw-r--r--  net/core/netpoll.c        |   2
-rw-r--r--  net/core/pktgen.c         |  16
-rw-r--r--  net/core/rtnetlink.c      |   3
-rw-r--r--  net/core/skbuff.c         |  26
-rw-r--r--  net/core/sock.c           |   5
-rw-r--r--  net/core/user_dma.c       |   2
13 files changed, 116 insertions, 56 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ed49da5..fca23a3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -119,6 +119,7 @@
 #include <linux/err.h>
 #include <linux/ctype.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 
 #include "net-sysfs.h"
 
@@ -162,7 +163,7 @@ struct net_dma {
 	struct dma_client client;
 	spinlock_t lock;
 	cpumask_t channel_mask;
-	struct dma_chan *channels[NR_CPUS];
+	struct dma_chan **channels;
 };
 
 static enum dma_state_client
@@ -453,7 +454,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
 			memset(s[i].name, 0, sizeof(s[i].name));
-			strcpy(s[i].name, name);
+			strlcpy(s[i].name, name, IFNAMSIZ);
 			memcpy(&s[i].map, map, sizeof(s[i].map));
 			break;
 		}
@@ -478,7 +479,7 @@ int netdev_boot_setup_check(struct net_device *dev)
 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
-		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
+		    !strcmp(dev->name, s[i].name)) {
 			dev->irq	= s[i].map.irq;
 			dev->base_addr	= s[i].map.base_addr;
 			dev->mem_start	= s[i].map.mem_start;
@@ -903,7 +904,11 @@ int dev_change_name(struct net_device *dev, char *newname)
 		strlcpy(dev->name, newname, IFNAMSIZ);
 
 rollback:
-	device_rename(&dev->dev, dev->name);
+	err = device_rename(&dev->dev, dev->name);
+	if (err) {
+		memcpy(dev->name, oldname, IFNAMSIZ);
+		return err;
+	}
 
 	write_lock_bh(&dev_base_lock);
 	hlist_del(&dev->name_hlist);
@@ -994,6 +999,8 @@ int dev_open(struct net_device *dev)
 {
 	int ret = 0;
 
+	ASSERT_RTNL();
+
 	/*
 	 *	Is it already up?
 	 */
@@ -1060,6 +1067,8 @@ int dev_open(struct net_device *dev)
  */
 int dev_close(struct net_device *dev)
 {
+	ASSERT_RTNL();
+
 	might_sleep();
 
 	if (!(dev->flags & IFF_UP))
@@ -1354,6 +1363,29 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+	return ((features & NETIF_F_GEN_CSUM) ||
+		((features & NETIF_F_IP_CSUM) &&
+		 protocol == htons(ETH_P_IP)) ||
+		((features & NETIF_F_IPV6_CSUM) &&
+		 protocol == htons(ETH_P_IPV6)));
+}
+
+static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
+{
+	if (can_checksum_protocol(dev->features, skb->protocol))
+		return true;
+
+	if (skb->protocol == htons(ETH_P_8021Q)) {
+		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+		if (can_checksum_protocol(dev->features & dev->vlan_features,
+					  veh->h_vlan_encapsulated_proto))
+			return true;
+	}
+
+	return false;
+}
 
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
@@ -1632,14 +1664,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		skb_set_transport_header(skb, skb->csum_start -
 					      skb_headroom(skb));
-
-		if (!(dev->features & NETIF_F_GEN_CSUM) &&
-		    !((dev->features & NETIF_F_IP_CSUM) &&
-		      skb->protocol == htons(ETH_P_IP)) &&
-		    !((dev->features & NETIF_F_IPV6_CSUM) &&
-		      skb->protocol == htons(ETH_P_IPV6)))
-			if (skb_checksum_help(skb))
-				goto out_kfree_skb;
+		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
+			goto out_kfree_skb;
 	}
 
 gso:
@@ -2051,6 +2077,10 @@ int netif_receive_skb(struct sk_buff *skb)
 
 	rcu_read_lock();
 
+	/* Don't receive packets in an exiting network namespace */
+	if (!net_alive(dev_net(skb->dev)))
+		goto out;
+
 #ifdef CONFIG_NET_CLS_ACT
 	if (skb->tc_verd & TC_NCLS) {
 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -2444,7 +2474,7 @@ static struct netif_rx_stats *softnet_get_online(loff_t *pos)
 {
 	struct netif_rx_stats *rc = NULL;
 
-	while (*pos < NR_CPUS)
+	while (*pos < nr_cpu_ids)
 		if (cpu_online(*pos)) {
 			rc = &per_cpu(netdev_rx_stat, *pos);
 			break;
@@ -2943,7 +2973,7 @@ EXPORT_SYMBOL(dev_unicast_delete);
 /**
  *	dev_unicast_add	- add a secondary unicast address
  *	@dev: device
- *	@addr: address to delete
+ *	@addr: address to add
  *	@alen: length of @addr
  *
  *	Add a secondary unicast address to the device or increase
@@ -3133,7 +3163,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	 *	Load in the correct multicast list now the flags have changed.
 	 */
 
-	if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
+	if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
 		dev->change_rx_flags(dev, IFF_MULTICAST);
 
 	dev_set_rx_mode(dev);
@@ -3776,6 +3806,7 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
 		goto err_uninit;
@@ -4208,7 +4239,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	}
 
 	/* Fixup kobjects */
-	err = device_rename(&dev->dev, dev->name);
+	netdev_unregister_kobject(dev);
+	err = netdev_register_kobject(dev);
 	WARN_ON(err);
 
 	/* Add the device back in the hashes */
@@ -4324,7 +4356,7 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 	spin_lock(&net_dma->lock);
 	switch (state) {
 	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < NR_CPUS; i++)
+		for (i = 0; i < nr_cpu_ids; i++)
 			if (net_dma->channels[i] == chan) {
 				found = 1;
 				break;
@@ -4339,7 +4371,7 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 		}
 		break;
 	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < NR_CPUS; i++)
+		for (i = 0; i < nr_cpu_ids; i++)
 			if (net_dma->channels[i] == chan) {
 				found = 1;
 				pos = i;
@@ -4366,6 +4398,13 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
  */
 static int __init netdev_dma_register(void)
 {
+	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
+								GFP_KERNEL);
+	if (unlikely(!net_dma.channels)) {
+		printk(KERN_NOTICE
+				"netdev_dma: no memory for net_dma.channels\n");
+		return -ENOMEM;
+	}
 	spin_lock_init(&net_dma.lock);
 	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
 	dma_async_client_register(&net_dma.client);
@@ -4471,17 +4510,19 @@ static void __net_exit default_device_exit(struct net *net)
 	rtnl_lock();
 	for_each_netdev_safe(net, dev, next) {
 		int err;
+		char fb_name[IFNAMSIZ];
 
 		/* Ignore unmoveable devices (i.e. loopback) */
 		if (dev->features & NETIF_F_NETNS_LOCAL)
 			continue;
 
 		/* Push remaing network devices to init_net */
-		err = dev_change_net_namespace(dev, &init_net, "dev%d");
+		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+		err = dev_change_net_namespace(dev, &init_net, fb_name);
 		if (err) {
-			printk(KERN_WARNING "%s: failed to move %s to init_net: %d\n",
+			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
 				__func__, dev->name, err);
-			unregister_netdevice(dev);
+			BUG();
 		}
 	}
 	rtnl_unlock();
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index e3e9ab0..277a230 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -226,7 +226,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
 	ops = lookup_rules_ops(net, frh->family);
 	if (ops == NULL) {
-		err = EAFNOSUPPORT;
+		err = -EAFNOSUPPORT;
 		goto errout;
 	}
 
@@ -365,7 +365,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
 	ops = lookup_rules_ops(net, frh->family);
 	if (ops == NULL) {
-		err = EAFNOSUPPORT;
+		err = -EAFNOSUPPORT;
 		goto errout;
 	}
diff --git a/net/core/filter.c b/net/core/filter.c
index f5f3cf60..df37443 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -68,7 +68,6 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
  *	sk_filter - run a packet through a socket filter
  *	@sk: sock associated with &sk_buff
  *	@skb: buffer to filter
- *	@needlock: set to 1 if the sock is not locked by caller.
  *
  * Run the filter code and then cut skb->data to correct size returned by
  * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
@@ -213,7 +212,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
-				A = ntohl(get_unaligned((__be32 *)ptr));
+				A = get_unaligned_be32(ptr);
 				continue;
 			}
 			break;
@@ -222,7 +221,7 @@ load_w:
 load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
-				A = ntohs(get_unaligned((__be16 *)ptr));
+				A = get_unaligned_be16(ptr);
 				continue;
 			}
 			break;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 75075c3..65f01f7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1430,11 +1430,10 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
 		panic("cannot create neighbour cache statistics");
 
 #ifdef CONFIG_PROC_FS
-	tbl->pde = proc_create(tbl->id, 0, init_net.proc_net_stat,
-			       &neigh_stat_seq_fops);
+	tbl->pde = proc_create_data(tbl->id, 0, init_net.proc_net_stat,
+				    &neigh_stat_seq_fops, tbl);
 	if (!tbl->pde)
 		panic("cannot create neighbour proc dir entry");
-	tbl->pde->data = tbl;
 #endif
 
 	tbl->hash_mask = 1;
@@ -1715,7 +1714,8 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, nest);
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
 }
 
 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
@@ -2058,9 +2058,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 			goto nla_put_failure;
 	}
 
-	ci.ndm_used	 = now - neigh->used;
-	ci.ndm_confirmed = now - neigh->confirmed;
-	ci.ndm_updated	 = now - neigh->updated;
+	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
+	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
+	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
 	read_unlock_bh(&neigh->lock);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4e7b847..90e2177 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -449,7 +449,6 @@ int netdev_register_kobject(struct net_device *net)
 	struct device *dev = &(net->dev);
 	struct attribute_group **groups = net->sysfs_groups;
 
-	device_initialize(dev);
 	dev->class = &net_class;
 	dev->platform_data = net;
 	dev->groups = groups;
@@ -470,6 +469,12 @@ int netdev_register_kobject(struct net_device *net)
 	return device_add(dev);
 }
 
+void netdev_initialize_kobject(struct net_device *net)
+{
+	struct device *device = &(net->dev);
+	device_initialize(device);
+}
+
 int netdev_kobject_init(void)
 {
 	return class_register(&net_class);
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index f5f108d..14e7524 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,5 +4,5 @@
 int netdev_kobject_init(void);
 int netdev_register_kobject(struct net_device *);
 void netdev_unregister_kobject(struct net_device *);
-
+void netdev_initialize_kobject(struct net_device *);
 #endif
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 72b4c18..7c52fe2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -140,6 +140,9 @@ static void cleanup_net(struct work_struct *work)
 	struct pernet_operations *ops;
 	struct net *net;
 
+	/* Be very certain incoming network packets will not find us */
+	rcu_barrier();
+
 	net = container_of(work, struct net, work);
 	mutex_lock(&net_mutex);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b04d643..8fb134d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -419,7 +419,7 @@ static void arp_reply(struct sk_buff *skb)
 		return;
 
 	size = arp_hdr_len(skb->dev);
-	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
+	send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
 			    LL_RESERVED_SPACE(np->dev));
 
 	if (!send_skb)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a803b44..fdf5377 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -390,6 +390,7 @@ struct pktgen_thread {
 	int cpu;
 
 	wait_queue_head_t queue;
+	struct completion start_done;
 };
 
 #define REMOVE 1
@@ -3414,6 +3415,7 @@ static int pktgen_thread_worker(void *arg)
 	BUG_ON(smp_processor_id() != cpu);
 
 	init_waitqueue_head(&t->queue);
+	complete(&t->start_done);
 
 	pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
 
@@ -3570,15 +3572,14 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 	if (err)
 		goto out1;
 
-	pkt_dev->entry = proc_create(ifname, 0600,
-				     pg_proc_dir, &pktgen_if_fops);
+	pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir,
+					  &pktgen_if_fops, pkt_dev);
 	if (!pkt_dev->entry) {
 		printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
 		       PG_PROC_DIR, ifname);
 		err = -EINVAL;
 		goto out2;
 	}
-	pkt_dev->entry->data = pkt_dev;
 #ifdef CONFIG_XFRM
 	pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
 	pkt_dev->ipsproto = IPPROTO_ESP;
@@ -3616,6 +3617,7 @@ static int __init pktgen_create_thread(int cpu)
 
 	INIT_LIST_HEAD(&t->if_list);
 	list_add_tail(&t->th_list, &pktgen_threads);
+	init_completion(&t->start_done);
 
 	p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
 	if (IS_ERR(p)) {
@@ -3628,7 +3630,8 @@ static int __init pktgen_create_thread(int cpu)
 	kthread_bind(p, cpu);
 	t->tsk = p;
 
-	pe = proc_create(t->tsk->comm, 0600, pg_proc_dir, &pktgen_thread_fops);
+	pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir,
+			      &pktgen_thread_fops, t);
 	if (!pe) {
 		printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
 		       PG_PROC_DIR, t->tsk->comm);
@@ -3638,9 +3641,8 @@ static int __init pktgen_create_thread(int cpu)
 		return -EINVAL;
 	}
 
-	pe->data = t;
-
 	wake_up_process(p);
+	wait_for_completion(&t->start_done);
 
 	return 0;
 }
@@ -3716,8 +3718,6 @@ static int __init pg_init(void)
 		return -EINVAL;
 	}
 
-	pe->data = NULL;
-
 	/* Register us to receive netdevice events */
 	register_netdevice_notifier(&pktgen_notifier_block);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cf857c4..a9a7721 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -498,7 +498,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
 	return nla_nest_end(skb, mx);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, mx);
+	nla_nest_cancel(skb, mx);
+	return -EMSGSIZE;
 }
 
 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4fe605f..3666216 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -200,7 +200,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		goto nodata;
 
 	/*
-	 * See comment in sk_buff definition, just before the 'tail' member
+	 * Only clear those fields we need to clear, not those that we will
+	 * actually initialise below. Hence, don't put any more fields after
+	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->truesize = size + sizeof(struct sk_buff);
@@ -1290,12 +1292,14 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 {
 	unsigned int nr_pages = spd->nr_pages;
 	unsigned int poff, plen, len, toff, tlen;
-	int headlen, seg;
+	int headlen, seg, error = 0;
 
 	toff = *offset;
 	tlen = *total_len;
-	if (!tlen)
+	if (!tlen) {
+		error = 1;
 		goto err;
+	}
 
 	/*
 	 * if the offset is greater than the linear part, go directly to
@@ -1337,7 +1341,8 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		 * just jump directly to update and return, no point
 		 * in going over fragments when the output is full.
 		 */
-		if (spd_fill_page(spd, virt_to_page(p), plen, poff, skb))
+		error = spd_fill_page(spd, virt_to_page(p), plen, poff, skb);
+		if (error)
 			goto done;
 
 		tlen -= plen;
@@ -1367,7 +1372,8 @@ map_frag:
 		if (!plen)
 			break;
 
-		if (spd_fill_page(spd, f->page, plen, poff, skb))
+		error = spd_fill_page(spd, f->page, plen, poff, skb);
+		if (error)
 			break;
 
 		tlen -= plen;
@@ -1380,7 +1386,10 @@ done:
 	return 0;
 
 err:
-	return 1;
+	/* update the offset to reflect the linear part skip, if any */
+	if (!error)
+		*offset = toff;
+	return error;
 }
 
 /*
@@ -1443,6 +1452,7 @@ done:
 
 	if (spd.nr_pages) {
 		int ret;
+		struct sock *sk = __skb->sk;
 
 		/*
 		 * Drop the socket lock, otherwise we have reverse
@@ -1453,9 +1463,9 @@ done:
 		 * we call into ->sendpage() with the i_mutex lock held
 		 * and networking will grab the socket lock.
 		 */
-		release_sock(__skb->sk);
+		release_sock(sk);
 		ret = splice_to_pipe(pipe, &spd);
-		lock_sock(__skb->sk);
+		lock_sock(sk);
 		return ret;
 	}
diff --git a/net/core/sock.c b/net/core/sock.c
index 5dbb81b..88094cb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -228,11 +228,12 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 		static int warned __read_mostly;
 
 		*timeo_p = 0;
-		if (warned < 10 && net_ratelimit())
+		if (warned < 10 && net_ratelimit()) {
 			warned++;
 			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
 			       "tries to set negative timeout\n",
 				current->comm, task_pid_nr(current));
+		}
 		return 0;
 	}
 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
@@ -269,7 +270,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	int err = 0;
 	int skb_len;
 
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
+	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 0ad1cd5..c77aff9 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -75,7 +75,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
-		if ((copy = end - offset) > 0) {
+		if (copy > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 			struct page *page = frag->page;
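
A side note on the proc_create_data() conversions above (neighbour.c, pktgen.c): creating an entry with proc_create() and assigning pde->data afterwards leaves a window in which the file is already visible but its private data is still NULL; proc_create_data() attaches the data before the entry is published. A minimal sketch of the pattern, with hypothetical names (example_dir, example_fops, example_state) that are not part of this commit:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/proc_fs.h>

	static struct proc_dir_entry *example_dir;		/* hypothetical parent directory */
	static const struct file_operations example_fops;	/* hypothetical file operations */
	static int example_state;				/* hypothetical private data */

	static int __init example_proc_init(void)
	{
		struct proc_dir_entry *pde;

		/* Old, racy shape this merge removes:
		 *	pde = proc_create("example", 0600, example_dir, &example_fops);
		 *	pde->data = &example_state;	// readers may open the file before this runs
		 */

		/* New shape: private data is in place before the entry becomes visible. */
		pde = proc_create_data("example", 0600, example_dir, &example_fops,
				       &example_state);
		if (!pde)
			return -ENOMEM;
		return 0;
	}

	static void __exit example_proc_exit(void)
	{
		remove_proc_entry("example", example_dir);
	}

	module_init(example_proc_init);
	module_exit(example_proc_exit);
	MODULE_LICENSE("GPL");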
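
Similarly, the pktgen hunks (the new struct completion start_done, complete() in pktgen_thread_worker(), wait_for_completion() after wake_up_process()) make thread creation wait until the worker has finished its per-CPU setup. The handshake reduced to its bare shape, with hypothetical names (example_ctx, example_worker) — a sketch, not the pktgen code itself:

	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	struct example_ctx {
		struct completion start_done;
		struct task_struct *tsk;
	};

	static int example_worker(void *arg)
	{
		struct example_ctx *ctx = arg;

		/* ...per-thread setup the creator must not race with... */
		complete(&ctx->start_done);	/* tell the creator we are ready */

		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
		return 0;
	}

	static int example_create_thread(struct example_ctx *ctx, int cpu)
	{
		struct task_struct *p;

		init_completion(&ctx->start_done);
		p = kthread_create(example_worker, ctx, "example/%d", cpu);
		if (IS_ERR(p))
			return PTR_ERR(p);
		kthread_bind(p, cpu);
		ctx->tsk = p;

		wake_up_process(p);
		/* do not return until the worker has completed its setup */
		wait_for_completion(&ctx->start_done);
		return 0;
	}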