Diffstat (limited to 'net/core')
-rw-r--r--   net/core/datagram.c      |   3
-rw-r--r--   net/core/dev.c           | 656
-rw-r--r--   net/core/drop_monitor.c  |  14
-rw-r--r--   net/core/ethtool.c       |  32
-rw-r--r--   net/core/neighbour.c     |  95
-rw-r--r--   net/core/net-sysfs.c     |   2
-rw-r--r--   net/core/net_namespace.c |  35
-rw-r--r--   net/core/netpoll.c       |   6
-rw-r--r--   net/core/pktgen.c        | 700
-rw-r--r--   net/core/rtnetlink.c     |  37
-rw-r--r--   net/core/skbuff.c        |   3
-rw-r--r--   net/core/sock.c          |  16
12 files changed, 837 insertions, 762 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c index b0fe692..1c6cf3a 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -55,6 +55,7 @@ #include <net/checksum.h> #include <net/sock.h> #include <net/tcp_states.h> +#include <trace/events/skb.h> /* * Is a socket 'connection oriented' ? @@ -284,6 +285,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, int i, copy = start - offset; struct sk_buff *frag_iter; + trace_skb_copy_datagram_iovec(skb, len); + /* Copy header. */ if (copy > 0) { if (copy > len) diff --git a/net/core/dev.c b/net/core/dev.c index 278d489..8494547 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -191,7 +191,6 @@ static struct list_head ptype_all __read_mostly; /* Taps */ * semaphore held. */ DEFINE_RWLOCK(dev_base_lock); - EXPORT_SYMBOL(dev_base_lock); #define NETDEV_HASHBITS 8 @@ -248,6 +247,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain); */ DEFINE_PER_CPU(struct softnet_data, softnet_data); +EXPORT_PER_CPU_SYMBOL(softnet_data); #ifdef CONFIG_LOCKDEP /* @@ -269,10 +269,10 @@ static const unsigned short netdev_lock_type[] = ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, - ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY, + ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; -static const char *netdev_lock_name[] = +static const char *const netdev_lock_name[] = {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", @@ -287,7 +287,7 @@ static const char *netdev_lock_name[] = "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", - "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY", + "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; @@ -381,6 +381,7 @@ void dev_add_pack(struct packet_type *pt) } spin_unlock_bh(&ptype_lock); } +EXPORT_SYMBOL(dev_add_pack); /** * __dev_remove_pack - remove packet handler @@ -418,6 +419,8 @@ void __dev_remove_pack(struct packet_type *pt) out: spin_unlock_bh(&ptype_lock); } +EXPORT_SYMBOL(__dev_remove_pack); + /** * dev_remove_pack - remove packet handler * @pt: packet type declaration @@ -436,6 +439,7 @@ void dev_remove_pack(struct packet_type *pt) synchronize_net(); } +EXPORT_SYMBOL(dev_remove_pack); /****************************************************************************** @@ -499,6 +503,7 @@ int netdev_boot_setup_check(struct net_device *dev) } return 0; } +EXPORT_SYMBOL(netdev_boot_setup_check); /** @@ -591,6 +596,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name) } return NULL; } +EXPORT_SYMBOL(__dev_get_by_name); /** * dev_get_by_name - find a device by its name @@ -615,6 +621,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name) read_unlock(&dev_base_lock); return dev; } +EXPORT_SYMBOL(dev_get_by_name); /** * __dev_get_by_index - find a device by its ifindex @@ -640,6 +647,7 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex) } return NULL; } +EXPORT_SYMBOL(__dev_get_by_index); /** @@ -664,6 +672,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex) read_unlock(&dev_base_lock); return dev; } 
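As an illustrative aside (not part of the patch), the lookup helpers whose EXPORT_SYMBOL() annotations are being moved next to their definitions above follow the usual reference-counting contract: dev_get_by_name() and dev_get_by_index() take a reference that the caller must drop with dev_put(). A minimal sketch of a caller, assuming init_net and an example interface name:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

static int example_report_mtu(void)
{
	struct net_device *dev;

	dev = dev_get_by_name(&init_net, "eth0");	/* "eth0" is only an example name */
	if (!dev)
		return -ENODEV;

	printk(KERN_INFO "%s: mtu %d\n", dev->name, dev->mtu);

	dev_put(dev);	/* balance the reference taken by dev_get_by_name() */
	return 0;
}

dev_get_by_flags(), exported in the same hunk, follows the same convention, as its kerneldoc above notes.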
+EXPORT_SYMBOL(dev_get_by_index); /** * dev_getbyhwaddr - find a device by its hardware address @@ -693,7 +702,6 @@ struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *h return NULL; } - EXPORT_SYMBOL(dev_getbyhwaddr); struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) @@ -707,7 +715,6 @@ struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) return NULL; } - EXPORT_SYMBOL(__dev_getfirstbyhwtype); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) @@ -721,7 +728,6 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) rtnl_unlock(); return dev; } - EXPORT_SYMBOL(dev_getfirstbyhwtype); /** @@ -736,7 +742,8 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype); * dev_put to indicate they have finished with it. */ -struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask) +struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, + unsigned short mask) { struct net_device *dev, *ret; @@ -752,6 +759,7 @@ struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, u read_unlock(&dev_base_lock); return ret; } +EXPORT_SYMBOL(dev_get_by_flags); /** * dev_valid_name - check if name is okay for network device @@ -777,6 +785,7 @@ int dev_valid_name(const char *name) } return 1; } +EXPORT_SYMBOL(dev_valid_name); /** * __dev_alloc_name - allocate a name for a device @@ -870,6 +879,7 @@ int dev_alloc_name(struct net_device *dev, const char *name) strlcpy(dev->name, buf, IFNAMSIZ); return ret; } +EXPORT_SYMBOL(dev_alloc_name); /** @@ -906,8 +916,7 @@ int dev_change_name(struct net_device *dev, const char *newname) err = dev_alloc_name(dev, newname); if (err < 0) return err; - } - else if (__dev_get_by_name(net, newname)) + } else if (__dev_get_by_name(net, newname)) return -EEXIST; else strlcpy(dev->name, newname, IFNAMSIZ); @@ -970,7 +979,7 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) return 0; } - dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL); + dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); if (!dev->ifalias) return -ENOMEM; @@ -1006,6 +1015,7 @@ void netdev_state_change(struct net_device *dev) rtmsg_ifinfo(RTM_NEWLINK, dev, 0); } } +EXPORT_SYMBOL(netdev_state_change); void netdev_bonding_change(struct net_device *dev) { @@ -1034,6 +1044,7 @@ void dev_load(struct net *net, const char *name) if (!dev && capable(CAP_NET_ADMIN)) request_module("%s", name); } +EXPORT_SYMBOL(dev_load); /** * dev_open - prepare an interface for use. @@ -1118,6 +1129,7 @@ int dev_open(struct net_device *dev) return ret; } +EXPORT_SYMBOL(dev_open); /** * dev_close - shutdown an interface. 
@@ -1184,6 +1196,7 @@ int dev_close(struct net_device *dev) return 0; } +EXPORT_SYMBOL(dev_close); /** @@ -1279,6 +1292,7 @@ rollback: raw_notifier_chain_unregister(&netdev_chain, nb); goto unlock; } +EXPORT_SYMBOL(register_netdevice_notifier); /** * unregister_netdevice_notifier - unregister a network notifier block @@ -1299,6 +1313,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb) rtnl_unlock(); return err; } +EXPORT_SYMBOL(unregister_netdevice_notifier); /** * call_netdevice_notifiers - call all network notifier blocks @@ -1321,11 +1336,13 @@ void net_enable_timestamp(void) { atomic_inc(&netstamp_needed); } +EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { atomic_dec(&netstamp_needed); } +EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp(struct sk_buff *skb) { @@ -1359,7 +1376,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) if ((ptype->dev == dev || !ptype->dev) && (ptype->af_packet_priv == NULL || (struct sock *)ptype->af_packet_priv != skb->sk)) { - struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC); + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) break; @@ -1527,6 +1544,7 @@ out_set_summed: out: return ret; } +EXPORT_SYMBOL(skb_checksum_help); /** * skb_gso_segment - Perform segmentation on skb. @@ -1589,7 +1607,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) return segs; } - EXPORT_SYMBOL(skb_gso_segment); /* Take action when hardware reception checksum errors are detected. */ @@ -1704,7 +1721,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, skb_dst_drop(skb); rc = ops->ndo_start_xmit(skb, dev); - if (rc == 0) + if (rc == NETDEV_TX_OK) txq_trans_update(txq); /* * TODO: if skb_orphan() was called by @@ -1730,7 +1747,7 @@ gso: skb->next = nskb->next; nskb->next = NULL; rc = ops->ndo_start_xmit(nskb, dev); - if (unlikely(rc)) { + if (unlikely(rc != NETDEV_TX_OK)) { nskb->next = skb->next; skb->next = nskb; return rc; @@ -1744,7 +1761,7 @@ gso: out_kfree_skb: kfree_skb(skb); - return 0; + return NETDEV_TX_OK; } static u32 skb_tx_hashrnd; @@ -1755,7 +1772,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); - while (unlikely (hash >= dev->real_num_tx_queues)) + while (unlikely(hash >= dev->real_num_tx_queues)) hash -= dev->real_num_tx_queues; return hash; } @@ -1786,6 +1803,40 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, return netdev_get_tx_queue(dev, queue_index); } +static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, + struct net_device *dev, + struct netdev_queue *txq) +{ + spinlock_t *root_lock = qdisc_lock(q); + int rc; + + spin_lock(root_lock); + if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { + kfree_skb(skb); + rc = NET_XMIT_DROP; + } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && + !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) { + /* + * This is a work-conserving queue; there are no old skbs + * waiting to be sent out; and the qdisc is not running - + * xmit the skb directly. 
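The dev_hard_start_xmit() hunks above compare the driver's return value against NETDEV_TX_OK rather than a bare 0. As an illustrative sketch of that contract (not code from this patch), a trivially "always succeeds" ndo_start_xmit() would look roughly like this:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustration only: consume every skb and report success to the core. */
static int example_ndo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);	/* pretend the hardware sent it */

	return NETDEV_TX_OK;	/* the value dev_hard_start_xmit() now tests for */
}

A real driver instead returns NETDEV_TX_BUSY when it cannot take the packet, which is why the GSO loop above bails out and re-chains the remaining segments whenever rc != NETDEV_TX_OK.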
+ */ + __qdisc_update_bstats(q, skb->len); + if (sch_direct_xmit(skb, q, dev, txq, root_lock)) + __qdisc_run(q); + else + clear_bit(__QDISC_STATE_RUNNING, &q->state); + + rc = NET_XMIT_SUCCESS; + } else { + rc = qdisc_enqueue_root(skb, q); + qdisc_run(q); + } + spin_unlock(root_lock); + + return rc; +} + /** * dev_queue_xmit - transmit a buffer * @skb: buffer to transmit @@ -1856,22 +1907,10 @@ gso: q = rcu_dereference(txq->qdisc); #ifdef CONFIG_NET_CLS_ACT - skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); + skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); #endif if (q->enqueue) { - spinlock_t *root_lock = qdisc_lock(q); - - spin_lock(root_lock); - - if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { - kfree_skb(skb); - rc = NET_XMIT_DROP; - } else { - rc = qdisc_enqueue_root(skb, q); - qdisc_run(q); - } - spin_unlock(root_lock); - + rc = __dev_xmit_skb(skb, q, dev, txq); goto out; } @@ -1895,7 +1934,7 @@ gso: HARD_TX_LOCK(dev, txq, cpu); if (!netif_tx_queue_stopped(txq)) { - rc = 0; + rc = NET_XMIT_SUCCESS; if (!dev_hard_start_xmit(skb, dev, txq)) { HARD_TX_UNLOCK(dev, txq); goto out; @@ -1924,6 +1963,7 @@ out: rcu_read_unlock_bh(); return rc; } +EXPORT_SYMBOL(dev_queue_xmit); /*======================================================================= @@ -1990,6 +2030,7 @@ enqueue: kfree_skb(skb); return NET_RX_DROP; } +EXPORT_SYMBOL(netif_rx); int netif_rx_ni(struct sk_buff *skb) { @@ -2003,7 +2044,6 @@ int netif_rx_ni(struct sk_buff *skb) return err; } - EXPORT_SYMBOL(netif_rx_ni); static void net_tx_action(struct softirq_action *h) @@ -2076,7 +2116,7 @@ static inline int deliver_skb(struct sk_buff *skb, /* This hook is defined here for ATM LANE */ int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr) __read_mostly; -EXPORT_SYMBOL(br_fdb_test_addr_hook); +EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); #endif /* @@ -2085,7 +2125,7 @@ EXPORT_SYMBOL(br_fdb_test_addr_hook); */ struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff *skb) __read_mostly; -EXPORT_SYMBOL(br_handle_frame_hook); +EXPORT_SYMBOL_GPL(br_handle_frame_hook); static inline struct sk_buff *handle_bridge(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, @@ -2336,6 +2376,7 @@ out: rcu_read_unlock(); return ret; } +EXPORT_SYMBOL(netif_receive_skb); /* Network device is going away, flush any packets still pending */ static void flush_backlog(void *arg) @@ -2852,7 +2893,7 @@ softnet_break: goto out; } -static gifconf_func_t * gifconf_list [NPROTO]; +static gifconf_func_t *gifconf_list[NPROTO]; /** * register_gifconf - register a SIOCGIF handler @@ -2863,13 +2904,14 @@ static gifconf_func_t * gifconf_list [NPROTO]; * that is passed must not be freed or reused until it has been replaced * by another handler. 
*/ -int register_gifconf(unsigned int family, gifconf_func_t * gifconf) +int register_gifconf(unsigned int family, gifconf_func_t *gifconf) { if (family >= NPROTO) return -EINVAL; gifconf_list[family] = gifconf; return 0; } +EXPORT_SYMBOL(register_gifconf); /* @@ -3080,7 +3122,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", s->total, s->dropped, s->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ - s->cpu_collision ); + s->cpu_collision); return 0; } @@ -3316,6 +3358,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); return 0; } +EXPORT_SYMBOL(netdev_set_master); static void dev_change_rx_flags(struct net_device *dev, int flags) { @@ -3394,6 +3437,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc) dev_set_rx_mode(dev); return err; } +EXPORT_SYMBOL(dev_set_promiscuity); /** * dev_set_allmulti - update allmulti count on a device @@ -3437,6 +3481,7 @@ int dev_set_allmulti(struct net_device *dev, int inc) } return 0; } +EXPORT_SYMBOL(dev_set_allmulti); /* * Upload unicast and multicast address lists to device and @@ -3927,6 +3972,7 @@ int __dev_addr_sync(struct dev_addr_list **to, int *to_count, } return err; } +EXPORT_SYMBOL_GPL(__dev_addr_sync); void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count) @@ -3946,6 +3992,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, da = next; } } +EXPORT_SYMBOL_GPL(__dev_addr_unsync); /** * dev_unicast_sync - Synchronize device's unicast list to another device @@ -4064,6 +4111,7 @@ unsigned dev_get_flags(const struct net_device *dev) return flags; } +EXPORT_SYMBOL(dev_get_flags); /** * dev_change_flags - change device settings @@ -4114,12 +4162,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags) } if (dev->flags & IFF_UP && - ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | + ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) call_netdevice_notifiers(NETDEV_CHANGE, dev); if ((flags ^ dev->gflags) & IFF_PROMISC) { - int inc = (flags & IFF_PROMISC) ? +1 : -1; + int inc = (flags & IFF_PROMISC) ? 1 : -1; + dev->gflags ^= IFF_PROMISC; dev_set_promiscuity(dev, inc); } @@ -4129,7 +4178,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags) IFF_ALLMULTI is requested not asking us and not reporting. */ if ((flags ^ dev->gflags) & IFF_ALLMULTI) { - int inc = (flags & IFF_ALLMULTI) ? +1 : -1; + int inc = (flags & IFF_ALLMULTI) ? 
1 : -1; + dev->gflags ^= IFF_ALLMULTI; dev_set_allmulti(dev, inc); } @@ -4141,6 +4191,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags) return ret; } +EXPORT_SYMBOL(dev_change_flags); /** * dev_set_mtu - Change maximum transfer unit @@ -4174,6 +4225,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); return err; } +EXPORT_SYMBOL(dev_set_mtu); /** * dev_set_mac_address - Change Media Access Control Address @@ -4198,6 +4250,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return err; } +EXPORT_SYMBOL(dev_set_mac_address); /* * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) @@ -4211,56 +4264,56 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm return -ENODEV; switch (cmd) { - case SIOCGIFFLAGS: /* Get interface flags */ - ifr->ifr_flags = (short) dev_get_flags(dev); - return 0; + case SIOCGIFFLAGS: /* Get interface flags */ + ifr->ifr_flags = (short) dev_get_flags(dev); + return 0; - case SIOCGIFMETRIC: /* Get the metric on the interface - (currently unused) */ - ifr->ifr_metric = 0; - return 0; + case SIOCGIFMETRIC: /* Get the metric on the interface + (currently unused) */ + ifr->ifr_metric = 0; + return 0; - case SIOCGIFMTU: /* Get the MTU of a device */ - ifr->ifr_mtu = dev->mtu; - return 0; + case SIOCGIFMTU: /* Get the MTU of a device */ + ifr->ifr_mtu = dev->mtu; + return 0; - case SIOCGIFHWADDR: - if (!dev->addr_len) - memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); - else - memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); - ifr->ifr_hwaddr.sa_family = dev->type; - return 0; + case SIOCGIFHWADDR: + if (!dev->addr_len) + memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); + else + memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, + min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); + ifr->ifr_hwaddr.sa_family = dev->type; + return 0; - case SIOCGIFSLAVE: - err = -EINVAL; - break; + case SIOCGIFSLAVE: + err = -EINVAL; + break; - case SIOCGIFMAP: - ifr->ifr_map.mem_start = dev->mem_start; - ifr->ifr_map.mem_end = dev->mem_end; - ifr->ifr_map.base_addr = dev->base_addr; - ifr->ifr_map.irq = dev->irq; - ifr->ifr_map.dma = dev->dma; - ifr->ifr_map.port = dev->if_port; - return 0; + case SIOCGIFMAP: + ifr->ifr_map.mem_start = dev->mem_start; + ifr->ifr_map.mem_end = dev->mem_end; + ifr->ifr_map.base_addr = dev->base_addr; + ifr->ifr_map.irq = dev->irq; + ifr->ifr_map.dma = dev->dma; + ifr->ifr_map.port = dev->if_port; + return 0; - case SIOCGIFINDEX: - ifr->ifr_ifindex = dev->ifindex; - return 0; + case SIOCGIFINDEX: + ifr->ifr_ifindex = dev->ifindex; + return 0; - case SIOCGIFTXQLEN: - ifr->ifr_qlen = dev->tx_queue_len; - return 0; + case SIOCGIFTXQLEN: + ifr->ifr_qlen = dev->tx_queue_len; + return 0; - default: - /* dev_ioctl() should ensure this case - * is never reached - */ - WARN_ON(1); - err = -EINVAL; - break; + default: + /* dev_ioctl() should ensure this case + * is never reached + */ + WARN_ON(1); + err = -EINVAL; + break; } return err; @@ -4281,92 +4334,91 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) ops = dev->netdev_ops; switch (cmd) { - case SIOCSIFFLAGS: /* Set interface flags */ - return dev_change_flags(dev, ifr->ifr_flags); - - case SIOCSIFMETRIC: /* Set the metric on the interface - (currently unused) */ - return -EOPNOTSUPP; + case 
SIOCSIFFLAGS: /* Set interface flags */ + return dev_change_flags(dev, ifr->ifr_flags); - case SIOCSIFMTU: /* Set the MTU of a device */ - return dev_set_mtu(dev, ifr->ifr_mtu); + case SIOCSIFMETRIC: /* Set the metric on the interface + (currently unused) */ + return -EOPNOTSUPP; - case SIOCSIFHWADDR: - return dev_set_mac_address(dev, &ifr->ifr_hwaddr); + case SIOCSIFMTU: /* Set the MTU of a device */ + return dev_set_mtu(dev, ifr->ifr_mtu); - case SIOCSIFHWBROADCAST: - if (ifr->ifr_hwaddr.sa_family != dev->type) - return -EINVAL; - memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); - call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); - return 0; + case SIOCSIFHWADDR: + return dev_set_mac_address(dev, &ifr->ifr_hwaddr); - case SIOCSIFMAP: - if (ops->ndo_set_config) { - if (!netif_device_present(dev)) - return -ENODEV; - return ops->ndo_set_config(dev, &ifr->ifr_map); - } - return -EOPNOTSUPP; - - case SIOCADDMULTI: - if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || - ifr->ifr_hwaddr.sa_family != AF_UNSPEC) - return -EINVAL; - if (!netif_device_present(dev)) - return -ENODEV; - return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, - dev->addr_len, 1); + case SIOCSIFHWBROADCAST: + if (ifr->ifr_hwaddr.sa_family != dev->type) + return -EINVAL; + memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, + min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); + return 0; - case SIOCDELMULTI: - if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || - ifr->ifr_hwaddr.sa_family != AF_UNSPEC) - return -EINVAL; + case SIOCSIFMAP: + if (ops->ndo_set_config) { if (!netif_device_present(dev)) return -ENODEV; - return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, - dev->addr_len, 1); + return ops->ndo_set_config(dev, &ifr->ifr_map); + } + return -EOPNOTSUPP; - case SIOCSIFTXQLEN: - if (ifr->ifr_qlen < 0) - return -EINVAL; - dev->tx_queue_len = ifr->ifr_qlen; - return 0; + case SIOCADDMULTI: + if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || + ifr->ifr_hwaddr.sa_family != AF_UNSPEC) + return -EINVAL; + if (!netif_device_present(dev)) + return -ENODEV; + return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, + dev->addr_len, 1); + + case SIOCDELMULTI: + if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || + ifr->ifr_hwaddr.sa_family != AF_UNSPEC) + return -EINVAL; + if (!netif_device_present(dev)) + return -ENODEV; + return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, + dev->addr_len, 1); - case SIOCSIFNAME: - ifr->ifr_newname[IFNAMSIZ-1] = '\0'; - return dev_change_name(dev, ifr->ifr_newname); + case SIOCSIFTXQLEN: + if (ifr->ifr_qlen < 0) + return -EINVAL; + dev->tx_queue_len = ifr->ifr_qlen; + return 0; - /* - * Unknown or private ioctl - */ + case SIOCSIFNAME: + ifr->ifr_newname[IFNAMSIZ-1] = '\0'; + return dev_change_name(dev, ifr->ifr_newname); - default: - if ((cmd >= SIOCDEVPRIVATE && - cmd <= SIOCDEVPRIVATE + 15) || - cmd == SIOCBONDENSLAVE || - cmd == SIOCBONDRELEASE || - cmd == SIOCBONDSETHWADDR || - cmd == SIOCBONDSLAVEINFOQUERY || - cmd == SIOCBONDINFOQUERY || - cmd == SIOCBONDCHANGEACTIVE || - cmd == SIOCGMIIPHY || - cmd == SIOCGMIIREG || - cmd == SIOCSMIIREG || - cmd == SIOCBRADDIF || - cmd == SIOCBRDELIF || - cmd == SIOCSHWTSTAMP || - cmd == SIOCWANDEV) { - err = -EOPNOTSUPP; - if (ops->ndo_do_ioctl) { - if (netif_device_present(dev)) - err = ops->ndo_do_ioctl(dev, ifr, cmd); - else - err = -ENODEV; - } - } else - err = -EINVAL; + /* + * Unknown or 
private ioctl + */ + default: + if ((cmd >= SIOCDEVPRIVATE && + cmd <= SIOCDEVPRIVATE + 15) || + cmd == SIOCBONDENSLAVE || + cmd == SIOCBONDRELEASE || + cmd == SIOCBONDSETHWADDR || + cmd == SIOCBONDSLAVEINFOQUERY || + cmd == SIOCBONDINFOQUERY || + cmd == SIOCBONDCHANGEACTIVE || + cmd == SIOCGMIIPHY || + cmd == SIOCGMIIREG || + cmd == SIOCSMIIREG || + cmd == SIOCBRADDIF || + cmd == SIOCBRDELIF || + cmd == SIOCSHWTSTAMP || + cmd == SIOCWANDEV) { + err = -EOPNOTSUPP; + if (ops->ndo_do_ioctl) { + if (netif_device_present(dev)) + err = ops->ndo_do_ioctl(dev, ifr, cmd); + else + err = -ENODEV; + } + } else + err = -EINVAL; } return err; @@ -4423,135 +4475,135 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) */ switch (cmd) { - /* - * These ioctl calls: - * - can be done by all. - * - atomic and do not require locking. - * - return a value - */ - case SIOCGIFFLAGS: - case SIOCGIFMETRIC: - case SIOCGIFMTU: - case SIOCGIFHWADDR: - case SIOCGIFSLAVE: - case SIOCGIFMAP: - case SIOCGIFINDEX: - case SIOCGIFTXQLEN: - dev_load(net, ifr.ifr_name); - read_lock(&dev_base_lock); - ret = dev_ifsioc_locked(net, &ifr, cmd); - read_unlock(&dev_base_lock); - if (!ret) { - if (colon) - *colon = ':'; - if (copy_to_user(arg, &ifr, - sizeof(struct ifreq))) - ret = -EFAULT; - } - return ret; + /* + * These ioctl calls: + * - can be done by all. + * - atomic and do not require locking. + * - return a value + */ + case SIOCGIFFLAGS: + case SIOCGIFMETRIC: + case SIOCGIFMTU: + case SIOCGIFHWADDR: + case SIOCGIFSLAVE: + case SIOCGIFMAP: + case SIOCGIFINDEX: + case SIOCGIFTXQLEN: + dev_load(net, ifr.ifr_name); + read_lock(&dev_base_lock); + ret = dev_ifsioc_locked(net, &ifr, cmd); + read_unlock(&dev_base_lock); + if (!ret) { + if (colon) + *colon = ':'; + if (copy_to_user(arg, &ifr, + sizeof(struct ifreq))) + ret = -EFAULT; + } + return ret; - case SIOCETHTOOL: - dev_load(net, ifr.ifr_name); - rtnl_lock(); - ret = dev_ethtool(net, &ifr); - rtnl_unlock(); - if (!ret) { - if (colon) - *colon = ':'; - if (copy_to_user(arg, &ifr, - sizeof(struct ifreq))) - ret = -EFAULT; - } - return ret; + case SIOCETHTOOL: + dev_load(net, ifr.ifr_name); + rtnl_lock(); + ret = dev_ethtool(net, &ifr); + rtnl_unlock(); + if (!ret) { + if (colon) + *colon = ':'; + if (copy_to_user(arg, &ifr, + sizeof(struct ifreq))) + ret = -EFAULT; + } + return ret; - /* - * These ioctl calls: - * - require superuser power. - * - require strict serialization. - * - return a value - */ - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSIFNAME: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - dev_load(net, ifr.ifr_name); - rtnl_lock(); - ret = dev_ifsioc(net, &ifr, cmd); - rtnl_unlock(); - if (!ret) { - if (colon) - *colon = ':'; - if (copy_to_user(arg, &ifr, - sizeof(struct ifreq))) - ret = -EFAULT; - } - return ret; + /* + * These ioctl calls: + * - require superuser power. + * - require strict serialization. + * - return a value + */ + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSIFNAME: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + dev_load(net, ifr.ifr_name); + rtnl_lock(); + ret = dev_ifsioc(net, &ifr, cmd); + rtnl_unlock(); + if (!ret) { + if (colon) + *colon = ':'; + if (copy_to_user(arg, &ifr, + sizeof(struct ifreq))) + ret = -EFAULT; + } + return ret; - /* - * These ioctl calls: - * - require superuser power. - * - require strict serialization. 
- * - do not return a value - */ - case SIOCSIFFLAGS: - case SIOCSIFMETRIC: - case SIOCSIFMTU: - case SIOCSIFMAP: - case SIOCSIFHWADDR: - case SIOCSIFSLAVE: - case SIOCADDMULTI: - case SIOCDELMULTI: - case SIOCSIFHWBROADCAST: - case SIOCSIFTXQLEN: - case SIOCSMIIREG: - case SIOCBONDENSLAVE: - case SIOCBONDRELEASE: - case SIOCBONDSETHWADDR: - case SIOCBONDCHANGEACTIVE: - case SIOCBRADDIF: - case SIOCBRDELIF: - case SIOCSHWTSTAMP: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - /* fall through */ - case SIOCBONDSLAVEINFOQUERY: - case SIOCBONDINFOQUERY: + /* + * These ioctl calls: + * - require superuser power. + * - require strict serialization. + * - do not return a value + */ + case SIOCSIFFLAGS: + case SIOCSIFMETRIC: + case SIOCSIFMTU: + case SIOCSIFMAP: + case SIOCSIFHWADDR: + case SIOCSIFSLAVE: + case SIOCADDMULTI: + case SIOCDELMULTI: + case SIOCSIFHWBROADCAST: + case SIOCSIFTXQLEN: + case SIOCSMIIREG: + case SIOCBONDENSLAVE: + case SIOCBONDRELEASE: + case SIOCBONDSETHWADDR: + case SIOCBONDCHANGEACTIVE: + case SIOCBRADDIF: + case SIOCBRDELIF: + case SIOCSHWTSTAMP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + /* fall through */ + case SIOCBONDSLAVEINFOQUERY: + case SIOCBONDINFOQUERY: + dev_load(net, ifr.ifr_name); + rtnl_lock(); + ret = dev_ifsioc(net, &ifr, cmd); + rtnl_unlock(); + return ret; + + case SIOCGIFMEM: + /* Get the per device memory space. We can add this but + * currently do not support it */ + case SIOCSIFMEM: + /* Set the per device memory buffer space. + * Not applicable in our case */ + case SIOCSIFLINK: + return -EINVAL; + + /* + * Unknown or private ioctl. + */ + default: + if (cmd == SIOCWANDEV || + (cmd >= SIOCDEVPRIVATE && + cmd <= SIOCDEVPRIVATE + 15)) { dev_load(net, ifr.ifr_name); rtnl_lock(); ret = dev_ifsioc(net, &ifr, cmd); rtnl_unlock(); + if (!ret && copy_to_user(arg, &ifr, + sizeof(struct ifreq))) + ret = -EFAULT; return ret; - - case SIOCGIFMEM: - /* Get the per device memory space. We can add this but - * currently do not support it */ - case SIOCSIFMEM: - /* Set the per device memory buffer space. - * Not applicable in our case */ - case SIOCSIFLINK: - return -EINVAL; - - /* - * Unknown or private ioctl. 
- */ - default: - if (cmd == SIOCWANDEV || - (cmd >= SIOCDEVPRIVATE && - cmd <= SIOCDEVPRIVATE + 15)) { - dev_load(net, ifr.ifr_name); - rtnl_lock(); - ret = dev_ifsioc(net, &ifr, cmd); - rtnl_unlock(); - if (!ret && copy_to_user(arg, &ifr, - sizeof(struct ifreq))) - ret = -EFAULT; - return ret; - } - /* Take care of Wireless Extensions */ - if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) - return wext_handle_ioctl(net, &ifr, cmd, arg); - return -EINVAL; + } + /* Take care of Wireless Extensions */ + if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) + return wext_handle_ioctl(net, &ifr, cmd, arg); + return -EINVAL; } } @@ -4816,6 +4868,7 @@ err_uninit: dev->netdev_ops->ndo_uninit(dev); goto out; } +EXPORT_SYMBOL(register_netdevice); /** * init_dummy_netdev - init a dummy network device for NAPI @@ -5168,6 +5221,7 @@ void free_netdev(struct net_device *dev) /* will free via device release */ put_device(&dev->dev); } +EXPORT_SYMBOL(free_netdev); /** * synchronize_net - Synchronize with packet receive processing @@ -5180,6 +5234,7 @@ void synchronize_net(void) might_sleep(); synchronize_rcu(); } +EXPORT_SYMBOL(synchronize_net); /** * unregister_netdevice - remove device from the kernel @@ -5200,6 +5255,7 @@ void unregister_netdevice(struct net_device *dev) /* Finish processing unregister after unlock */ net_set_todo(dev); } +EXPORT_SYMBOL(unregister_netdevice); /** * unregister_netdev - remove device from the kernel @@ -5218,7 +5274,6 @@ void unregister_netdev(struct net_device *dev) unregister_netdevice(dev); rtnl_unlock(); } - EXPORT_SYMBOL(unregister_netdev); /** @@ -5347,6 +5402,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char out: return err; } +EXPORT_SYMBOL_GPL(dev_change_net_namespace); static int dev_cpu_callback(struct notifier_block *nfb, unsigned long action, @@ -5407,7 +5463,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one, unsigned long mask) { /* If device needs checksumming, downgrade to it. */ - if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) + if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); else if (mask & NETIF_F_ALL_CSUM) { /* If one device supports v4/v6 checksumming, set for all. 
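The re-indented dev_ifsioc_locked()/dev_ifsioc()/dev_ioctl() switches above are the kernel side of the classic SIOCxIFxxx interface. As a hedged userspace illustration (not part of the patch), the read-only group served under read_lock(&dev_base_lock) is what a plain ioctl() such as SIOCGIFMTU lands in:

/* Userspace illustration only: query an interface MTU via SIOCGIFMTU. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* "eth0" is only an example */

	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);

	close(fd);
	return 0;
}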
*/ @@ -5633,41 +5689,3 @@ static int __init initialize_hashrnd(void) late_initcall_sync(initialize_hashrnd); -EXPORT_SYMBOL(__dev_get_by_index); -EXPORT_SYMBOL(__dev_get_by_name); -EXPORT_SYMBOL(__dev_remove_pack); -EXPORT_SYMBOL(dev_valid_name); -EXPORT_SYMBOL(dev_add_pack); -EXPORT_SYMBOL(dev_alloc_name); -EXPORT_SYMBOL(dev_close); -EXPORT_SYMBOL(dev_get_by_flags); -EXPORT_SYMBOL(dev_get_by_index); -EXPORT_SYMBOL(dev_get_by_name); -EXPORT_SYMBOL(dev_open); -EXPORT_SYMBOL(dev_queue_xmit); -EXPORT_SYMBOL(dev_remove_pack); -EXPORT_SYMBOL(dev_set_allmulti); -EXPORT_SYMBOL(dev_set_promiscuity); -EXPORT_SYMBOL(dev_change_flags); -EXPORT_SYMBOL(dev_set_mtu); -EXPORT_SYMBOL(dev_set_mac_address); -EXPORT_SYMBOL(free_netdev); -EXPORT_SYMBOL(netdev_boot_setup_check); -EXPORT_SYMBOL(netdev_set_master); -EXPORT_SYMBOL(netdev_state_change); -EXPORT_SYMBOL(netif_receive_skb); -EXPORT_SYMBOL(netif_rx); -EXPORT_SYMBOL(register_gifconf); -EXPORT_SYMBOL(register_netdevice); -EXPORT_SYMBOL(register_netdevice_notifier); -EXPORT_SYMBOL(skb_checksum_help); -EXPORT_SYMBOL(synchronize_net); -EXPORT_SYMBOL(unregister_netdevice); -EXPORT_SYMBOL(unregister_netdevice_notifier); -EXPORT_SYMBOL(net_enable_timestamp); -EXPORT_SYMBOL(net_disable_timestamp); -EXPORT_SYMBOL(dev_get_flags); - -EXPORT_SYMBOL(dev_load); - -EXPORT_PER_CPU_SYMBOL(softnet_data); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 9d66fa9..0a113f2 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -52,6 +52,7 @@ struct per_cpu_dm_data { struct dm_hw_stat_delta { struct net_device *dev; + unsigned long last_rx; struct list_head list; struct rcu_head rcu; unsigned long last_drop_val; @@ -180,17 +181,25 @@ static void trace_napi_poll_hit(struct napi_struct *napi) struct dm_hw_stat_delta *new_stat; /* - * Ratelimit our check time to dm_hw_check_delta jiffies + * Don't check napi structures with no associated device */ - if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta)) + if (!napi->dev) return; rcu_read_lock(); list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { + /* + * only add a note to our monitor buffer if: + * 1) this is the dev we received on + * 2) its after the last_rx delta + * 3) our rx_dropped count has gone up + */ if ((new_stat->dev == napi->dev) && + (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) && (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { trace_drop_common(NULL, NULL); new_stat->last_drop_val = napi->dev->stats.rx_dropped; + new_stat->last_rx = jiffies; break; } } @@ -286,6 +295,7 @@ static int dropmon_net_event(struct notifier_block *ev_block, goto out; new_stat->dev = dev; + new_stat->last_rx = jiffies; INIT_RCU_HEAD(&new_stat->rcu); spin_lock(&trace_state_lock); list_add_rcu(&new_stat->list, &hw_stats_list); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index d9d5160..4c12ddb 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -30,10 +30,17 @@ u32 ethtool_op_get_link(struct net_device *dev) return netif_carrier_ok(dev) ? 
1 : 0; } +u32 ethtool_op_get_rx_csum(struct net_device *dev) +{ + return (dev->features & NETIF_F_ALL_CSUM) != 0; +} +EXPORT_SYMBOL(ethtool_op_get_rx_csum); + u32 ethtool_op_get_tx_csum(struct net_device *dev) { return (dev->features & NETIF_F_ALL_CSUM) != 0; } +EXPORT_SYMBOL(ethtool_op_get_tx_csum); int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) { @@ -891,6 +898,19 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr, return actor(dev, edata.data); } +static int ethtool_flash_device(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_flash efl; + + if (copy_from_user(&efl, useraddr, sizeof(efl))) + return -EFAULT; + + if (!dev->ethtool_ops->flash_device) + return -EOPNOTSUPP; + + return dev->ethtool_ops->flash_device(dev, &efl); +} + /* The main entry point in this file. Called from net/core/dev.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) @@ -1004,7 +1024,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) break; case ETHTOOL_GRXCSUM: rc = ethtool_get_value(dev, useraddr, ethcmd, - dev->ethtool_ops->get_rx_csum); + (dev->ethtool_ops->get_rx_csum ? + dev->ethtool_ops->get_rx_csum : + ethtool_op_get_rx_csum)); break; case ETHTOOL_SRXCSUM: rc = ethtool_set_rx_csum(dev, useraddr); @@ -1068,7 +1090,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) break; case ETHTOOL_GFLAGS: rc = ethtool_get_value(dev, useraddr, ethcmd, - dev->ethtool_ops->get_flags); + (dev->ethtool_ops->get_flags ? + dev->ethtool_ops->get_flags : + ethtool_op_get_flags)); break; case ETHTOOL_SFLAGS: rc = ethtool_set_value(dev, useraddr, @@ -1100,6 +1124,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_SGRO: rc = ethtool_set_gro(dev, useraddr); break; + case ETHTOOL_FLASHDEV: + rc = ethtool_flash_device(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } @@ -1116,7 +1143,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) EXPORT_SYMBOL(ethtool_op_get_link); EXPORT_SYMBOL(ethtool_op_get_sg); EXPORT_SYMBOL(ethtool_op_get_tso); -EXPORT_SYMBOL(ethtool_op_get_tx_csum); EXPORT_SYMBOL(ethtool_op_set_sg); EXPORT_SYMBOL(ethtool_op_set_tso); EXPORT_SYMBOL(ethtool_op_set_tx_csum); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 163b4f5..e587e68 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -692,75 +692,74 @@ static void neigh_connect(struct neighbour *neigh) hh->hh_output = neigh->ops->hh_output; } -static void neigh_periodic_timer(unsigned long arg) +static void neigh_periodic_work(struct work_struct *work) { - struct neigh_table *tbl = (struct neigh_table *)arg; + struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); struct neighbour *n, **np; - unsigned long expire, now = jiffies; + unsigned int i; NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); - write_lock(&tbl->lock); + write_lock_bh(&tbl->lock); /* * periodically recompute ReachableTime from random function */ - if (time_after(now, tbl->last_rand + 300 * HZ)) { + if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { struct neigh_parms *p; - tbl->last_rand = now; + tbl->last_rand = jiffies; for (p = &tbl->parms; p; p = p->next) p->reachable_time = neigh_rand_reach_time(p->base_reachable_time); } - np = &tbl->hash_buckets[tbl->hash_chain_gc]; - tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask); + for (i = 0 ; i <= tbl->hash_mask; i++) { + np = &tbl->hash_buckets[i]; - while ((n = *np) != NULL) { - unsigned int state; + while ((n = *np) != NULL) { + unsigned int state; - write_lock(&n->lock); + 
write_lock(&n->lock); - state = n->nud_state; - if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { - write_unlock(&n->lock); - goto next_elt; - } + state = n->nud_state; + if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { + write_unlock(&n->lock); + goto next_elt; + } - if (time_before(n->used, n->confirmed)) - n->used = n->confirmed; + if (time_before(n->used, n->confirmed)) + n->used = n->confirmed; - if (atomic_read(&n->refcnt) == 1 && - (state == NUD_FAILED || - time_after(now, n->used + n->parms->gc_staletime))) { - *np = n->next; - n->dead = 1; + if (atomic_read(&n->refcnt) == 1 && + (state == NUD_FAILED || + time_after(jiffies, n->used + n->parms->gc_staletime))) { + *np = n->next; + n->dead = 1; + write_unlock(&n->lock); + neigh_cleanup_and_release(n); + continue; + } write_unlock(&n->lock); - neigh_cleanup_and_release(n); - continue; - } - write_unlock(&n->lock); next_elt: - np = &n->next; + np = &n->next; + } + /* + * It's fine to release lock here, even if hash table + * grows while we are preempted. + */ + write_unlock_bh(&tbl->lock); + cond_resched(); + write_lock_bh(&tbl->lock); } - /* Cycle through all hash buckets every base_reachable_time/2 ticks. * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 * base_reachable_time. */ - expire = tbl->parms.base_reachable_time >> 1; - expire /= (tbl->hash_mask + 1); - if (!expire) - expire = 1; - - if (expire>HZ) - mod_timer(&tbl->gc_timer, round_jiffies(now + expire)); - else - mod_timer(&tbl->gc_timer, now + expire); - - write_unlock(&tbl->lock); + schedule_delayed_work(&tbl->gc_work, + tbl->parms.base_reachable_time >> 1); + write_unlock_bh(&tbl->lock); } static __inline__ int neigh_max_probes(struct neighbour *n) @@ -1316,7 +1315,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, } EXPORT_SYMBOL(pneigh_enqueue); -static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl, +static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, struct net *net, int ifindex) { struct neigh_parms *p; @@ -1337,7 +1336,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct net *net = dev_net(dev); const struct net_device_ops *ops = dev->netdev_ops; - ref = lookup_neigh_params(tbl, net, 0); + ref = lookup_neigh_parms(tbl, net, 0); if (!ref) return NULL; @@ -1442,10 +1441,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); rwlock_init(&tbl->lock); - setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl); - tbl->gc_timer.expires = now + 1; - add_timer(&tbl->gc_timer); - + INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work); + schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); skb_queue_head_init_class(&tbl->proxy_queue, &neigh_table_proxy_queue_class); @@ -1482,7 +1479,8 @@ int neigh_table_clear(struct neigh_table *tbl) struct neigh_table **tp; /* It is not clean... 
Fix it to unload IPv6 module safely */ - del_timer_sync(&tbl->gc_timer); + cancel_delayed_work(&tbl->gc_work); + flush_scheduled_work(); del_timer_sync(&tbl->proxy_timer); pneigh_queue_purge(&tbl->proxy_queue); neigh_ifdown(tbl, NULL); @@ -1752,7 +1750,6 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, .ndtc_last_rand = jiffies_to_msecs(rand_delta), .ndtc_hash_rnd = tbl->hash_rnd, .ndtc_hash_mask = tbl->hash_mask, - .ndtc_hash_chain_gc = tbl->hash_chain_gc, .ndtc_proxy_qlen = tbl->proxy_queue.qlen, }; @@ -1906,7 +1903,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if (tbp[NDTPA_IFINDEX]) ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); - p = lookup_neigh_params(tbl, net, ifindex); + p = lookup_neigh_parms(tbl, net, ifindex); if (p == NULL) { err = -ENOENT; goto errout_tbl_lock; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 3994680..ad91e9e 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -141,7 +141,7 @@ static ssize_t show_dormant(struct device *dev, return -EINVAL; } -static const char *operstates[] = { +static const char *const operstates[] = { "unknown", "notpresent", /* currently unused */ "down", diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 1972830..1c1af27 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -6,6 +6,8 @@ #include <linux/delay.h> #include <linux/sched.h> #include <linux/idr.h> +#include <linux/rculist.h> +#include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@ -127,7 +129,7 @@ static struct net *net_create(void) rv = setup_net(net); if (rv == 0) { rtnl_lock(); - list_add_tail(&net->list, &net_namespace_list); + list_add_tail_rcu(&net->list, &net_namespace_list); rtnl_unlock(); } mutex_unlock(&net_mutex); @@ -156,9 +158,16 @@ static void cleanup_net(struct work_struct *work) /* Don't let anyone else find us. */ rtnl_lock(); - list_del(&net->list); + list_del_rcu(&net->list); rtnl_unlock(); + /* + * Another CPU might be rcu-iterating the list, wait for it. + * This needs to be before calling the exit() notifiers, so + * the rcu_barrier() below isn't sufficient alone. + */ + synchronize_rcu(); + /* Run all of the network namespace exit methods */ list_for_each_entry_reverse(ops, &pernet_list, list) { if (ops->exit) @@ -193,6 +202,26 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) } #endif +struct net *get_net_ns_by_pid(pid_t pid) +{ + struct task_struct *tsk; + struct net *net; + + /* Lookup the network namespace */ + net = ERR_PTR(-ESRCH); + rcu_read_lock(); + tsk = find_task_by_vpid(pid); + if (tsk) { + struct nsproxy *nsproxy; + nsproxy = task_nsproxy(tsk); + if (nsproxy) + net = get_net(nsproxy->net_ns); + } + rcu_read_unlock(); + return net; +} +EXPORT_SYMBOL_GPL(get_net_ns_by_pid); + static int __init net_ns_init(void) { struct net_generic *ng; @@ -219,7 +248,7 @@ static int __init net_ns_init(void) panic("Could not setup the initial network namespace"); rtnl_lock(); - list_add_tail(&init_net.list, &net_namespace_list); + list_add_tail_rcu(&init_net.list, &net_namespace_list); rtnl_unlock(); mutex_unlock(&net_mutex); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 1b76eb1..0b4d0d3 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -9,6 +9,7 @@ * Copyright (C) 2002 Red Hat, Inc. 
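The neighbour.c hunks above replace the self-rearming gc_timer with a deferrable delayed work item, neigh_periodic_work(), which neigh_table_clear() now tears down with cancel_delayed_work() plus flush_scheduled_work(). A generic, hedged sketch of that pattern (example names, not code from the patch):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work example_gc;

static void example_gc_work(struct work_struct *work)
{
	/* ... walk the table, dropping the lock and cond_resched()ing ... */

	/* re-arm, just as neigh_periodic_work() reschedules itself */
	schedule_delayed_work(&example_gc, HZ / 2);
}

static int __init example_init(void)
{
	/* deferrable: an idle CPU is not woken just to run the scan */
	INIT_DELAYED_WORK_DEFERRABLE(&example_gc, example_gc_work);
	schedule_delayed_work(&example_gc, HZ);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work(&example_gc);
	flush_scheduled_work();	/* same pairing neigh_table_clear() now uses */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");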
*/ +#include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/string.h> @@ -50,6 +51,9 @@ static atomic_t trapped; static void zap_completion_queue(void); static void arp_reply(struct sk_buff *skb); +static unsigned int carrier_timeout = 4; +module_param(carrier_timeout, uint, 0644); + static void queue_process(struct work_struct *work) { struct netpoll_info *npinfo = @@ -737,7 +741,7 @@ int netpoll_setup(struct netpoll *np) } atleast = jiffies + HZ/10; - atmost = jiffies + 4*HZ; + atmost = jiffies + carrier_timeout * HZ; while (!netif_carrier_ok(ndev)) { if (time_after(jiffies, atmost)) { printk(KERN_NOTICE diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 19b8c20..0bcecbf 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -131,6 +131,7 @@ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/capability.h> +#include <linux/hrtimer.h> #include <linux/freezer.h> #include <linux/delay.h> #include <linux/timer.h> @@ -162,14 +163,13 @@ #include <asm/byteorder.h> #include <linux/rcupdate.h> #include <linux/bitops.h> -#include <asm/io.h> +#include <linux/io.h> +#include <linux/timex.h> +#include <linux/uaccess.h> #include <asm/dma.h> -#include <asm/uaccess.h> #include <asm/div64.h> /* do_div */ -#include <asm/timex.h> - -#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n" +#define VERSION "2.72" #define IP_NAME_SZ 32 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ #define MPLS_STACK_BOTTOM htonl(0x00000100) @@ -206,7 +206,7 @@ #define PKTGEN_MAGIC 0xbe9be955 #define PG_PROC_DIR "pktgen" #define PGCTRL "pgctrl" -static struct proc_dir_entry *pg_proc_dir = NULL; +static struct proc_dir_entry *pg_proc_dir; #define MAX_CFLOWS 65536 @@ -231,9 +231,9 @@ struct pktgen_dev { */ struct proc_dir_entry *entry; /* proc file */ struct pktgen_thread *pg_thread;/* the owner */ - struct list_head list; /* Used for chaining in the thread's run-queue */ + struct list_head list; /* chaining in the thread's run-queue */ - int running; /* if this changes to false, the test will stop */ + int running; /* if false, the test will stop */ /* If min != max, then we will either do a linear iteration, or * we will do a random selection from within the range. @@ -246,33 +246,37 @@ struct pktgen_dev { int max_pkt_size; /* = ETH_ZLEN; */ int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ int nfrags; - __u32 delay_us; /* Default delay */ - __u32 delay_ns; + u64 delay; /* nano-seconds */ + __u64 count; /* Default No packets to send */ __u64 sofar; /* How many pkts we've sent so far */ __u64 tx_bytes; /* How many bytes we've transmitted */ - __u64 errors; /* Errors when trying to transmit, pkts will be re-sent */ + __u64 errors; /* Errors when trying to transmit, + pkts will be re-sent */ /* runtime counters relating to clone_skb */ - __u64 next_tx_us; /* timestamp of when to tx next */ - __u32 next_tx_ns; __u64 allocated_skbs; __u32 clone_count; int last_ok; /* Was last skb sent? - * Or a failed transmit of some sort? This will keep - * sequence numbers in order, for example. + * Or a failed transmit of some sort? + * This will keep sequence numbers in order */ - __u64 started_at; /* micro-seconds */ - __u64 stopped_at; /* micro-seconds */ - __u64 idle_acc; /* micro-seconds */ + ktime_t next_tx; + ktime_t started_at; + ktime_t stopped_at; + u64 idle_acc; /* nano-seconds */ + __u32 seq_num; - int clone_skb; /* Use multiple SKBs during packet gen. 
If this number - * is greater than 1, then that many copies of the same - * packet will be sent before a new packet is allocated. - * For instance, if you want to send 1024 identical packets - * before creating a new packet, set clone_skb to 1024. + int clone_skb; /* + * Use multiple SKBs during packet gen. + * If this number is greater than 1, then + * that many copies of the same packet will be + * sent before a new packet is allocated. + * If you want to send 1024 identical packets + * before creating a new packet, + * set clone_skb to 1024. */ char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ @@ -304,8 +308,10 @@ struct pktgen_dev { __u16 udp_dst_max; /* exclusive, dest UDP port */ /* DSCP + ECN */ - __u8 tos; /* six most significant bits of (former) IPv4 TOS are for dscp codepoint */ - __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */ + __u8 tos; /* six MSB of (former) IPv4 TOS + are for dscp codepoint */ + __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 + (see RFC 3260, sec. 4) */ /* MPLS */ unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ @@ -346,15 +352,17 @@ struct pktgen_dev { */ __u16 pad; /* pad out the hh struct to an even 16 bytes */ - struct sk_buff *skb; /* skb we are to transmit next, mainly used for when we + struct sk_buff *skb; /* skb we are to transmit next, used for when we * are transmitting the same one multiple times */ - struct net_device *odev; /* The out-going device. Note that the device should - * have it's pg_info pointer pointing back to this - * device. This will be set when the user specifies - * the out-going device name (not when the inject is - * started as it used to do.) - */ + struct net_device *odev; /* The out-going device. + * Note that the device should have it's + * pg_info pointer pointing back to this + * device. + * Set when the user specifies the out-going + * device name (not when the inject is + * started as it used to do.) + */ struct flow_state *flows; unsigned cflows; /* Concurrent flows (config) */ unsigned lflow; /* Flow length (config) */ @@ -379,13 +387,14 @@ struct pktgen_hdr { }; struct pktgen_thread { - spinlock_t if_lock; + spinlock_t if_lock; /* for list of devices */ struct list_head if_list; /* All device here */ struct list_head th_list; struct task_struct *tsk; char result[512]; - /* Field for thread to receive "posted" events terminate, stop ifs etc. */ + /* Field for thread to receive "posted" events terminate, + stop ifs etc. 
*/ u32 control; int cpu; @@ -397,24 +406,22 @@ struct pktgen_thread { #define REMOVE 1 #define FIND 0 -/** Convert to micro-seconds */ -static inline __u64 tv_to_us(const struct timeval *tv) +static inline ktime_t ktime_now(void) { - __u64 us = tv->tv_usec; - us += (__u64) tv->tv_sec * (__u64) 1000000; - return us; + struct timespec ts; + ktime_get_ts(&ts); + + return timespec_to_ktime(ts); } -static __u64 getCurUs(void) +/* This works even if 32 bit because of careful byte order choice */ +static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2) { - struct timeval tv; - do_gettimeofday(&tv); - return tv_to_us(&tv); + return cmp1.tv64 < cmp2.tv64; } -/* old include end */ - -static char version[] __initdata = VERSION; +static const char version[] = + "pktgen " VERSION ": Packet Generator for packet performance testing.\n"; static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); @@ -424,7 +431,7 @@ static int pktgen_device_event(struct notifier_block *, unsigned long, void *); static void pktgen_run_all_threads(void); static void pktgen_reset_all_threads(void); static void pktgen_stop_all_threads_ifs(void); -static int pktgen_stop_device(struct pktgen_dev *pkt_dev); + static void pktgen_stop(struct pktgen_thread *t); static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); @@ -432,10 +439,10 @@ static unsigned int scan_ip6(const char *s, char ip[16]); static unsigned int fmt_ip6(char *s, const char ip[16]); /* Module parameters, defaults. */ -static int pg_count_d = 1000; /* 1000 pkts by default */ -static int pg_delay_d; -static int pg_clone_skb_d; -static int debug; +static int pg_count_d __read_mostly = 1000; +static int pg_delay_d __read_mostly; +static int pg_clone_skb_d __read_mostly; +static int debug __read_mostly; static DEFINE_MUTEX(pktgen_thread_lock); static LIST_HEAD(pktgen_threads); @@ -451,12 +458,12 @@ static struct notifier_block pktgen_notifier_block = { static int pgctrl_show(struct seq_file *seq, void *v) { - seq_puts(seq, VERSION); + seq_puts(seq, version); return 0; } -static ssize_t pgctrl_write(struct file *file, const char __user * buf, - size_t count, loff_t * ppos) +static ssize_t pgctrl_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) { int err = 0; char data[128]; @@ -509,10 +516,9 @@ static const struct file_operations pktgen_fops = { static int pktgen_if_show(struct seq_file *seq, void *v) { - struct pktgen_dev *pkt_dev = seq->private; - __u64 sa; - __u64 stopped; - __u64 now = getCurUs(); + const struct pktgen_dev *pkt_dev = seq->private; + ktime_t stopped; + u64 idle; seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", @@ -520,9 +526,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v) pkt_dev->max_pkt_size); seq_printf(seq, - " frags: %d delay: %u clone_skb: %d ifname: %s\n", - pkt_dev->nfrags, - 1000 * pkt_dev->delay_us + pkt_dev->delay_ns, + " frags: %d delay: %llu clone_skb: %d ifname: %s\n", + pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, pkt_dev->clone_skb, pkt_dev->odev->name); seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, @@ -549,11 +554,14 @@ static int pktgen_if_show(struct seq_file *seq, void *v) " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); - } else + } else { + seq_printf(seq, + " dst_min: %s dst_max: %s\n", + pkt_dev->dst_min, pkt_dev->dst_max); seq_printf(seq, - " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", - pkt_dev->dst_min, 
pkt_dev->dst_max, pkt_dev->src_min, - pkt_dev->src_max); + " src_min: %s src_max: %s\n", + pkt_dev->src_min, pkt_dev->src_max); + } seq_puts(seq, " src_mac: "); @@ -565,7 +573,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v) seq_printf(seq, "%pM\n", pkt_dev->dst_mac); seq_printf(seq, - " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", + " udp_src_min: %d udp_src_max: %d" + " udp_dst_min: %d udp_dst_max: %d\n", pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); @@ -581,23 +590,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v) i == pkt_dev->nr_labels-1 ? "\n" : ", "); } - if (pkt_dev->vlan_id != 0xffff) { + if (pkt_dev->vlan_id != 0xffff) seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", - pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi); - } + pkt_dev->vlan_id, pkt_dev->vlan_p, + pkt_dev->vlan_cfi); - if (pkt_dev->svlan_id != 0xffff) { + if (pkt_dev->svlan_id != 0xffff) seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", - pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi); - } + pkt_dev->svlan_id, pkt_dev->svlan_p, + pkt_dev->svlan_cfi); - if (pkt_dev->tos) { + if (pkt_dev->tos) seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); - } - if (pkt_dev->traffic_class) { + if (pkt_dev->traffic_class) seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); - } seq_printf(seq, " Flags: "); @@ -654,17 +661,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v) seq_puts(seq, "\n"); - sa = pkt_dev->started_at; - stopped = pkt_dev->stopped_at; - if (pkt_dev->running) - stopped = now; /* not really stopped, more like last-running-at */ + /* not really stopped, more like last-running-at */ + stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at; + idle = pkt_dev->idle_acc; + do_div(idle, NSEC_PER_USEC); seq_printf(seq, - "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", + "Current:\n pkts-sofar: %llu errors: %llu\n", (unsigned long long)pkt_dev->sofar, - (unsigned long long)pkt_dev->errors, (unsigned long long)sa, - (unsigned long long)stopped, - (unsigned long long)pkt_dev->idle_acc); + (unsigned long long)pkt_dev->errors); + + seq_printf(seq, + " started: %lluus stopped: %lluus idle: %lluus\n", + (unsigned long long) ktime_to_us(pkt_dev->started_at), + (unsigned long long) ktime_to_us(stopped), + (unsigned long long) idle); seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", @@ -696,7 +707,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v) } -static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num) +static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, + __u32 *num) { int i = 0; *num = 0; @@ -846,9 +858,9 @@ static ssize_t pktgen_if_write(struct file *file, /* Read variable name */ len = strn_len(&user_buffer[i], sizeof(name) - 1); - if (len < 0) { + if (len < 0) return len; - } + memset(name, 0, sizeof(name)); if (copy_from_user(name, &user_buffer[i], len)) return -EFAULT; @@ -872,9 +884,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "min_pkt_size")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; @@ -889,9 +901,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "max_pkt_size")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if 
(value < 14 + 20 + 8) value = 14 + 20 + 8; @@ -908,9 +920,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "pkt_size")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; @@ -925,9 +937,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "debug")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; debug = value; sprintf(pg_result, "OK: debug=%u", debug); @@ -936,9 +948,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "frags")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; pkt_dev->nfrags = value; sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); @@ -946,26 +958,24 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "delay")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; - if (value == 0x7FFFFFFF) { - pkt_dev->delay_us = 0x7FFFFFFF; - pkt_dev->delay_ns = 0; - } else { - pkt_dev->delay_us = value / 1000; - pkt_dev->delay_ns = value % 1000; - } - sprintf(pg_result, "OK: delay=%u", - 1000 * pkt_dev->delay_us + pkt_dev->delay_ns); + if (value == 0x7FFFFFFF) + pkt_dev->delay = ULLONG_MAX; + else + pkt_dev->delay = (u64)value * NSEC_PER_USEC; + + sprintf(pg_result, "OK: delay=%llu", + (unsigned long long) pkt_dev->delay); return count; } if (!strcmp(name, "udp_src_min")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (value != pkt_dev->udp_src_min) { pkt_dev->udp_src_min = value; @@ -976,9 +986,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "udp_dst_min")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (value != pkt_dev->udp_dst_min) { pkt_dev->udp_dst_min = value; @@ -989,9 +999,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "udp_src_max")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (value != pkt_dev->udp_src_max) { pkt_dev->udp_src_max = value; @@ -1002,9 +1012,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "udp_dst_max")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (value != pkt_dev->udp_dst_max) { pkt_dev->udp_dst_max = value; @@ -1015,9 +1025,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "clone_skb")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; pkt_dev->clone_skb = value; @@ -1026,9 +1036,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "count")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; pkt_dev->count = value; sprintf(pg_result, "OK: count=%llu", @@ -1037,9 +1047,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "src_mac_count")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (pkt_dev->src_mac_count != value) { pkt_dev->src_mac_count = value; @@ -1051,9 +1061,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "dst_mac_count")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return 
len; - } + i += len; if (pkt_dev->dst_mac_count != value) { pkt_dev->dst_mac_count = value; @@ -1067,9 +1077,9 @@ static ssize_t pktgen_if_write(struct file *file, char f[32]; memset(f, 0, 32); len = strn_len(&user_buffer[i], sizeof(f) - 1); - if (len < 0) { + if (len < 0) return len; - } + if (copy_from_user(f, &user_buffer[i], len)) return -EFAULT; i += len; @@ -1168,9 +1178,8 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); - if (len < 0) { + if (len < 0) return len; - } if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; @@ -1190,9 +1199,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "dst_max")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); - if (len < 0) { + if (len < 0) return len; - } + if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; @@ -1303,9 +1312,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "src_min")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); - if (len < 0) { + if (len < 0) return len; - } + if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; @@ -1324,9 +1333,9 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "src_max")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); - if (len < 0) { + if (len < 0) return len; - } + if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; @@ -1350,9 +1359,9 @@ static ssize_t pktgen_if_write(struct file *file, memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); len = strn_len(&user_buffer[i], sizeof(valstr) - 1); - if (len < 0) { + if (len < 0) return len; - } + memset(valstr, 0, sizeof(valstr)); if (copy_from_user(valstr, &user_buffer[i], len)) return -EFAULT; @@ -1392,9 +1401,9 @@ static ssize_t pktgen_if_write(struct file *file, memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN); len = strn_len(&user_buffer[i], sizeof(valstr) - 1); - if (len < 0) { + if (len < 0) return len; - } + memset(valstr, 0, sizeof(valstr)); if (copy_from_user(valstr, &user_buffer[i], len)) return -EFAULT; @@ -1435,9 +1444,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "flows")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (value > MAX_CFLOWS) value = MAX_CFLOWS; @@ -1449,9 +1458,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "flowlen")) { len = num_arg(&user_buffer[i], 10, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; pkt_dev->lflow = value; sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); @@ -1460,9 +1469,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "queue_map_min")) { len = num_arg(&user_buffer[i], 5, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; pkt_dev->queue_map_min = value; sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); @@ -1471,9 +1480,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "queue_map_max")) { len = num_arg(&user_buffer[i], 5, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; pkt_dev->queue_map_max = value; sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); @@ -1505,9 +1514,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "vlan_id")) { len = num_arg(&user_buffer[i], 4, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; 
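For reference, every keyword this parser handles (pkt_size, count, delay, dst_min, dst_mac, vlan_id and so on) is driven by writing plain-text commands to the per-device entry that pktgen registers under its proc directory. A minimal user-space sketch follows, assuming the interface is already bound to a pktgen thread and reachable as /proc/net/pktgen/eth0; the device name and the values written are examples, not part of this patch.

/* Illustrative only: drive the pktgen_if_write() parser from user space.
 * Assumes eth0 was added to a kpktgend thread beforehand; error handling
 * is reduced to perror() for brevity.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int pgset(const char *path, const char *cmd)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, cmd, strlen(cmd));
	if (n < 0)
		perror(path);
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *dev = "/proc/net/pktgen/eth0";	/* example path */

	pgset(dev, "pkt_size 300\n");	/* clamped to >= 14 + 20 + 8 by the parser */
	pgset(dev, "count 100000\n");
	pgset(dev, "delay 0\n");	/* 0x7FFFFFFF selects "never transmit" */
	pgset(dev, "dst_min 10.0.0.1\n");
	return 0;
}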
if (value <= 4095) { pkt_dev->vlan_id = value; /* turn on VLAN */ @@ -1532,9 +1541,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "vlan_p")) { len = num_arg(&user_buffer[i], 1, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { pkt_dev->vlan_p = value; @@ -1547,9 +1556,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "vlan_cfi")) { len = num_arg(&user_buffer[i], 1, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { pkt_dev->vlan_cfi = value; @@ -1562,9 +1571,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "svlan_id")) { len = num_arg(&user_buffer[i], 4, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { pkt_dev->svlan_id = value; /* turn on SVLAN */ @@ -1589,9 +1598,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "svlan_p")) { len = num_arg(&user_buffer[i], 1, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { pkt_dev->svlan_p = value; @@ -1604,9 +1613,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "svlan_cfi")) { len = num_arg(&user_buffer[i], 1, &value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { pkt_dev->svlan_cfi = value; @@ -1620,9 +1629,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "tos")) { __u32 tmp_value = 0; len = hex32_arg(&user_buffer[i], 2, &tmp_value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (len == 2) { pkt_dev->tos = tmp_value; @@ -1636,9 +1645,9 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "traffic_class")) { __u32 tmp_value = 0; len = hex32_arg(&user_buffer[i], 2, &tmp_value); - if (len < 0) { + if (len < 0) return len; - } + i += len; if (len == 2) { pkt_dev->traffic_class = tmp_value; @@ -1670,7 +1679,7 @@ static const struct file_operations pktgen_if_fops = { static int pktgen_thread_show(struct seq_file *seq, void *v) { struct pktgen_thread *t = seq->private; - struct pktgen_dev *pkt_dev; + const struct pktgen_dev *pkt_dev; BUG_ON(!t); @@ -1873,8 +1882,10 @@ static void pktgen_change_name(struct net_device *dev) remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); - pkt_dev->entry = create_proc_entry(dev->name, 0600, - pg_proc_dir); + pkt_dev->entry = proc_create_data(dev->name, 0600, + pg_proc_dir, + &pktgen_if_fops, + pkt_dev); if (!pkt_dev->entry) printk(KERN_ERR "pktgen: can't move proc " " entry for '%s'\n", dev->name); @@ -1908,13 +1919,14 @@ static int pktgen_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname) +static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, + const char *ifname) { char b[IFNAMSIZ+5]; int i = 0; - for(i=0; ifname[i] != '@'; i++) { - if(i == IFNAMSIZ) + for (i = 0; ifname[i] != '@'; i++) { + if (i == IFNAMSIZ) break; b[i] = ifname[i]; @@ -1981,7 +1993,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) printk(KERN_WARNING "pktgen: WARNING: Requested " "queue_map_min (zero-based) (%d) exceeds valid range " "[0 - %d] for (%d) queues on %s, resetting\n", - pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq, + pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, 
pkt_dev->odev->name); pkt_dev->queue_map_min = ntxq - 1; } @@ -1989,7 +2001,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) printk(KERN_WARNING "pktgen: WARNING: Requested " "queue_map_max (zero-based) (%d) exceeds valid range " "[0 - %d] for (%d) queues on %s, resetting\n", - pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq, + pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, pkt_dev->odev->name); pkt_dev->queue_map_max = ntxq - 1; } @@ -2030,7 +2042,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) */ rcu_read_lock(); - if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) { + idev = __in6_dev_get(pkt_dev->odev); + if (idev) { struct inet6_ifaddr *ifp; read_lock_bh(&idev->lock); @@ -2089,27 +2102,40 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) pkt_dev->nflows = 0; } -static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) + +static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) { - __u64 start; - __u64 now; + ktime_t start; + s32 remaining; + struct hrtimer_sleeper t; + + hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_set_expires(&t.timer, spin_until); + + remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer)); + if (remaining <= 0) + return; - start = now = getCurUs(); - while (now < spin_until_us) { - /* TODO: optimize sleeping behavior */ - if (spin_until_us - now > jiffies_to_usecs(1) + 1) - schedule_timeout_interruptible(1); - else if (spin_until_us - now > 100) { - if (!pkt_dev->running) - return; - if (need_resched()) + start = ktime_now(); + if (remaining < 100) + udelay(remaining); /* really small just spin */ + else { + /* see do_nanosleep */ + hrtimer_init_sleeper(&t, current); + do { + set_current_state(TASK_INTERRUPTIBLE); + hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); + if (!hrtimer_active(&t.timer)) + t.task = NULL; + + if (likely(t.task)) schedule(); - } - now = getCurUs(); + hrtimer_cancel(&t.timer); + } while (t.task && pkt_dev->running && !signal_pending(current)); + __set_current_state(TASK_RUNNING); } - - pkt_dev->idle_acc += now - start; + pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start)); } static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) @@ -2120,13 +2146,9 @@ static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev); } -static inline int f_seen(struct pktgen_dev *pkt_dev, int flow) +static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow) { - - if (pkt_dev->flows[flow].flags & F_INIT) - return 1; - else - return 0; + return !!(pkt_dev->flows[flow].flags & F_INIT); } static inline int f_pick(struct pktgen_dev *pkt_dev) @@ -2174,7 +2196,7 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) if (x) { pkt_dev->flows[flow].x = x; set_pkt_overhead(pkt_dev); - pkt_dev->pkt_overhead+=x->props.header_len; + pkt_dev->pkt_overhead += x->props.header_len; } } @@ -2313,18 +2335,18 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) if (!(pkt_dev->flags & F_IPV6)) { - if ((imn = ntohl(pkt_dev->saddr_min)) < (imx = - ntohl(pkt_dev-> - saddr_max))) { + imn = ntohl(pkt_dev->saddr_min); + imx = ntohl(pkt_dev->saddr_max); + if (imn < imx) { __u32 t; if (pkt_dev->flags & F_IPSRC_RND) t = random32() % (imx - imn) + imn; else { t = ntohl(pkt_dev->cur_saddr); t++; - if (t > imx) { + if (t > imx) t = imn; - } + } pkt_dev->cur_saddr = htonl(t); } @@ -2435,14 +2457,14 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev) if (err) goto error; - 
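The rewritten spin() above now takes an absolute ktime_t deadline, sleeps on an hrtimer sleeper for the bulk of the wait, and only busy-waits (udelay) when less than roughly 100 microseconds remain. The following user-space analogue is offered purely to illustrate that wait-until-absolute-deadline pattern; it is not the kernel code, and the clock choice and threshold are assumptions.

/* User-space analogue of the absolute-deadline wait used by spin():
 * sleep until "when" on a monotonic clock, busy-wait only for the
 * final stretch. (The kernel loop additionally re-checks
 * pkt_dev->running and pending signals.)
 */
#include <stdint.h>
#include <time.h>

#define SPIN_THRESHOLD_NS 100000ULL	/* ~100 us, mirrors the udelay() cutoff */

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void wait_until_ns(uint64_t when)
{
	struct timespec deadline = {
		.tv_sec  = when / 1000000000ULL,
		.tv_nsec = when % 1000000000ULL,
	};

	if (when > now_ns() + SPIN_THRESHOLD_NS)
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);

	while (now_ns() < when)
		;	/* final stretch: spin, like the udelay() branch */
}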
x->curlft.bytes +=skb->len; + x->curlft.bytes += skb->len; x->curlft.packets++; error: spin_unlock(&x->lock); return err; } -static inline void free_SAs(struct pktgen_dev *pkt_dev) +static void free_SAs(struct pktgen_dev *pkt_dev) { if (pkt_dev->cflows) { /* let go of the SAs if we have them */ @@ -2457,7 +2479,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev) } } -static inline int process_ipsec(struct pktgen_dev *pkt_dev, +static int process_ipsec(struct pktgen_dev *pkt_dev, struct sk_buff *skb, __be16 protocol) { if (pkt_dev->flags & F_IPSEC_ON) { @@ -2467,11 +2489,11 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev, int ret; __u8 *eth; nhead = x->props.header_len - skb_headroom(skb); - if (nhead >0) { + if (nhead > 0) { ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); if (ret < 0) { printk(KERN_ERR "Error expanding " - "ipsec packet %d\n",ret); + "ipsec packet %d\n", ret); goto err; } } @@ -2481,13 +2503,13 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev, ret = pktgen_output_ipsec(skb, pkt_dev); if (ret) { printk(KERN_ERR "Error creating ipsec " - "packet %d\n",ret); + "packet %d\n", ret); goto err; } /* restore ll */ eth = (__u8 *) skb_push(skb, ETH_HLEN); memcpy(eth, pkt_dev->hh, 12); - *(u16 *) & eth[12] = protocol; + *(u16 *) ð[12] = protocol; } } return 1; @@ -2500,9 +2522,9 @@ err: static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) { unsigned i; - for (i = 0; i < pkt_dev->nr_labels; i++) { + for (i = 0; i < pkt_dev->nr_labels; i++) *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; - } + mpls--; *mpls |= MPLS_STACK_BOTTOM; } @@ -2543,8 +2565,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, mod_cur_headers(pkt_dev); datalen = (odev->hard_header_len + 16) & ~0xf; - skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + - pkt_dev->pkt_overhead, GFP_ATOMIC); + skb = __netdev_alloc_skb(odev, + pkt_dev->cur_pkt_size + 64 + + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; @@ -2668,8 +2691,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, } } - /* Stamp the time, and sequence number, convert them to network byte order */ - + /* Stamp the time, and sequence number, + * convert them to network byte order + */ if (pgh) { struct timeval timestamp; @@ -2882,8 +2906,9 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, queue_map = pkt_dev->cur_queue_map; mod_cur_headers(pkt_dev); - skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + - pkt_dev->pkt_overhead, GFP_ATOMIC); + skb = __netdev_alloc_skb(odev, + pkt_dev->cur_pkt_size + 64 + + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; @@ -2922,7 +2947,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, udph = udp_hdr(skb); memcpy(eth, pkt_dev->hh, 12); - *(__be16 *) & eth[12] = protocol; + *(__be16 *) ð[12] = protocol; /* Eth + IPh + UDPh + mpls */ datalen = pkt_dev->cur_pkt_size - 14 - @@ -3016,8 +3041,10 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, } } - /* Stamp the time, and sequence number, convert them to network byte order */ - /* should we update cloned packets too ? */ + /* Stamp the time, and sequence number, + * convert them to network byte order + * should we update cloned packets too ? 
+ */ if (pgh) { struct timeval timestamp; @@ -3033,8 +3060,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, return skb; } -static inline struct sk_buff *fill_packet(struct net_device *odev, - struct pktgen_dev *pkt_dev) +static struct sk_buff *fill_packet(struct net_device *odev, + struct pktgen_dev *pkt_dev) { if (pkt_dev->flags & F_IPV6) return fill_packet_ipv6(odev, pkt_dev); @@ -3072,9 +3099,9 @@ static void pktgen_run(struct pktgen_thread *t) pktgen_clear_counters(pkt_dev); pkt_dev->running = 1; /* Cranke yeself! */ pkt_dev->skb = NULL; - pkt_dev->started_at = getCurUs(); - pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ - pkt_dev->next_tx_ns = 0; + pkt_dev->started_at = + pkt_dev->next_tx = ktime_now(); + set_pkt_overhead(pkt_dev); strcpy(pkt_dev->result, "Starting"); @@ -3101,17 +3128,14 @@ static void pktgen_stop_all_threads_ifs(void) mutex_unlock(&pktgen_thread_lock); } -static int thread_is_running(struct pktgen_thread *t) +static int thread_is_running(const struct pktgen_thread *t) { - struct pktgen_dev *pkt_dev; - int res = 0; + const struct pktgen_dev *pkt_dev; list_for_each_entry(pkt_dev, &t->if_list, list) - if (pkt_dev->running) { - res = 1; - break; - } - return res; + if (pkt_dev->running) + return 1; + return 0; } static int pktgen_wait_thread_run(struct pktgen_thread *t) @@ -3168,7 +3192,8 @@ static void pktgen_run_all_threads(void) mutex_unlock(&pktgen_thread_lock); - schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ + /* Propagate thread->control */ + schedule_timeout_interruptible(msecs_to_jiffies(125)); pktgen_wait_all_threads_run(); } @@ -3186,35 +3211,29 @@ static void pktgen_reset_all_threads(void) mutex_unlock(&pktgen_thread_lock); - schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ + /* Propagate thread->control */ + schedule_timeout_interruptible(msecs_to_jiffies(125)); pktgen_wait_all_threads_run(); } static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) { - __u64 total_us, bps, mbps, pps, idle; + __u64 bps, mbps, pps; char *p = pkt_dev->result; - - total_us = pkt_dev->stopped_at - pkt_dev->started_at; - - idle = pkt_dev->idle_acc; - - p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", - (unsigned long long)total_us, - (unsigned long long)(total_us - idle), - (unsigned long long)idle, + ktime_t elapsed = ktime_sub(pkt_dev->stopped_at, + pkt_dev->started_at); + ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); + + p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n", + (unsigned long long)ktime_to_us(elapsed), + (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), + (unsigned long long)ktime_to_us(idle), (unsigned long long)pkt_dev->sofar, pkt_dev->cur_pkt_size, nr_frags); - pps = pkt_dev->sofar * USEC_PER_SEC; - - while ((total_us >> 32) != 0) { - pps >>= 1; - total_us >>= 1; - } - - do_div(pps, total_us); + pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC, + ktime_to_ns(elapsed)); bps = pps * 8 * pkt_dev->cur_pkt_size; @@ -3228,7 +3247,6 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) } /* Set stopped-at timer, remove from running list, do counters & statistics */ - static int pktgen_stop_device(struct pktgen_dev *pkt_dev) { int nr_frags = pkt_dev->skb ? 
skb_shinfo(pkt_dev->skb)->nr_frags : -1; @@ -3239,7 +3257,9 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev) return -EINVAL; } - pkt_dev->stopped_at = getCurUs(); + kfree_skb(pkt_dev->skb); + pkt_dev->skb = NULL; + pkt_dev->stopped_at = ktime_now(); pkt_dev->running = 0; show_results(pkt_dev, nr_frags); @@ -3258,7 +3278,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t) continue; if (best == NULL) best = pkt_dev; - else if (pkt_dev->next_tx_us < best->next_tx_us) + else if (ktime_lt(pkt_dev->next_tx, best->next_tx)) best = pkt_dev; } if_unlock(t); @@ -3275,9 +3295,6 @@ static void pktgen_stop(struct pktgen_thread *t) list_for_each_entry(pkt_dev, &t->if_list, list) { pktgen_stop_device(pkt_dev); - kfree_skb(pkt_dev->skb); - - pkt_dev->skb = NULL; } if_unlock(t); @@ -3348,30 +3365,37 @@ static void pktgen_rem_thread(struct pktgen_thread *t) mutex_unlock(&pktgen_thread_lock); } -static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) +static void idle(struct pktgen_dev *pkt_dev) +{ + ktime_t idle_start = ktime_now(); + + if (need_resched()) + schedule(); + else + cpu_relax(); + + pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start)); +} + + +static void pktgen_xmit(struct pktgen_dev *pkt_dev) { struct net_device *odev = pkt_dev->odev; - int (*xmit)(struct sk_buff *, struct net_device *) + netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *) = odev->netdev_ops->ndo_start_xmit; struct netdev_queue *txq; - __u64 idle_start = 0; u16 queue_map; int ret; - if (pkt_dev->delay_us || pkt_dev->delay_ns) { - u64 now; - - now = getCurUs(); - if (now < pkt_dev->next_tx_us) - spin(pkt_dev, pkt_dev->next_tx_us); + if (pkt_dev->delay) { + spin(pkt_dev, pkt_dev->next_tx); /* This is max DELAY, this has special meaning of * "never transmit" */ - if (pkt_dev->delay_us == 0x7FFFFFFF) { - pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us; - pkt_dev->next_tx_ns = pkt_dev->delay_ns; - goto out; + if (pkt_dev->delay == ULLONG_MAX) { + pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX); + return; } } @@ -3383,47 +3407,32 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) } txq = netdev_get_tx_queue(odev, queue_map); - if (netif_tx_queue_stopped(txq) || - netif_tx_queue_frozen(txq) || - need_resched()) { - idle_start = getCurUs(); - - if (!netif_running(odev)) { + /* Did we saturate the queue already? 
*/ + if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) { + /* If device is down, then all queues are permnantly frozen */ + if (netif_running(odev)) + idle(pkt_dev); + else pktgen_stop_device(pkt_dev); - kfree_skb(pkt_dev->skb); - pkt_dev->skb = NULL; - goto out; - } - if (need_resched()) - schedule(); - - pkt_dev->idle_acc += getCurUs() - idle_start; - - if (netif_tx_queue_stopped(txq) || - netif_tx_queue_frozen(txq)) { - pkt_dev->next_tx_us = getCurUs(); /* TODO */ - pkt_dev->next_tx_ns = 0; - goto out; /* Try the next interface */ - } + return; } - if (pkt_dev->last_ok || !pkt_dev->skb) { - if ((++pkt_dev->clone_count >= pkt_dev->clone_skb) - || (!pkt_dev->skb)) { - /* build a new pkt */ - kfree_skb(pkt_dev->skb); + if (!pkt_dev->skb || (pkt_dev->last_ok && + ++pkt_dev->clone_count >= pkt_dev->clone_skb)) { + /* build a new pkt */ + kfree_skb(pkt_dev->skb); - pkt_dev->skb = fill_packet(odev, pkt_dev); - if (pkt_dev->skb == NULL) { - printk(KERN_ERR "pktgen: ERROR: couldn't " - "allocate skb in fill_packet.\n"); - schedule(); - pkt_dev->clone_count--; /* back out increment, OOM */ - goto out; - } - pkt_dev->allocated_skbs++; - pkt_dev->clone_count = 0; /* reset counter */ + pkt_dev->skb = fill_packet(odev, pkt_dev); + if (pkt_dev->skb == NULL) { + printk(KERN_ERR "pktgen: ERROR: couldn't " + "allocate skb in fill_packet.\n"); + schedule(); + pkt_dev->clone_count--; /* back out increment, OOM */ + return; } + + pkt_dev->allocated_skbs++; + pkt_dev->clone_count = 0; /* reset counter */ } /* fill_packet() might have changed the queue */ @@ -3431,73 +3440,53 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) txq = netdev_get_tx_queue(odev, queue_map); __netif_tx_lock_bh(txq); - if (!netif_tx_queue_stopped(txq) && - !netif_tx_queue_frozen(txq)) { - + if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) + pkt_dev->last_ok = 0; + else { atomic_inc(&(pkt_dev->skb->users)); - retry_now: + + retry_now: ret = (*xmit)(pkt_dev->skb, odev); - if (likely(ret == NETDEV_TX_OK)) { + switch (ret) { + case NETDEV_TX_OK: txq_trans_update(txq); pkt_dev->last_ok = 1; pkt_dev->sofar++; pkt_dev->seq_num++; pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; - - } else if (ret == NETDEV_TX_LOCKED - && (odev->features & NETIF_F_LLTX)) { + break; + case NETDEV_TX_LOCKED: cpu_relax(); goto retry_now; - } else { /* Retry it next time */ - - atomic_dec(&(pkt_dev->skb->users)); - - if (debug && net_ratelimit()) - printk(KERN_INFO "pktgen: Hard xmit error\n"); - + default: /* Drivers are not supposed to return other values! 
*/ + if (net_ratelimit()) + pr_info("pktgen: %s xmit error: %d\n", + odev->name, ret); pkt_dev->errors++; + /* fallthru */ + case NETDEV_TX_BUSY: + /* Retry it next time */ + atomic_dec(&(pkt_dev->skb->users)); pkt_dev->last_ok = 0; } - pkt_dev->next_tx_us = getCurUs(); - pkt_dev->next_tx_ns = 0; - - pkt_dev->next_tx_us += pkt_dev->delay_us; - pkt_dev->next_tx_ns += pkt_dev->delay_ns; - - if (pkt_dev->next_tx_ns > 1000) { - pkt_dev->next_tx_us++; - pkt_dev->next_tx_ns -= 1000; - } + if (pkt_dev->delay) + pkt_dev->next_tx = ktime_add_ns(ktime_now(), + pkt_dev->delay); } - - else { /* Retry it next time */ - pkt_dev->last_ok = 0; - pkt_dev->next_tx_us = getCurUs(); /* TODO */ - pkt_dev->next_tx_ns = 0; - } - __netif_tx_unlock_bh(txq); /* If pkt_dev->count is zero, then run forever */ if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { - if (atomic_read(&(pkt_dev->skb->users)) != 1) { - idle_start = getCurUs(); - while (atomic_read(&(pkt_dev->skb->users)) != 1) { - if (signal_pending(current)) { - break; - } - schedule(); - } - pkt_dev->idle_acc += getCurUs() - idle_start; + while (atomic_read(&(pkt_dev->skb->users)) != 1) { + if (signal_pending(current)) + break; + idle(pkt_dev); } /* Done with this */ pktgen_stop_device(pkt_dev); - kfree_skb(pkt_dev->skb); - pkt_dev->skb = NULL; } -out:; } /* @@ -3516,7 +3505,8 @@ static int pktgen_thread_worker(void *arg) init_waitqueue_head(&t->queue); complete(&t->start_done); - pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); + pr_debug("pktgen: starting pktgen/%d: pid=%d\n", + cpu, task_pid_nr(current)); set_current_state(TASK_INTERRUPTIBLE); @@ -3651,8 +3641,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) pkt_dev->max_pkt_size = ETH_ZLEN; pkt_dev->nfrags = 0; pkt_dev->clone_skb = pg_clone_skb_d; - pkt_dev->delay_us = pg_delay_d / 1000; - pkt_dev->delay_ns = pg_delay_d % 1000; + pkt_dev->delay = pg_delay_d; pkt_dev->count = pg_count_d; pkt_dev->sofar = 0; pkt_dev->udp_src_min = 9; /* sink port */ @@ -3864,10 +3853,15 @@ static void __exit pg_cleanup(void) module_init(pg_init); module_exit(pg_cleanup); -MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se"); +MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>"); MODULE_DESCRIPTION("Packet Generator tool"); MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); module_param(pg_count_d, int, 0); +MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject"); module_param(pg_delay_d, int, 0); +MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)"); module_param(pg_clone_skb_d, int, 0); +MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet"); module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Enable debugging of pktgen module"); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d78030f..eb42873 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -35,7 +35,6 @@ #include <linux/security.h> #include <linux/mutex.h> #include <linux/if_addr.h> -#include <linux/nsproxy.h> #include <asm/uaccess.h> #include <asm/system.h> @@ -52,6 +51,7 @@ #include <net/pkt_sched.h> #include <net/fib_rules.h> #include <net/rtnetlink.h> +#include <net/net_namespace.h> struct rtnl_link { @@ -606,7 +606,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, unsigned int flags) { - struct netdev_queue *txq; struct ifinfomsg *ifm; struct nlmsghdr *nlh; const struct net_device_stats *stats; @@ -637,9 +636,8 @@ static int 
rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, if (dev->master) NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); - txq = netdev_get_tx_queue(dev, 0); - if (txq->qdisc_sleeping) - NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id); + if (dev->qdisc) + NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id); if (dev->ifalias) NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); @@ -725,25 +723,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { [IFLA_INFO_DATA] = { .type = NLA_NESTED }, }; -static struct net *get_net_ns_by_pid(pid_t pid) -{ - struct task_struct *tsk; - struct net *net; - - /* Lookup the network namespace */ - net = ERR_PTR(-ESRCH); - rcu_read_lock(); - tsk = find_task_by_vpid(pid); - if (tsk) { - struct nsproxy *nsproxy; - nsproxy = task_nsproxy(tsk); - if (nsproxy) - net = get_net(nsproxy->net_ns); - } - rcu_read_unlock(); - return net; -} - static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) { if (dev) { @@ -993,12 +972,20 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname, { int err; struct net_device *dev; + unsigned int num_queues = 1; + unsigned int real_num_queues = 1; + if (ops->get_tx_queues) { + err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues); + if (err) + goto err; + } err = -ENOMEM; - dev = alloc_netdev(ops->priv_size, ifname, ops->setup); + dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues); if (!dev) goto err; + dev->real_num_tx_queues = real_num_queues; if (strchr(dev->name, '%')) { err = dev_alloc_name(dev, dev->name); if (err < 0) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9e0597d..80a9616 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -559,9 +559,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) #endif #endif new->vlan_tci = old->vlan_tci; -#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) - new->do_not_encrypt = old->do_not_encrypt; -#endif skb_copy_secmark(new, old); } diff --git a/net/core/sock.c b/net/core/sock.c index 7633422..30d5446 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -142,7 +142,7 @@ static struct lock_class_key af_family_slock_keys[AF_MAX]; * strings build-time, so that runtime initialization of socket * locks is fast): */ -static const char *af_family_key_strings[AF_MAX+1] = { +static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , @@ -158,7 +158,7 @@ static const char *af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_IEEE802154", "sk_lock-AF_MAX" }; -static const char *af_family_slock_key_strings[AF_MAX+1] = { +static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , @@ -174,7 +174,7 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_IEEE802154", "slock-AF_MAX" }; -static const char *af_family_clock_key_strings[AF_MAX+1] = { +static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , @@ -482,6 +482,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname, 
 		sk->sk_reuse = valbool;
 		break;
 	case SO_TYPE:
+	case SO_PROTOCOL:
+	case SO_DOMAIN:
 	case SO_ERROR:
 		ret = -ENOPROTOOPT;
 		break;
@@ -764,6 +766,14 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		v.val = sk->sk_type;
 		break;
 
+	case SO_PROTOCOL:
+		v.val = sk->sk_protocol;
+		break;
+
+	case SO_DOMAIN:
+		v.val = sk->sk_family;
+		break;
+
 	case SO_ERROR:
 		v.val = -sock_error(sk);
 		if (v.val == 0)
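Together with the -ENOPROTOOPT handling added to sock_setsockopt() above, SO_PROTOCOL and SO_DOMAIN become read-only options that report what the socket was created with. A minimal user-space sketch, assuming libc headers that already define the two constants:

/* Query the protocol and address family of an existing socket via the
 * new read-only options. The printed values are whatever the socket
 * was created with.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int protocol = 0, domain = 0;
	socklen_t len = sizeof(int);

	if (fd < 0)
		return 1;

	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len) == 0)
		printf("SO_PROTOCOL = %d\n", protocol);	/* IPPROTO_TCP here */

	len = sizeof(int);
	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) == 0)
		printf("SO_DOMAIN   = %d\n", domain);	/* AF_INET here */

	close(fd);
	return 0;
}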