-rw-r--r-- | drivers/net/tg3.c      | 51
-rw-r--r-- | drivers/net/tg3.h      |  8
-rw-r--r-- | include/linux/skbuff.h |  4
-rw-r--r-- | net/core/dst.c         |  3
-rw-r--r-- | net/core/pktgen.c      |  4
-rw-r--r-- | net/core/rtnetlink.c   | 15
-rw-r--r-- | net/core/skbuff.c      |  4
-rw-r--r-- | net/ipv4/route.c       |  2
-rw-r--r-- | net/ipv4/tcp_output.c  | 12
-rw-r--r-- | net/ipx/af_ipx.c       | 10
10 files changed, 72 insertions, 41 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6f97962..eafabb2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.64"
-#define DRV_MODULE_RELDATE	"July 31, 2006"
+#define DRV_MODULE_VERSION	"3.65"
+#define DRV_MODULE_RELDATE	"August 07, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -123,9 +123,6 @@
 				  TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP)						\
-	((TP)->tx_pending -						\
-	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+	smp_mb();
+	return (tp->tx_pending -
+		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
 
 	tp->tx_cons = sw_idx;
 
-	if (unlikely(netif_queue_stopped(tp->dev))) {
-		spin_lock(&tp->tx_lock);
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(tp->dev) &&
+		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
+		netif_tx_unlock(tp->dev);
 	}
 }
 
@@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	if (skb == NULL)
 		return -ENOMEM;
 
-	skb->dev = tp->dev;
 	skb_reserve(skb, tp->rx_offset);
 
 	mapping = pci_map_single(tp->pdev, skb->data,
@@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
-			copy_skb->dev = tp->dev;
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
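[Editor's note: the tg3 hunks drop tp->tx_lock and let the xmit and completion paths synchronize through tg3_tx_avail(): each side updates its own ring index, and the smp_mb() inside tg3_tx_avail() orders that store before the index loads on the other side. Below is a minimal, hypothetical userspace model of the protocol in C11 atomics — not driver code; RING_SIZE, WAKEUP_THRESH, prod, cons, stopped, xmit_one() and complete_one() are all illustrative stand-ins for the tg3 names.]

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE     512               /* power of two, like TG3_TX_RING_SIZE */
#define WAKEUP_THRESH (RING_SIZE / 4)   /* stand-in for TG3_TX_WAKEUP_THRESH */

static _Atomic unsigned int prod, cons; /* models tp->tx_prod / tp->tx_cons */
static atomic_bool stopped;             /* models netif_queue_stopped() */

/* Models tg3_tx_avail(): the full fence plays the role of smp_mb(),
 * ordering the caller's earlier store (its prod or cons bump) before
 * the loads below. */
static unsigned int tx_avail(void)
{
	atomic_thread_fence(memory_order_seq_cst);
	return RING_SIZE -
	       ((atomic_load(&prod) - atomic_load(&cons)) & (RING_SIZE - 1));
}

/* Producer, models tg3_start_xmit(): publish a descriptor, then stop
 * the queue if the ring is now almost full. */
static bool xmit_one(void)
{
	if (tx_avail() <= 1)
		return false;                   /* NETDEV_TX_BUSY */
	atomic_fetch_add(&prod, 1);
	if (tx_avail() <= 1) {
		atomic_store(&stopped, true);
		/* Re-check: completions may have freed space between the
		 * test and the store, and would then have seen the queue
		 * still running and skipped the wakeup. */
		if (tx_avail() > WAKEUP_THRESH)
			atomic_store(&stopped, false);
	}
	return true;
}

/* Consumer, models tg3_tx(): retire a descriptor, then wake the queue.
 * The fence inside tx_avail() makes the cons update visible before
 * "stopped" is tested; without it this side could read a stale
 * "running" state while the producer stops the queue against a stale
 * cons, and the queue would stay stopped forever — the lost-wakeup
 * race the patch comment describes. */
static void complete_one(void)
{
	atomic_fetch_add(&cons, 1);
	if (atomic_load(&stopped) && tx_avail() > WAKEUP_THRESH)
		atomic_store(&stopped, false);
}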
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 	struct sk_buff *segs, *nskb;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
 		netif_stop_queue(tp->dev);
 		return NETDEV_TX_BUSY;
 	}
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
-	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c987..3ecf356 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
 	 * lock: Held during reset, PHY access, timer, and when
 	 *       updating tg3_flags and tg3_flags2.
 	 *
-	 * tx_lock: Held during tg3_start_xmit and tg3_tx only
-	 *          when calling netif_[start|stop]_queue.
-	 *          tg3_start_xmit is protected by netif_tx_lock.
+	 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+	 *                netif_tx_lock when it needs to call
+	 *                netif_wake_queue.
 	 *
 	 * Both of these locks are to be held with BH safety.
 	 *
@@ -2118,8 +2118,6 @@ struct tg3 {
 	u32				tx_cons;
 	u32				tx_pending;
 
-	spinlock_t			tx_lock;
-
 	struct tg3_tx_buffer_desc	*tx_ring;
 	struct tx_ring_info		*tx_buffers;
 	dma_addr_t			tx_desc_mapping;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 19c96d4..3573ba9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1081,7 +1081,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory.
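[Editor's note: the dst.c hunk further down this page replaces the two-step "set expires, then add_timer()" sequence with a single mod_timer() call. A rough sketch of that re-arm idiom, assuming the 2.6-era timer API; my_timer and my_work_fn are hypothetical names, not code from this patch:]

#include <linux/timer.h>
#include <linux/jiffies.h>

static void my_work_fn(unsigned long data);
static struct timer_list my_timer = TIMER_INITIALIZER(my_work_fn, 0, 0);

static void my_work_fn(unsigned long data)
{
	/* ... periodic work ... */

	/* Re-arm in one step.  mod_timer() is safe whether or not the
	 * timer is currently pending, whereas the old two-step form
	 *
	 *	my_timer.expires = jiffies + HZ;
	 *	add_timer(&my_timer);
	 *
	 * is fragile: add_timer() on an already-pending timer is a bug,
	 * so any path that can re-add the timer concurrently must use
	 * mod_timer() instead. */
	mod_timer(&my_timer, jiffies + HZ);
}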
+ *	%NULL is returned if there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      gfp_t gfp_mask)
@@ -1101,7 +1101,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory. Although this function
+ *	%NULL is returned if there is no free memory. Although this function
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
diff --git a/net/core/dst.c b/net/core/dst.c
index 470c05b..1a5e49d 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy)
 		dst_gc_timer_inc = DST_GC_INC;
 		dst_gc_timer_expires = DST_GC_MIN;
 	}
-	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
 #if RT_CACHE_DEBUG >= 2
 	printk("dst_total: %d/%d %ld\n",
 	       atomic_read(&dst_total), delayed,  dst_gc_timer_expires);
 #endif
-	add_timer(&dst_gc_timer);
+	mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
 
 out:
 	spin_unlock(&dst_lock);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 67ed14d..6a7320b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.iph = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->protocol = protocol;
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.ipv6h = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 20e5bb7..30cc1ba 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	}
 
 	if (ida[IFLA_ADDRESS - 1]) {
+		struct sockaddr *sa;
+		int len;
+
 		if (!dev->set_mac_address) {
 			err = -EOPNOTSUPP;
 			goto out;
@@ -405,7 +408,17 @@
 		if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
 			goto out;
 
-		err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
+		len = sizeof(sa_family_t) + dev->addr_len;
+		sa = kmalloc(len, GFP_KERNEL);
+		if (!sa) {
+			err = -ENOMEM;
+			goto out;
+		}
+		sa->sa_family = dev->type;
+		memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]),
+		       dev->addr_len);
+		err = dev->set_mac_address(dev, sa);
+		kfree(sa);
 		if (err)
 			goto out;
 		send_addr_notify = 1;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 022d889..c54f366 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,8 +268,10 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	struct sk_buff *skb;
 
 	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb))
+	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
 	return skb;
 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 19bd49d..b873cbc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3157,7 +3157,7 @@ int __init ip_rt_init(void)
 					rhash_entries,
 					(num_physpages >= 128 * 1024) ?
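[Editor's note: the rtnetlink hunk above exists because dev->set_mac_address() takes a struct sockaddr, while the IFLA_ADDRESS attribute carries only the raw hardware-address bytes; struct sockaddr also guarantees just 14 bytes of sa_data, which can be shorter than addr_len on some media, hence the kmalloc() of sizeof(sa_family_t) + dev->addr_len. For context, a sketch of how a typical Ethernet driver callback consumes that argument; my_set_mac() is a hypothetical example, not code from this patch:]

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int my_set_mac(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	/* Drivers read dev->addr_len bytes starting at sa_data, which is
	 * why do_setlink() now builds a properly sized sockaddr and
	 * copies the RTA_DATA() bytes in, instead of handing the raw
	 * attribute payload (which has no sa_family header at all)
	 * straight to the driver. */
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}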
 					15 : 17,
-					HASH_HIGHMEM,
+					0,
 					&rt_hash_log,
 					&rt_hash_mask,
 					0);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5c08ea2..507adef 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -466,7 +466,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	if (skb->len != tcp_header_size)
 		tcp_event_data_sent(tp, skb, sk);
 
-	TCP_INC_STATS(TCP_MIB_OUTSEGS);
+	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
+		TCP_INC_STATS(TCP_MIB_OUTSEGS);
 
 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
 	if (likely(err <= 0))
@@ -2157,10 +2158,9 @@ int tcp_connect(struct sock *sk)
 	skb_shinfo(buff)->gso_size = 0;
 	skb_shinfo(buff)->gso_type = 0;
 	buff->csum = 0;
+	tp->snd_nxt = tp->write_seq;
 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
-	tp->snd_nxt = tp->write_seq;
-	tp->pushed_seq = tp->write_seq;
 
 	/* Send it off. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2170,12 @@ int tcp_connect(struct sock *sk)
 	sk_charge_skb(sk, buff);
 	tp->packets_out += tcp_skb_pcount(buff);
 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
+
+	/* We change tp->snd_nxt after the tcp_transmit_skb() call
+	 * in order to make this packet get counted in tcpOutSegs.
+	 */
+	tp->snd_nxt = tp->write_seq;
+	tp->pushed_seq = tp->write_seq;
 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
 	/* Timer for repeating the SYN until an answer. */
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index aa34ff4..40196420 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
 		goto out;
 
-	ipx = ipx_hdr(skb);
-	ipx_pktsize = ntohs(ipx->ipx_pktsize);
+	if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
+		goto drop;
+
+	ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
 
 	/* Too small or invalid header? */
-	if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len)
+	if (ipx_pktsize < sizeof(struct ipxhdr) ||
+	    !pskb_may_pull(skb, ipx_pktsize))
 		goto drop;
 
+	ipx = ipx_hdr(skb);
 	if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
 	    ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
 		goto drop;
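[Editor's note: the af_ipx.c hunk is a header-validation fix: on a nonlinear skb the IPX header may not lie in the linear data area, so it must not be dereferenced before pskb_may_pull() guarantees that many linear bytes. pskb_may_pull() also fails when the requested length exceeds skb->len, which subsumes the old "ipx_pktsize > skb->len" test. A generic sketch of the same pattern for a hypothetical protocol; struct foohdr and foo_rcv() are illustrative names, while pskb_may_pull(), ntohs() and kfree_skb() are real kernel interfaces:]

#include <linux/skbuff.h>

struct foohdr {
	__be16	len;	/* total packet length, network byte order */
	__be16	type;
};

static int foo_rcv(struct sk_buff *skb)
{
	const struct foohdr *fh;
	unsigned int pktlen;

	/* Make sure the fixed header is in the linear area before
	 * touching it; skb->len alone says nothing about linearity. */
	if (!pskb_may_pull(skb, sizeof(struct foohdr)))
		goto drop;

	pktlen = ntohs(((const struct foohdr *)skb->data)->len);

	/* Validate the claimed length, then make that much linear too;
	 * pskb_may_pull() rechecks pktlen <= skb->len for us. */
	if (pktlen < sizeof(struct foohdr) || !pskb_may_pull(skb, pktlen))
		goto drop;

	/* pskb_may_pull() can reallocate the header, so fetch the
	 * pointer only after the last pull, as ipx_rcv() now does. */
	fh = (const struct foohdr *)skb->data;
	/* ... process fh ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}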