From 49a45a0686cc2b43bcb3834a68416a201475dc77 Mon Sep 17 00:00:00 2001
From: Hong Zhiguo
Date: Tue, 22 Oct 2013 18:32:56 +0000
Subject: e1000: fix wrong queue idx calculation

tx_ring and adapter->tx_ring are already of type "struct e1000_tx_ring *",
so subtracting them already yields the queue index; dividing the difference
by sizeof(struct e1000_tx_ring) again produced a bogus index.

Signed-off-by: Hong Zhiguo
Tested-by: Aaron Brown
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/intel/e1000/e1000_main.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007..ad6800a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3917,8 +3917,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			" next_to_watch <%x>\n"
 			" jiffies <%lx>\n"
 			" next_to_watch.status <%x>\n",
-			(unsigned long)((tx_ring - adapter->tx_ring) /
-				sizeof(struct e1000_tx_ring)),
+			(unsigned long)(tx_ring - adapter->tx_ring),
 			readl(hw->hw_addr + tx_ring->tdh),
 			readl(hw->hw_addr + tx_ring->tdt),
 			tx_ring->next_to_use,
--
cgit v1.1

From 76b81748d43f2c60774c2703e3a9390bcc552adb Mon Sep 17 00:00:00 2001
From: Emil Tantilov
Date: Tue, 29 Oct 2013 08:31:49 +0000
Subject: ixgbevf: remove redundant workaround

This patch removes a workaround related to header split, which is redundant
because the driver does not support splitting packet headers on Rx.

Signed-off-by: Emil Tantilov
Tested-by: Phil Schmitt
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 9 ---------
 1 file changed, 9 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b16b694..9685354 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -497,15 +497,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		total_rx_bytes += skb->len;
 		total_rx_packets++;

-		/*
-		 * Work around issue of some types of VM to VM loop back
-		 * packets not getting split correctly
-		 */
-		if (staterr & IXGBE_RXD_STAT_LB) {
-			u32 header_fixup_len = skb_headlen(skb);
-			if (header_fixup_len < 14)
-				skb_push(skb, header_fixup_len);
-		}
 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

 		/* Workaround hardware that can't do proper VEPA multicast
--
cgit v1.1
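
A brief aside on the e1000 fix above: subtracting two pointers of the same
struct type in C already yields an element count, so dividing that difference
by the element size again is wrong. A minimal standalone sketch (struct
tx_ring here is just a stand-in for the real struct e1000_tx_ring):

#include <stdio.h>

struct tx_ring { int pad[32]; };	/* stand-in for struct e1000_tx_ring */

int main(void)
{
	struct tx_ring rings[4];
	struct tx_ring *cur = &rings[2];

	/* Pointer subtraction is already scaled by sizeof(struct tx_ring). */
	printf("queue idx: %ld\n", (long)(cur - rings));	/* prints 2 */

	/* The removed code divided by the element size a second time. */
	printf("old value: %ld\n",
	       (long)((cur - rings) / sizeof(struct tx_ring)));	/* prints 0 */

	return 0;
}
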
From a71fc313c4f569be5788caff07ef1fe346842c5b Mon Sep 17 00:00:00 2001
From: "Fujinaka, Todd"
Date: Wed, 23 Oct 2013 05:52:11 +0000
Subject: igb: Don't let ethtool try to write to iNVM in i210/i211

Don't let ethtool try to write to iNVM in i210/i211. This fixes an issue
seen by Marek Vasut.

Reported-by: Marek Vasut
Signed-off-by: Todd Fujinaka
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/intel/igb/igb_ethtool.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 0ae3177..b918ba3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -771,8 +771,10 @@ static int igb_set_eeprom(struct net_device *netdev,
 	if (eeprom->len == 0)
 		return -EOPNOTSUPP;

-	if (hw->mac.type == e1000_i211)
+	if ((hw->mac.type >= e1000_i210) &&
+	    !igb_get_flash_presence_i210(hw)) {
 		return -EOPNOTSUPP;
+	}

 	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
--
cgit v1.1

From fb44519de929d1d9bba967645c6d9def8784d857 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Tue, 22 Oct 2013 18:34:01 +0000
Subject: ixgbe: Reduce memory consumption with larger page sizes

The ixgbe driver allocates pages for its receive rings. It currently uses
512 pages, regardless of page size. During receive handling it adds the
unused part of the page back into the rx ring, avoiding the need for a new
allocation.

On a ppc64 box with 64 threads and 64kB pages, we end up with
512 entries * 64 rx queues * 64kB = 2GB of memory used. Even more of a
concern is that we use up 2GB of IOMMU space in order to map all this
memory.

The driver makes a number of decisions based on whether PAGE_SIZE is less
than 8kB, so use this as the breakpoint and only allocate 128 entries on
8kB or larger page sizes.

Signed-off-by: Anton Blanchard
Tested-by: Phil Schmitt
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f51fd1f..0914914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -67,7 +67,11 @@
 #define IXGBE_MAX_TXD 4096
 #define IXGBE_MIN_TXD 64

+#if (PAGE_SIZE < 8192)
 #define IXGBE_DEFAULT_RXD 512
+#else
+#define IXGBE_DEFAULT_RXD 128
+#endif
 #define IXGBE_MAX_RXD 4096
 #define IXGBE_MIN_RXD 64
--
cgit v1.1
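
To put numbers on the commit message above (the 64 rx queues and 64kB pages
are just the ppc64 example it describes, with one page assumed per rx
descriptor):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 64 * 1024;	/* 64kB pages (ppc64 example) */
	unsigned long rx_queues = 64;

	/* Old default: 512 rx descriptors per ring regardless of page size. */
	printf("512 entries: %lu MB\n", (512 * rx_queues * page_size) >> 20);

	/* New default when PAGE_SIZE >= 8kB: 128 descriptors per ring. */
	printf("128 entries: %lu MB\n", (128 * rx_queues * page_size) >> 20);

	return 0;
}

This reproduces the 2GB figure from the changelog and shows the new default
dropping it to 512MB on the same configuration.
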
From f880d07bc5bc9f453be7b1fc9c1a34853719d148 Mon Sep 17 00:00:00 2001
From: Don Skidmore
Date: Wed, 23 Oct 2013 02:17:52 +0000
Subject: ixgbe: cleanup IXGBE_DESC_UNUSED

This patch just replaces the IXGBE_DESC_UNUSED macro with a like-named
inline function, ixgbevf_desc_unused. The inline function makes the logic
a bit more readable.

Signed-off-by: Alexander Duyck
Signed-off-by: Don Skidmore
Tested-by: Phil Schmitt
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      | 10 +++++++---
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 10 +++++-----
 2 files changed, 12 insertions(+), 8 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index acf3806..8971e2d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -286,9 +286,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
 	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG

-#define IXGBE_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}

 #define IXGBEVF_RX_DESC(R, i) \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9685354..038bfc8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -529,7 +529,7 @@ next_desc:
 	}
 	rx_ring->next_to_clean = i;

-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbevf_desc_unused(rx_ring);

 	if (cleaned_count)
 		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -1380,7 +1380,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
 		ixgbevf_alloc_rx_buffers(adapter, ring,
-					 IXGBE_DESC_UNUSED(ring));
+					 ixgbevf_desc_unused(ring));
 	}
 }
@@ -3102,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 	/* We need to check again in a case another CPU has just
 	 * made room available.
 	 */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbevf_desc_unused(tx_ring) < size))
 		return -EBUSY;

 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3113,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbevf_maybe_stop_tx(tx_ring, size);
 }
--
cgit v1.1
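
The helper added above is the usual one-slot-reserved ring-buffer arithmetic:
one descriptor is kept free so that next_to_use catching up to next_to_clean
cannot be confused with an empty ring. A worked example of the same formula
on plain values (the 16-entry ring and the indices are made up purely for
illustration):

#include <stdio.h>

/* Same arithmetic as ixgbevf_desc_unused(), on plain values for clarity. */
static unsigned int desc_unused(unsigned int count,
				unsigned int ntc, unsigned int ntu)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* Not wrapped: clean = 2, use = 10 -> 16 + 2 - 10 - 1 = 7 free. */
	printf("%u\n", desc_unused(16, 2, 10));

	/* Wrapped: clean = 10, use = 2 -> 0 + 10 - 2 - 1 = 7 free. */
	printf("%u\n", desc_unused(16, 10, 2));

	return 0;
}
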
From cf78959c0d7afbde31498afc4212294c28e2c278 Mon Sep 17 00:00:00 2001
From: Emil Tantilov
Date: Sat, 26 Oct 2013 08:13:20 +0000
Subject: ixgbe: fix inconsistent clearing of the multicast table

This patch resolves an issue where the MTA table can be cleared when the
interface is reset while in promisc mode. As a result, IPv6 traffic between
VFs is interrupted.

This patch makes the update of the MTA table unconditional to avoid the
inconsistent clearing on reset.

Signed-off-by: Emil Tantilov
Tested-by: Phil Schmitt
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a7d1a1c..5191b3c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3823,14 +3823,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
 			vmolr |= IXGBE_VMOLR_MPE;
-		} else {
-			/*
-			 * Write addresses to the MTA, if the attempt fails
-			 * then we should just turn on promiscuous mode so
-			 * that we can at least receive multicast traffic
-			 */
-			hw->mac.ops.update_mc_addr_list(hw, netdev);
-			vmolr |= IXGBE_VMOLR_ROMPE;
 		}
 		ixgbe_vlan_filter_enable(adapter);
 		hw->addr_ctrl.user_set_promisc = false;
@@ -3847,6 +3839,13 @@
 		vmolr |= IXGBE_VMOLR_ROPE;
 	}

+	/* Write addresses to the MTA, if the attempt fails
+	 * then we should just turn on promiscuous mode so
+	 * that we can at least receive multicast traffic
+	 */
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
+	vmolr |= IXGBE_VMOLR_ROMPE;
+
 	if (adapter->num_vfs)
 		ixgbe_restore_vf_multicasts(adapter);
--
cgit v1.1
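
To make the behavioral change in this last patch concrete, here is a tiny
standalone model (not driver code; the two helpers below are hypothetical
stand-ins for the old and new ixgbe_set_rx_mode() flow). Before the patch
the MTA was rewritten only when neither promiscuous nor allmulti mode was
active, so a reset taken in one of those modes left the freshly cleared
table empty; after the patch the rewrite is unconditional.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical models of when ixgbe_set_rx_mode() rewrites the MTA. */
static bool mta_rewritten_before_patch(bool promisc, bool allmulti)
{
	/* Old flow: update_mc_addr_list() lived in the branch taken only
	 * when neither promisc nor allmulti was set. */
	return !promisc && !allmulti;
}

static bool mta_rewritten_after_patch(bool promisc, bool allmulti)
{
	(void)promisc;
	(void)allmulti;
	return true;	/* new flow: the MTA is always rewritten */
}

int main(void)
{
	/* Reset while in promisc mode: the old flow left the MTA cleared. */
	printf("before patch: %d, after patch: %d\n",
	       mta_rewritten_before_patch(true, false),
	       mta_rewritten_after_patch(true, false));
	return 0;
}
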