Diffstat (limited to 'drivers/net/igbvf/netdev.c')
-rw-r--r--  drivers/net/igbvf/netdev.c | 83
1 file changed, 57 insertions(+), 26 deletions(-)
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 91024a3..a127620 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -170,18 +170,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
if (!buffer_info->skb) {
- skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+ skb = netdev_alloc_skb_ip_align(netdev, bufsz);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- /* Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
buffer_info->skb = skb;
buffer_info->dma = pci_map_single(pdev, skb->data,
bufsz,
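
This hunk replaces the open-coded netdev_alloc_skb() + skb_reserve(NET_IP_ALIGN) pair with netdev_alloc_skb_ip_align(), which folds the alignment reservation into the allocation. A minimal sketch of the equivalent open-coded helper the new call supersedes (illustrative only, not part of the patch):

	static inline struct sk_buff *rx_alloc_aligned(struct net_device *netdev,
						       unsigned int bufsz)
	{
		/* allocate NET_IP_ALIGN extra bytes, then skip them so the
		 * 14-byte Ethernet header leaves the IP header 16-byte aligned
		 */
		struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);

		if (skb)
			skb_reserve(skb, NET_IP_ALIGN);
		return skb;
	}
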
@@ -372,10 +366,20 @@ next_desc:
static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
struct igbvf_buffer *buffer_info)
{
- buffer_info->dma = 0;
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
if (buffer_info->skb) {
- skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
- DMA_TO_DEVICE);
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
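
With the skb_dma_unmap() call gone, igbvf_put_txbuf() now tears down each mapping itself, using mapped_as_page to pick pci_unmap_page() or pci_unmap_single(). A hedged sketch of how a ring-cleanup path would use it (the loop is illustrative; the driver's actual igbvf_clean_tx_ring() also clears the descriptor memory):

	static void example_clean_tx_ring(struct igbvf_adapter *adapter,
					  struct igbvf_ring *tx_ring)
	{
		unsigned int i;

		/* release every outstanding tx buffer; igbvf_put_txbuf()
		 * unmaps the dma (page or single) and frees the skb if set
		 */
		for (i = 0; i < tx_ring->count; i++)
			igbvf_put_txbuf(adapter, &tx_ring->buffer_info[i]);
	}
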
@@ -823,8 +827,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[i].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
- (adapter->tx_timeout_factor * HZ))
- && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+ (adapter->tx_timeout_factor * HZ)) &&
+ !(er32(STATUS) & E1000_STATUS_TXOFF)) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
/* detected Tx unit hang */
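
The hunk above only moves the trailing '&&' to match kernel coding style; the condition itself flags a Tx hang when a descriptor's time_stamp is older than tx_timeout_factor * HZ and the MAC is not paused by flow control (E1000_STATUS_TXOFF). A standalone sketch of the jiffies check, with a hypothetical helper name:

	#include <linux/jiffies.h>

	/* hypothetical helper: true when a descriptor queued at 'stamp' has
	 * waited longer than 'factor' seconds (time_after() is wrap-safe)
	 */
	static bool tx_desc_stuck(unsigned long stamp, unsigned long factor)
	{
		return stamp && time_after(jiffies, stamp + factor * HZ);
	}
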
@@ -1049,7 +1053,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
}
err = request_irq(adapter->msix_entries[vector].vector,
- &igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
+ igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
netdev);
if (err)
goto out;
@@ -1059,7 +1063,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
- &igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+ igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
goto out;
@@ -1069,7 +1073,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
- &igbvf_msix_other, 0, netdev->name, netdev);
+ igbvf_msix_other, 0, netdev->name, netdev);
if (err)
goto out;
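
These three hunks drop the redundant '&' in front of the handler names; a function designator already decays to an irq_handler_t pointer, so both spellings pass the same value to request_irq(). A minimal sketch with a made-up handler:

	#include <linux/interrupt.h>

	static irqreturn_t demo_isr(int irq, void *data)	/* hypothetical */
	{
		return IRQ_HANDLED;
	}

	static int demo_request(int irq, void *dev)
	{
		/* equivalent to request_irq(irq, &demo_isr, 0, "demo", dev) */
		return request_irq(irq, demo_isr, 0, "demo", dev);
	}
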
@@ -2094,27 +2098,24 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
unsigned int first)
{
struct igbvf_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
unsigned int len = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
- dma_addr_t *map;
i = tx_ring->next_to_use;
- if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
- dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
- return 0;
- }
-
- map = skb_shinfo(skb)->dma_maps;
-
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = skb_shinfo(skb)->dma_head;
+ buffer_info->dma = pci_map_single(pdev, skb->data, len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ goto dma_error;
+
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
@@ -2131,14 +2132,44 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = map[count];
+ buffer_info->mapped_as_page = true;
+ buffer_info->dma = pci_map_page(pdev,
+ frag->page,
+ frag->page_offset,
+ len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ goto dma_error;
count++;
}
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
- return count + 1;
+ return ++count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed buffer_info mapping */
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ buffer_info->length = 0;
+ buffer_info->next_to_watch = 0;
+ buffer_info->mapped_as_page = false;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ buffer_info = &tx_ring->buffer_info[i];
+ igbvf_put_txbuf(adapter, buffer_info);
+ }
+
+ return 0;
}
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
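
The rewritten igbvf_tx_map_adv() maps the linear part of the skb with pci_map_single(), each page fragment with pci_map_page(), and checks pci_dma_mapping_error() after every call; on failure it walks backwards and releases whatever was already mapped via igbvf_put_txbuf(). The same map-then-unwind pattern in a self-contained, hedged sketch (generic names, not the driver's code; fragment field names follow this kernel generation):

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	struct demo_buf {		/* hypothetical per-descriptor state */
		dma_addr_t dma;
		unsigned int len;
		bool mapped_as_page;
	};

	static int demo_map_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct demo_buf *bufs)
	{
		unsigned int f, count = 0;

		/* map the linear (header) portion first */
		bufs[count].len = skb_headlen(skb);
		bufs[count].mapped_as_page = false;
		bufs[count].dma = pci_map_single(pdev, skb->data, bufs[count].len,
						 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, bufs[count].dma))
			goto unwind;
		count++;

		/* then each paged fragment */
		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
			struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];

			bufs[count].len = frag->size;
			bufs[count].mapped_as_page = true;
			bufs[count].dma = pci_map_page(pdev, frag->page,
						       frag->page_offset, frag->size,
						       PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(pdev, bufs[count].dma))
				goto unwind;
			count++;
		}
		return count;

	unwind:
		/* undo every mapping that succeeded before the failure */
		while (count--) {
			if (bufs[count].mapped_as_page)
				pci_unmap_page(pdev, bufs[count].dma,
					       bufs[count].len, PCI_DMA_TODEVICE);
			else
				pci_unmap_single(pdev, bufs[count].dma,
						 bufs[count].len, PCI_DMA_TODEVICE);
		}
		return 0;
	}
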