author    Michael Hennerich <michael.hennerich@analog.com>  2009-05-29 03:41:15 +0000
committer David S. Miller <davem@davemloft.net>             2009-05-29 15:49:09 -0700
commit    015dac8886b5c48d62ebc33a964b9086d6a71bd7 (patch)
tree      976e6916811e7dee4d884419dcb43fe8769e49ce
parent    805a8ab3ce1be83e9a98c21a625ebbb549a2d317 (diff)
download  op-kernel-dev-015dac8886b5c48d62ebc33a964b9086d6a71bd7.zip
          op-kernel-dev-015dac8886b5c48d62ebc33a964b9086d6a71bd7.tar.gz
netdev: bfin_mac: fix performance issue found by netperf
- Remove dead long delay
- Use proper defines
- Remove broken implementation of the TX DMA Data Alignment (TXDWA) feature

Signed-off-by: Michael Hennerich <michael.hennerich@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/bfin_mac.c | 110
1 file changed, 26 insertions(+), 84 deletions(-)
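For context, here is a minimal sketch of the dev_alloc_skb()/skb_reserve() pattern the patch adopts in desc_list_init() and bfin_mac_rx(). The helper name is illustrative only; PKT_BUF_SZ is the driver's existing buffer-size define, and the descriptor bookkeeping around the allocation is omitted.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Sketch of the RX buffer allocation pattern used after the patch:
 * over-allocate by NET_IP_ALIGN and reserve the same amount up front
 * so the IP header lands on a 4-byte boundary.
 */
static struct sk_buff *bfin_alloc_rx_skb(void)
{
	struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);

	if (!skb)
		return NULL;	/* caller logs "low on mem" and drops the packet */

	/* replaces the bare magic number 2 used before the patch */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}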
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 38d34ce..f0f1eb9 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -194,13 +194,13 @@ static int desc_list_init(void)
struct dma_descriptor *b = &(r->desc_b);
/* allocate a new skb for next time receive */
- new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
printk(KERN_NOTICE DRV_NAME
": init: low on mem - packet dropped\n");
goto init_error;
}
- skb_reserve(new_skb, 2);
+ skb_reserve(new_skb, NET_IP_ALIGN);
r->skb = new_skb;
/*
@@ -566,9 +566,9 @@ static void adjust_tx_list(void)
*/
if (current_tx_ptr->next->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) {
- mdelay(1);
+ udelay(10);
if (tx_list_head->status.status_word != 0
- || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
+ || !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
goto adjust_head;
}
if (timeout_cnt-- < 0) {
@@ -606,86 +606,28 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
u16 *data;
-
+ u32 data_align = (unsigned long)(skb->data) & 0x3;
current_tx_ptr->skb = skb;
- if (ANOMALY_05000285) {
- /*
- * TXDWA feature is not avaible to older revision < 0.3 silicon
- * of BF537
- *
- * Only if data buffer is ODD WORD alignment, we do not
- * need to memcpy
- */
- u32 data_align = (u32)(skb->data) & 0x3;
- if (data_align == 0x2) {
- /* move skb->data to current_tx_ptr payload */
- data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
- current_tx_ptr->desc_a.start_addr = (u32)data;
- /* this is important! */
- blackfin_dcache_flush_range((u32)data,
- (u32)((u8 *)data + skb->len + 4));
- } else {
- *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
- memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
- skb->len);
- current_tx_ptr->desc_a.start_addr =
- (u32)current_tx_ptr->packet;
- if (current_tx_ptr->status.status_word != 0)
- current_tx_ptr->status.status_word = 0;
- blackfin_dcache_flush_range(
- (u32)current_tx_ptr->packet,
- (u32)(current_tx_ptr->packet + skb->len + 2));
- }
+ if (data_align == 0x2) {
+ /* move skb->data to current_tx_ptr payload */
+ data = (u16 *)(skb->data) - 1;
+ *data = (u16)(skb->len);
+ current_tx_ptr->desc_a.start_addr = (u32)data;
+ /* this is important! */
+ blackfin_dcache_flush_range((u32)data,
+ (u32)((u8 *)data + skb->len + 4));
} else {
- /*
- * TXDWA feature is avaible to revision < 0.3 silicon of
- * BF537 and always avaible to BF52x
- */
- u32 data_align = (u32)(skb->data) & 0x3;
- if (data_align == 0x0) {
- u16 sysctl = bfin_read_EMAC_SYSCTL();
- sysctl |= TXDWA;
- bfin_write_EMAC_SYSCTL(sysctl);
-
- /* move skb->data to current_tx_ptr payload */
- data = (u16 *)(skb->data) - 2;
- *data = (u16)(skb->len);
- current_tx_ptr->desc_a.start_addr = (u32)data;
- /* this is important! */
- blackfin_dcache_flush_range(
- (u32)data,
- (u32)((u8 *)data + skb->len + 4));
- } else if (data_align == 0x2) {
- u16 sysctl = bfin_read_EMAC_SYSCTL();
- sysctl &= ~TXDWA;
- bfin_write_EMAC_SYSCTL(sysctl);
-
- /* move skb->data to current_tx_ptr payload */
- data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
- current_tx_ptr->desc_a.start_addr = (u32)data;
- /* this is important! */
- blackfin_dcache_flush_range(
- (u32)data,
- (u32)((u8 *)data + skb->len + 4));
- } else {
- u16 sysctl = bfin_read_EMAC_SYSCTL();
- sysctl &= ~TXDWA;
- bfin_write_EMAC_SYSCTL(sysctl);
-
- *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
- memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
- skb->len);
- current_tx_ptr->desc_a.start_addr =
- (u32)current_tx_ptr->packet;
- if (current_tx_ptr->status.status_word != 0)
- current_tx_ptr->status.status_word = 0;
- blackfin_dcache_flush_range(
- (u32)current_tx_ptr->packet,
- (u32)(current_tx_ptr->packet + skb->len + 2));
- }
+ *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
+ skb->len);
+ current_tx_ptr->desc_a.start_addr =
+ (u32)current_tx_ptr->packet;
+ if (current_tx_ptr->status.status_word != 0)
+ current_tx_ptr->status.status_word = 0;
+ blackfin_dcache_flush_range(
+ (u32)current_tx_ptr->packet,
+ (u32)(current_tx_ptr->packet + skb->len + 2));
}
/* make sure the internal data buffers in the core are drained
@@ -698,7 +640,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
current_tx_ptr->desc_a.config |= DMAEN;
/* tx dma is running, just return */
- if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
+ if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
goto out;
/* tx dma is not running */
@@ -724,7 +666,7 @@ static void bfin_mac_rx(struct net_device *dev)
/* allocate a new skb for next time receive */
skb = current_rx_ptr->skb;
- new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
printk(KERN_NOTICE DRV_NAME
": rx: low on mem - packet dropped\n");
@@ -732,7 +674,7 @@ static void bfin_mac_rx(struct net_device *dev)
goto out;
}
/* reserve 2 bytes for RXDWA padding */
- skb_reserve(new_skb, 2);
+ skb_reserve(new_skb, NET_IP_ALIGN);
current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
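For context, a minimal sketch of the bounded status poll that replaces the old mdelay(1) loop in adjust_tx_list(). The descriptor type and function name below are illustrative stand-ins; DMA_RUN and bfin_read_DMA2_IRQ_STATUS() are the Blackfin definitions the patch uses in place of the bare 0x08 mask.

#include <linux/delay.h>
#include <linux/errno.h>

/* stand-in for the driver's TX descriptor; only the field polled here */
struct tx_desc {
	struct {
		volatile unsigned short status_word;
	} status;
};

/*
 * Sketch of the bounded poll performed by the patched adjust_tx_list():
 * spin in 10us steps (instead of the old 1ms mdelay) until the head TX
 * descriptor reports a status word or the TX DMA channel stops running.
 */
static int poll_tx_head(struct tx_desc *head, int timeout_cnt)
{
	while (head->status.status_word == 0) {
		udelay(10);
		/* done: descriptor completed, or the DMA engine stopped running */
		if (head->status.status_word != 0 ||
		    !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
			return 0;
		if (timeout_cnt-- < 0)
			return -ETIMEDOUT;	/* give up rather than hang the TX path */
	}
	return 0;
}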