author | Mugunthan V N <mugunthanvnm@ti.com> | 2013-01-17 06:31:34 +0000
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2013-01-18 14:27:50 -0500
commit | fae50823d0ee579e006a7ba2b20880e354388b25 (patch) |
tree | 8226ff014b455d5efded71ffb46e983d69d2f561 /drivers/net/ethernet/ti/davinci_cpdma.c |
parent | ee21c7e0d1f6e742e9f441f580a3bac316a39a21 (diff) |
net: ethernet: davinci_cpdma: Add boundary for rx and tx descriptors
When there is heavy transmission traffic in the CPDMA, the rx descriptor
memory is also utilized as tx descriptor memory; the driver then loses all
rx descriptors and stops working.
This patch adds a boundary between the tx and rx descriptors in BD RAM,
dividing the descriptor memory to ensure that tx cannot consume rx
descriptors during heavy transmission.
The same fix was already applied to the davinci_emac driver; since CPSW and
davinci_emac share the same CPDMA, this patch moves the boundary separation
from the davinci_emac driver into the CPDMA driver. The original fix was
done in the following commit:
commit 86d8c07ff2448eb4e860e50f34ef6ee78e45c40c
Author: Sascha Hauer <s.hauer@pengutronix.de>
Date: Tue Jan 3 05:27:47 2012 +0000
net/davinci: do not use all descriptors for tx packets
The driver uses a shared pool for both rx and tx descriptors.
During open it queues a fixed number of 128 descriptors for receive
packets. For each received packet it tries to queue another
descriptor. If this fails the descriptor is lost for rx.
The driver has no limitation on tx descriptors to use, so it
can happen during an nmap / ping -f attack that the driver
allocates all descriptors for tx and loses all rx descriptors.
The driver stops working then.
To fix this, limit the number of tx descriptors used to half of
the descriptors available; the rx path uses the other half.
Tested on a custom board using nmap / ping -f to the board from
two different hosts.
Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
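As an aside, not part of the commit: the allocation policy is easy to model in isolation. The sketch below is plain userspace C with invented names (`NUM_DESC`, `desc_used`, `desc_alloc`); it mimics how the patched `cpdma_desc_alloc()` confines rx allocations to the lower half of the pool and tx allocations to the upper half, so exhausting the tx half leaves rx untouched. In the real driver the same restriction comes from passing `desc_end` as the bitmap size and `desc_start` as the search offset to `bitmap_find_next_zero_area()`.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_DESC 256			/* total descriptors in BD RAM */

static bool desc_used[NUM_DESC];	/* stand-in for pool->bitmap */

/*
 * Allocate one descriptor. RX allocations search only the lower half
 * of the pool, TX allocations only the upper half, so a TX flood can
 * never consume descriptors reserved for RX.
 */
static int desc_alloc(bool is_rx)
{
	int start = is_rx ? 0 : NUM_DESC / 2;
	int end   = is_rx ? NUM_DESC / 2 : NUM_DESC;

	for (int i = start; i < end; i++) {
		if (!desc_used[i]) {
			desc_used[i] = true;
			return i;
		}
	}
	return -1;			/* this half is exhausted */
}

int main(void)
{
	int n = 0;

	/* Exhaust the TX half: exactly NUM_DESC/2 allocations succeed. */
	while (desc_alloc(false) >= 0)
		n++;
	printf("tx allocations before exhaustion: %d\n", n);	/* 128 */

	/* RX allocations still succeed from the untouched lower half. */
	printf("first rx descriptor index: %d\n", desc_alloc(true));	/* 0 */
	return 0;
}
```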
Diffstat (limited to 'drivers/net/ethernet/ti/davinci_cpdma.c')
-rw-r--r-- | drivers/net/ethernet/ti/davinci_cpdma.c | 47
1 file changed, 40 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 4995673..f862918 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -105,13 +105,13 @@ struct cpdma_ctlr {
 };
 
 struct cpdma_chan {
+	struct cpdma_desc __iomem	*head, *tail;
+	void __iomem			*hdp, *cp, *rxfree;
 	enum cpdma_state		state;
 	struct cpdma_ctlr		*ctlr;
 	int				chan_num;
 	spinlock_t			lock;
-	struct cpdma_desc __iomem	*head, *tail;
 	int				count;
-	void __iomem			*hdp, *cp, *rxfree;
 	u32				mask;
 	cpdma_handler_fn		handler;
 	enum dma_data_direction		dir;
@@ -217,17 +217,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
 }
 
 static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
 {
 	unsigned long flags;
 	int index;
+	int desc_start;
+	int desc_end;
 	struct cpdma_desc __iomem *desc = NULL;
 
 	spin_lock_irqsave(&pool->lock, flags);
 
-	index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
-					   num_desc, 0);
-	if (index < pool->num_desc) {
+	if (is_rx) {
+		desc_start = 0;
+		desc_end = pool->num_desc/2;
+	} else {
+		desc_start = pool->num_desc/2;
+		desc_end = pool->num_desc;
+	}
+
+	index = bitmap_find_next_zero_area(pool->bitmap,
+				desc_end, desc_start, num_desc, 0);
+	if (index < desc_end) {
 		bitmap_set(pool->bitmap, index, num_desc);
 		desc = pool->iomap + pool->desc_size * index;
 		pool->used_desc++;
@@ -668,7 +678,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		goto unlock_ret;
 	}
 
-	desc = cpdma_desc_alloc(ctlr->pool, 1);
+	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
 	if (!desc) {
 		chan->stats.desc_alloc_fail++;
 		ret = -ENOMEM;
@@ -704,6 +714,29 @@ unlock_ret:
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_submit);
 
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+	unsigned long flags;
+	int index;
+	bool ret;
+	struct cpdma_ctlr *ctlr = chan->ctlr;
+	struct cpdma_desc_pool *pool = ctlr->pool;
+
+	spin_lock_irqsave(&pool->lock, flags);
+
+	index = bitmap_find_next_zero_area(pool->bitmap,
+				pool->num_desc, pool->num_desc/2, 1, 0);
+
+	if (index < pool->num_desc)
+		ret = true;
+	else
+		ret = false;
+
+	spin_unlock_irqrestore(&pool->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
+
 static void __cpdma_chan_free(struct cpdma_chan *chan,
 			      struct cpdma_desc __iomem *desc,
 			      int outlen, int status)
```
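For context, here is a sketch of how a CPDMA-based driver might consume the newly exported `cpdma_check_free_tx_desc()`. This is a hypothetical caller, not code from this patch; `example_priv` and `example_submit_skb()` are placeholders for a real driver's private state and its wrapper around `cpdma_chan_submit()`. Once the tx half of the pool is exhausted, the driver stops its transmit queue rather than letting later submissions fail with -ENOMEM; the queue would be woken again from the tx completion path.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "davinci_cpdma.h"

/* Placeholder private data: just the TX channel handle. */
struct example_priv {
	struct cpdma_chan *txch;
};

/* Placeholder: maps the skb and queues it via cpdma_chan_submit(). */
static int example_submit_skb(struct example_priv *priv, struct sk_buff *skb);

static netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);

	if (example_submit_skb(priv, skb)) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* No free descriptor left in the TX half of the pool: stop the
	 * queue now; the TX completion handler wakes it again once
	 * descriptors are freed. */
	if (!cpdma_check_free_tx_desc(priv->txch))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
```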