author    Chris Leech <christopher.leech@intel.com>    2007-03-08 09:57:35 -0800
committer Dan Williams <dan.j.williams@intel.com>      2007-07-11 15:39:04 -0700
commit    e38288117c50fe22ed1693c2d8397245bb7e1a53 (patch)
tree      a3189299b0056bb109fb7d306d5d0574c621a0a0 /drivers/dma/ioatdma.c
parent    ff487fb773749124550a5ad2b7fbfe0376af6f0d (diff)
ioatdma: Remove the wrappers around read(bwl)/write(bwl) in ioatdma
Signed-off-by: Chris Leech <christopher.leech@intel.com>
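
For context, the accessors removed from ioatdma_io.h were, presumably, thin static inline wrappers that only added a layer of indirection over the generic MMIO helpers. A minimal sketch of what such wrappers look like (a reconstruction for illustration, not the actual header contents):

    #include <linux/io.h>

    /* Hypothetical reconstruction of the removed ioatdma_io.h wrappers:
     * each one simply forwards to the standard MMIO accessor on
     * reg_base + offset. */
    static inline u8 ioatdma_read8(struct ioat_device *device,
                                   unsigned int offset)
    {
            return readb(device->reg_base + offset);
    }

    static inline void ioatdma_chan_write16(struct ioat_dma_chan *ioat_chan,
                                            unsigned int offset, u16 value)
    {
            writew(value, ioat_chan->reg_base + offset);
    }

With the wrappers gone, the driver calls readb()/writeb() and friends directly on reg_base + offset, which is what the hunks below do.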
Diffstat (limited to 'drivers/dma/ioatdma.c')
-rw-r--r--  drivers/dma/ioatdma.c  60
1 file changed, 28 insertions(+), 32 deletions(-)
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 4e6afd7..2757070 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -32,7 +32,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
-#include "ioatdma_io.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
@@ -51,8 +50,8 @@ static int enumerate_dma_channels(struct ioat_device *device)
int i;
struct ioat_dma_chan *ioat_chan;
- device->common.chancnt = ioatdma_read8(device, IOAT_CHANCNT_OFFSET);
- xfercap_scale = ioatdma_read8(device, IOAT_XFERCAP_OFFSET);
+ device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+ xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
for (i = 0; i < device->common.chancnt; i++) {
@@ -123,7 +122,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
* In-use bit automatically set by reading chanctrl
* If 0, we got it, if 1, someone else did
*/
- chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
+ chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)
return -EBUSY;
@@ -132,12 +131,12 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
IOAT_CHANCTRL_ERR_INT_EN |
IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
IOAT_CHANCTRL_ERR_COMPLETION_EN;
- ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);
+ writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
- chanerr = ioatdma_chan_read32(ioat_chan, IOAT_CHANERR_OFFSET);
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
if (chanerr) {
printk("IOAT: CHANERR = %x, clearing\n", chanerr);
- ioatdma_chan_write32(ioat_chan, IOAT_CHANERR_OFFSET, chanerr);
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}
/* Allocate descriptors */
@@ -161,10 +160,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
&ioat_chan->completion_addr);
memset(ioat_chan->completion_virt, 0,
sizeof(*ioat_chan->completion_virt));
- ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_LOW,
- ((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF);
- ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_HIGH,
- ((u64) ioat_chan->completion_addr) >> 32);
+ writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+ writel(((u64) ioat_chan->completion_addr) >> 32,
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
ioat_start_null_desc(ioat_chan);
return i;
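
The hunk above programs the 64-bit completion-writeback address as two 32-bit MMIO writes, because the CHANCMP register is exposed as separate LOW and HIGH halves. A minimal sketch of that pattern as a standalone helper (hypothetical name; lower_32_bits()/upper_32_bits() are the kernel helpers that express the same mask and shift):

    static void ioat_set_completion_addr(struct ioat_dma_chan *ioat_chan,
                                         dma_addr_t addr)
    {
            /* Low 32 bits first, then the high 32 bits, of the address
             * the channel writes its completion status to. */
            writel(lower_32_bits(addr),
                   ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
            writel(upper_32_bits(addr),
                   ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
    }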
@@ -182,7 +181,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_dma_memcpy_cleanup(ioat_chan);
- ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);
+ writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
spin_lock_bh(&ioat_chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
@@ -210,9 +209,9 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
/* Tell hw the chan is free */
- chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
+ chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;
- ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);
+ writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
/**
@@ -318,9 +317,8 @@ static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan,
spin_unlock_bh(&ioat_chan->desc_lock);
if (append)
- ioatdma_chan_write8(ioat_chan,
- IOAT_CHANCMD_OFFSET,
- IOAT_CHANCMD_APPEND);
+ writeb(IOAT_CHANCMD_APPEND,
+ ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
return cookie;
}
@@ -417,9 +415,8 @@ static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
if (ioat_chan->pending != 0) {
ioat_chan->pending = 0;
- ioatdma_chan_write8(ioat_chan,
- IOAT_CHANCMD_OFFSET,
- IOAT_CHANCMD_APPEND);
+ writeb(IOAT_CHANCMD_APPEND,
+ ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
}
@@ -449,7 +446,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
printk("IOAT: Channel halted, chanerr = %x\n",
- ioatdma_chan_read32(chan, IOAT_CHANERR_OFFSET));
+ readl(chan->reg_base + IOAT_CHANERR_OFFSET));
/* TODO do something to salvage the situation */
}
@@ -569,21 +566,21 @@ static irqreturn_t ioat_do_interrupt(int irq, void *data)
unsigned long attnstatus;
u8 intrctrl;
- intrctrl = ioatdma_read8(instance, IOAT_INTRCTRL_OFFSET);
+ intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
return IRQ_NONE;
if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
- ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
return IRQ_NONE;
}
- attnstatus = ioatdma_read32(instance, IOAT_ATTNSTATUS_OFFSET);
+ attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);
- ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
return IRQ_HANDLED;
}
@@ -612,14 +609,13 @@ static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
spin_unlock_bh(&ioat_chan->desc_lock);
#if (BITS_PER_LONG == 64)
- ioatdma_chan_write64(ioat_chan, IOAT_CHAINADDR_OFFSET, desc->phys);
+ writeq(desc->phys, ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET);
#else
- ioatdma_chan_write32(ioat_chan,
- IOAT_CHAINADDR_OFFSET_LOW,
- (u32) desc->phys);
- ioatdma_chan_write32(ioat_chan, IOAT_CHAINADDR_OFFSET_HIGH, 0);
+ writel((u32) desc->phys,
+ ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
+ writel(0, ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
#endif
- ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_START);
+ writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
/*
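
The ioat_start_null_desc() hunk above writes the descriptor chain address with a single writeq() on 64-bit builds; 32-bit builds, where a 64-bit MMIO store may not be available, fall back to two writel() calls against the LOW/HIGH halves of the register. A minimal sketch of that conditional, factored into a hypothetical helper:

    static void ioat_set_chainaddr(struct ioat_dma_chan *ioat_chan,
                                   dma_addr_t phys)
    {
    #if (BITS_PER_LONG == 64)
            /* 64-bit store available: program the whole chain address at once. */
            writeq(phys, ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET);
    #else
            /* No writeq() here: split into low/high 32-bit writes.  The
             * original code assumes the upper half is zero on 32-bit builds. */
            writel((u32) phys,
                   ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
            writel(0, ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
    #endif
    }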
@@ -748,7 +744,7 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
device->reg_base = reg_base;
- ioatdma_write8(device, IOAT_INTRCTRL_OFFSET, IOAT_INTRCTRL_MASTER_INT_EN);
+ writeb(IOAT_INTRCTRL_MASTER_INT_EN, device->reg_base + IOAT_INTRCTRL_OFFSET);
pci_set_master(pdev);
INIT_LIST_HEAD(&device->common.channels);