Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/spi-pl022.c        |  13
-rw-r--r--  drivers/spi/spi-pxa2xx-pci.c   |  76
-rw-r--r--  drivers/spi/spi-pxa2xx.c       |  20
-rw-r--r--  drivers/spi/spi-rspi.c         | 601
-rw-r--r--  drivers/spi/spi-s3c24xx.c      |  14
-rw-r--r--  drivers/spi/spi-s3c64xx.c      |   5
-rw-r--r--  drivers/spi/spi-sh-msiof.c     |   4
-rw-r--r--  drivers/spi/spi-sirf.c         | 305
-rw-r--r--  drivers/spi/spi-tle62x0.c      |   4
-rw-r--r--  drivers/spi/spi-topcliff-pch.c |   5
10 files changed, 467 insertions, 580 deletions
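
Editor's note: a recurring cleanup in the pl022, pxa2xx, s3c24xx, s3c64xx, sh-msiof, tle62x0 and topcliff-pch hunks below is the removal of dev_err()/dev_dbg() messages on allocation failure; the slab allocator already logs a warning (with a backtrace) when a GFP_KERNEL allocation fails, unless __GFP_NOWARN is passed. The sketch that follows is hypothetical (names are illustrative, not taken from any of these drivers) and only shows the pattern the patches converge on.

/*
 * Hypothetical example, not part of any patch in this series: the
 * allocation-failure pattern used after the cleanups below.  No extra
 * dev_err() is needed because kzalloc() itself warns on failure unless
 * __GFP_NOWARN is set.
 */
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct example_state {			/* illustrative per-device state */
	u32 speed_hz;
};

static int example_setup(struct spi_device *spi)
{
	struct example_state *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)		/* allocator already warned; just bail out */
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}
	return 0;
}
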
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 51d9977..66d2ae2 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1111,10 +1111,8 @@ static int pl022_dma_probe(struct pl022 *pl022) } pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!pl022->dummypage) { - dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); + if (!pl022->dummypage) goto err_no_dummypage; - } dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", dma_chan_name(pl022->dma_rx_channel), @@ -1809,11 +1807,8 @@ static int pl022_setup(struct spi_device *spi) if (chip == NULL) { chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) { - dev_err(&spi->dev, - "cannot allocate controller state\n"); + if (!chip) return -ENOMEM; - } dev_dbg(&spi->dev, "allocated memory for controller's runtime state\n"); } @@ -2050,10 +2045,8 @@ pl022_platform_data_dt_get(struct device *dev) } pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL); - if (!pd) { - dev_err(dev, "cannot allocate platform data memory\n"); + if (!pd) return NULL; - } pd->bus_id = -1; pd->enable_dma = 1; diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 3f006d3..c1865c9 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -8,7 +8,43 @@ #include <linux/module.h> #include <linux/spi/pxa2xx_spi.h> -static int ce4100_spi_probe(struct pci_dev *dev, +enum { + PORT_CE4100, + PORT_BYT, +}; + +struct pxa_spi_info { + enum pxa_ssp_type type; + int port_id; + int num_chipselect; + int tx_slave_id; + int tx_chan_id; + int rx_slave_id; + int rx_chan_id; +}; + +static struct pxa_spi_info spi_info_configs[] = { + [PORT_CE4100] = { + .type = PXA25x_SSP, + .port_id = -1, + .num_chipselect = -1, + .tx_slave_id = -1, + .tx_chan_id = -1, + .rx_slave_id = -1, + .rx_chan_id = -1, + }, + [PORT_BYT] = { + .type = LPSS_SSP, + .port_id = 0, + .num_chipselect = 1, + .tx_slave_id = 0, + .tx_chan_id = 0, + .rx_slave_id = 1, + .rx_chan_id = 1, + }, +}; + +static int pxa2xx_spi_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct platform_device_info pi; @@ -16,6 +52,7 @@ static int ce4100_spi_probe(struct pci_dev *dev, struct platform_device *pdev; struct pxa2xx_spi_master spi_pdata; struct ssp_device *ssp; + struct pxa_spi_info *c; ret = pcim_enable_device(dev); if (ret) @@ -25,8 +62,16 @@ static int ce4100_spi_probe(struct pci_dev *dev, if (ret) return ret; + c = &spi_info_configs[ent->driver_data]; + memset(&spi_pdata, 0, sizeof(spi_pdata)); - spi_pdata.num_chipselect = dev->devfn; + spi_pdata.num_chipselect = (c->num_chipselect > 0) ? + c->num_chipselect : dev->devfn; + spi_pdata.tx_slave_id = c->tx_slave_id; + spi_pdata.tx_chan_id = c->tx_chan_id; + spi_pdata.rx_slave_id = c->rx_slave_id; + spi_pdata.rx_chan_id = c->rx_chan_id; + spi_pdata.enable_dma = c->rx_slave_id >= 0 && c->tx_slave_id >= 0; ssp = &spi_pdata.ssp; ssp->phys_base = pci_resource_start(dev, 0); @@ -36,8 +81,8 @@ static int ce4100_spi_probe(struct pci_dev *dev, return -EIO; } ssp->irq = dev->irq; - ssp->port_id = dev->devfn; - ssp->type = PXA25x_SSP; + ssp->port_id = (c->port_id >= 0) ? 
c->port_id : dev->devfn; + ssp->type = c->type; memset(&pi, 0, sizeof(pi)); pi.parent = &dev->dev; @@ -55,28 +100,29 @@ static int ce4100_spi_probe(struct pci_dev *dev, return 0; } -static void ce4100_spi_remove(struct pci_dev *dev) +static void pxa2xx_spi_pci_remove(struct pci_dev *dev) { struct platform_device *pdev = pci_get_drvdata(dev); platform_device_unregister(pdev); } -static const struct pci_device_id ce4100_spi_devices[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, +static const struct pci_device_id pxa2xx_spi_pci_devices[] = { + { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 }, + { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT }, { }, }; -MODULE_DEVICE_TABLE(pci, ce4100_spi_devices); +MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); -static struct pci_driver ce4100_spi_driver = { - .name = "ce4100_spi", - .id_table = ce4100_spi_devices, - .probe = ce4100_spi_probe, - .remove = ce4100_spi_remove, +static struct pci_driver pxa2xx_spi_pci_driver = { + .name = "pxa2xx_spi_pci", + .id_table = pxa2xx_spi_pci_devices, + .probe = pxa2xx_spi_pci_probe, + .remove = pxa2xx_spi_pci_remove, }; -module_pci_driver(ce4100_spi_driver); +module_pci_driver(pxa2xx_spi_pci_driver); -MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver"); +MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>"); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 41185d0..901d0e0 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -886,11 +886,8 @@ static int setup(struct spi_device *spi) chip = spi_get_ctldata(spi); if (!chip) { chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) { - dev_err(&spi->dev, - "failed setup: can't allocate chip data\n"); + if (!chip) return -ENOMEM; - } if (drv_data->ssp_type == CE4100_SSP) { if (spi->chip_select > 4) { @@ -1037,11 +1034,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) return NULL; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) { - dev_err(&pdev->dev, - "failed to allocate memory for platform data\n"); + if (!pdata) return NULL; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) @@ -1202,6 +1196,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) tasklet_init(&drv_data->pump_transfers, pump_transfers, (unsigned long)drv_data); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); status = devm_spi_register_master(&pdev->dev, master); @@ -1210,11 +1209,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) goto out_error_clock_enabled; } - pm_runtime_set_autosuspend_delay(&pdev->dev, 50); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - return status; out_error_clock_enabled: diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 1fb0ad2..1011274 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -183,8 +183,6 @@ #define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */ #define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */ -#define DUMMY_DATA 0x00 - struct rspi_data { void __iomem *addr; u32 max_speed_hz; @@ -197,11 +195,6 @@ struct rspi_data { int rx_irq, tx_irq; const struct spi_ops *ops; - /* for dmaengine */ - struct 
dma_chan *chan_tx; - struct dma_chan *chan_rx; - - unsigned dma_width_16bit:1; unsigned dma_callbacked:1; unsigned byte_access:1; }; @@ -253,6 +246,8 @@ struct spi_ops { int (*transfer_one)(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer); u16 mode_bits; + u16 flags; + u16 fifo_size; }; /* @@ -266,7 +261,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); /* Sets transfer bit rate */ - spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; + spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), + 2 * rspi->max_speed_hz) - 1; rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); /* Disable dummy transmission, set 16-bit word access, 1 frame */ @@ -302,7 +298,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); /* Sets transfer bit rate */ - spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; + spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), + 2 * rspi->max_speed_hz) - 1; rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); /* Disable dummy transmission, set byte access */ @@ -335,7 +332,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); /* Sets transfer bit rate */ - spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz); + spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz); rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); /* Disable dummy transmission, set byte access */ @@ -403,11 +400,22 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask, return 0; } +static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi) +{ + return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); +} + +static inline int rspi_wait_for_rx_full(struct rspi_data *rspi) +{ + return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE); +} + static int rspi_data_out(struct rspi_data *rspi, u8 data) { - if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) { + int error = rspi_wait_for_tx_empty(rspi); + if (error < 0) { dev_err(&rspi->master->dev, "transmit timeout\n"); - return -ETIMEDOUT; + return error; } rspi_write_data(rspi, data); return 0; @@ -415,25 +423,36 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data) static int rspi_data_in(struct rspi_data *rspi) { + int error; u8 data; - if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) { + error = rspi_wait_for_rx_full(rspi); + if (error < 0) { dev_err(&rspi->master->dev, "receive timeout\n"); - return -ETIMEDOUT; + return error; } data = rspi_read_data(rspi); return data; } -static int rspi_data_out_in(struct rspi_data *rspi, u8 data) +static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx, + unsigned int n) { - int ret; - - ret = rspi_data_out(rspi, data); - if (ret < 0) - return ret; + while (n-- > 0) { + if (tx) { + int ret = rspi_data_out(rspi, *tx++); + if (ret < 0) + return ret; + } + if (rx) { + int ret = rspi_data_in(rspi); + if (ret < 0) + return ret; + *rx++ = ret; + } + } - return rspi_data_in(rspi); + return 0; } static void rspi_dma_complete(void *arg) @@ -444,97 +463,67 @@ static void rspi_dma_complete(void *arg) wake_up_interruptible(&rspi->wait); } -static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf, - unsigned len, struct dma_chan *chan, - enum dma_transfer_direction dir) -{ - sg_init_table(sg, 1); - sg_set_buf(sg, buf, len); - sg_dma_len(sg) = len; - return 
dma_map_sg(chan->device->dev, sg, 1, dir); -} - -static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan, - enum dma_transfer_direction dir) +static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, + struct sg_table *rx) { - dma_unmap_sg(chan->device->dev, sg, 1, dir); -} - -static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len) -{ - u16 *dst = buf; - const u8 *src = data; - - while (len) { - *dst++ = (u16)(*src++); - len--; - } -} - -static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len) -{ - u8 *dst = buf; - const u16 *src = data; - - while (len) { - *dst++ = (u8)*src++; - len--; - } -} + struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; + u8 irq_mask = 0; + unsigned int other_irq = 0; + dma_cookie_t cookie; + int ret; -static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t) -{ - struct scatterlist sg; - const void *buf = NULL; - struct dma_async_tx_descriptor *desc; - unsigned int len; - int ret = 0; - - if (rspi->dma_width_16bit) { - void *tmp; - /* - * If DMAC bus width is 16-bit, the driver allocates a dummy - * buffer. And, the driver converts original data into the - * DMAC data as the following format: - * original data: 1st byte, 2nd byte ... - * DMAC data: 1st byte, dummy, 2nd byte, dummy ... - */ - len = t->len * 2; - tmp = kmalloc(len, GFP_KERNEL); - if (!tmp) - return -ENOMEM; - rspi_memory_to_8bit(tmp, t->tx_buf, t->len); - buf = tmp; - } else { - len = t->len; - buf = t->tx_buf; - } + if (tx) { + desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, + tx->sgl, tx->nents, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_tx) + return -EIO; - if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) { - ret = -EFAULT; - goto end_nomap; + irq_mask |= SPCR_SPTIE; } - desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc) { - ret = -EIO; - goto end; + if (rx) { + desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, + rx->sgl, rx->nents, DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_rx) + return -EIO; + + irq_mask |= SPCR_SPRIE; } /* - * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be + * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be * called. So, this driver disables the IRQ while DMA transfer. 
*/ - disable_irq(rspi->tx_irq); + if (tx) + disable_irq(other_irq = rspi->tx_irq); + if (rx && rspi->rx_irq != other_irq) + disable_irq(rspi->rx_irq); - rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR); - rspi_enable_irq(rspi, SPCR_SPTIE); + rspi_enable_irq(rspi, irq_mask); rspi->dma_callbacked = 0; - desc->callback = rspi_dma_complete; - desc->callback_param = rspi; - dmaengine_submit(desc); - dma_async_issue_pending(rspi->chan_tx); + if (rx) { + desc_rx->callback = rspi_dma_complete; + desc_rx->callback_param = rspi; + cookie = dmaengine_submit(desc_rx); + if (dma_submit_error(cookie)) + return cookie; + dma_async_issue_pending(rspi->master->dma_rx); + } + if (tx) { + if (rx) { + /* No callback */ + desc_tx->callback = NULL; + } else { + desc_tx->callback = rspi_dma_complete; + desc_tx->callback_param = rspi; + } + cookie = dmaengine_submit(desc_tx); + if (dma_submit_error(cookie)) + return cookie; + dma_async_issue_pending(rspi->master->dma_tx); + } ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); @@ -542,15 +531,13 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t) ret = 0; else if (!ret) ret = -ETIMEDOUT; - rspi_disable_irq(rspi, SPCR_SPTIE); - enable_irq(rspi->tx_irq); + rspi_disable_irq(rspi, irq_mask); -end: - rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE); -end_nomap: - if (rspi->dma_width_16bit) - kfree(buf); + if (tx) + enable_irq(rspi->tx_irq); + if (rx && rspi->rx_irq != other_irq) + enable_irq(rspi->rx_irq); return ret; } @@ -585,157 +572,37 @@ static void qspi_receive_init(const struct rspi_data *rspi) rspi_write8(rspi, 0, QSPI_SPBFCR); } -static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t) +static bool __rspi_can_dma(const struct rspi_data *rspi, + const struct spi_transfer *xfer) { - struct scatterlist sg, sg_dummy; - void *dummy = NULL, *rx_buf = NULL; - struct dma_async_tx_descriptor *desc, *desc_dummy; - unsigned int len; - int ret = 0; - - if (rspi->dma_width_16bit) { - /* - * If DMAC bus width is 16-bit, the driver allocates a dummy - * buffer. And, finally the driver converts the DMAC data into - * actual data as the following format: - * DMAC data: 1st byte, dummy, 2nd byte, dummy ... - * actual data: 1st byte, 2nd byte ... - */ - len = t->len * 2; - rx_buf = kmalloc(len, GFP_KERNEL); - if (!rx_buf) - return -ENOMEM; - } else { - len = t->len; - rx_buf = t->rx_buf; - } - - /* prepare dummy transfer to generate SPI clocks */ - dummy = kzalloc(len, GFP_KERNEL); - if (!dummy) { - ret = -ENOMEM; - goto end_nomap; - } - if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx, - DMA_TO_DEVICE)) { - ret = -EFAULT; - goto end_nomap; - } - desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1, - DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc_dummy) { - ret = -EIO; - goto end_dummy_mapped; - } - - /* prepare receive transfer */ - if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx, - DMA_FROM_DEVICE)) { - ret = -EFAULT; - goto end_dummy_mapped; - - } - desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc) { - ret = -EIO; - goto end; - } - - rspi_receive_init(rspi); - - /* - * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be - * called. So, this driver disables the IRQ while DMA transfer. 
- */ - disable_irq(rspi->tx_irq); - if (rspi->rx_irq != rspi->tx_irq) - disable_irq(rspi->rx_irq); - - rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR); - rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE); - rspi->dma_callbacked = 0; - - desc->callback = rspi_dma_complete; - desc->callback_param = rspi; - dmaengine_submit(desc); - dma_async_issue_pending(rspi->chan_rx); - - desc_dummy->callback = NULL; /* No callback */ - dmaengine_submit(desc_dummy); - dma_async_issue_pending(rspi->chan_tx); - - ret = wait_event_interruptible_timeout(rspi->wait, - rspi->dma_callbacked, HZ); - if (ret > 0 && rspi->dma_callbacked) - ret = 0; - else if (!ret) - ret = -ETIMEDOUT; - rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE); - - enable_irq(rspi->tx_irq); - if (rspi->rx_irq != rspi->tx_irq) - enable_irq(rspi->rx_irq); - -end: - rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE); -end_dummy_mapped: - rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE); -end_nomap: - if (rspi->dma_width_16bit) { - if (!ret) - rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len); - kfree(rx_buf); - } - kfree(dummy); - - return ret; + return xfer->len > rspi->ops->fifo_size; } -static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t) +static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, + struct spi_transfer *xfer) { - if (t->tx_buf && rspi->chan_tx) - return 1; - /* If the module receives data by DMAC, it also needs TX DMAC */ - if (t->rx_buf && rspi->chan_tx && rspi->chan_rx) - return 1; + struct rspi_data *rspi = spi_master_get_devdata(master); - return 0; + return __rspi_can_dma(rspi, xfer); } -static int rspi_transfer_out_in(struct rspi_data *rspi, +static int rspi_common_transfer(struct rspi_data *rspi, struct spi_transfer *xfer) { - int remain = xfer->len, ret; - const u8 *tx_buf = xfer->tx_buf; - u8 *rx_buf = xfer->rx_buf; - u8 spcr, data; - - rspi_receive_init(rspi); - - spcr = rspi_read8(rspi, RSPI_SPCR); - if (rx_buf) - spcr &= ~SPCR_TXMD; - else - spcr |= SPCR_TXMD; - rspi_write8(rspi, spcr, RSPI_SPCR); + int ret; - while (remain > 0) { - data = tx_buf ? *tx_buf++ : DUMMY_DATA; - ret = rspi_data_out(rspi, data); - if (ret < 0) - return ret; - if (rx_buf) { - ret = rspi_data_in(rspi); - if (ret < 0) - return ret; - *rx_buf++ = ret; - } - remain--; + if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { + /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ + return rspi_dma_transfer(rspi, &xfer->tx_sg, + xfer->rx_buf ? &xfer->rx_sg : NULL); } + ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len); + if (ret < 0) + return ret; + /* Wait for the last transmission */ - rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); + rspi_wait_for_tx_empty(rspi); return 0; } @@ -744,46 +611,18 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer) { struct rspi_data *rspi = spi_master_get_devdata(master); - int ret; + u8 spcr; - if (!rspi_is_dma(rspi, xfer)) - return rspi_transfer_out_in(rspi, xfer); - - if (xfer->tx_buf) { - ret = rspi_send_dma(rspi, xfer); - if (ret < 0) - return ret; - } - if (xfer->rx_buf) - return rspi_receive_dma(rspi, xfer); - - return 0; -} - -static int rspi_rz_transfer_out_in(struct rspi_data *rspi, - struct spi_transfer *xfer) -{ - int remain = xfer->len, ret; - const u8 *tx_buf = xfer->tx_buf; - u8 *rx_buf = xfer->rx_buf; - u8 data; - - rspi_rz_receive_init(rspi); - - while (remain > 0) { - data = tx_buf ? 
*tx_buf++ : DUMMY_DATA; - ret = rspi_data_out_in(rspi, data); - if (ret < 0) - return ret; - if (rx_buf) - *rx_buf++ = ret; - remain--; + spcr = rspi_read8(rspi, RSPI_SPCR); + if (xfer->rx_buf) { + rspi_receive_init(rspi); + spcr &= ~SPCR_TXMD; + } else { + spcr |= SPCR_TXMD; } + rspi_write8(rspi, spcr, RSPI_SPCR); - /* Wait for the last transmission */ - rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); - - return 0; + return rspi_common_transfer(rspi, xfer); } static int rspi_rz_transfer_one(struct spi_master *master, @@ -791,68 +630,44 @@ static int rspi_rz_transfer_one(struct spi_master *master, struct spi_transfer *xfer) { struct rspi_data *rspi = spi_master_get_devdata(master); + int ret; - return rspi_rz_transfer_out_in(rspi, xfer); + rspi_rz_receive_init(rspi); + + return rspi_common_transfer(rspi, xfer); } static int qspi_transfer_out_in(struct rspi_data *rspi, struct spi_transfer *xfer) { - int remain = xfer->len, ret; - const u8 *tx_buf = xfer->tx_buf; - u8 *rx_buf = xfer->rx_buf; - u8 data; - qspi_receive_init(rspi); - while (remain > 0) { - data = tx_buf ? *tx_buf++ : DUMMY_DATA; - ret = rspi_data_out_in(rspi, data); - if (ret < 0) - return ret; - if (rx_buf) - *rx_buf++ = ret; - remain--; - } - - /* Wait for the last transmission */ - rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); - - return 0; + return rspi_common_transfer(rspi, xfer); } static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) { - const u8 *buf = xfer->tx_buf; - unsigned int i; int ret; - for (i = 0; i < xfer->len; i++) { - ret = rspi_data_out(rspi, *buf++); - if (ret < 0) - return ret; - } + if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) + return rspi_dma_transfer(rspi, &xfer->tx_sg, NULL); + + ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len); + if (ret < 0) + return ret; /* Wait for the last transmission */ - rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); + rspi_wait_for_tx_empty(rspi); return 0; } static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) { - u8 *buf = xfer->rx_buf; - unsigned int i; - int ret; - - for (i = 0; i < xfer->len; i++) { - ret = rspi_data_in(rspi); - if (ret < 0) - return ret; - *buf++ = ret; - } + if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) + return rspi_dma_transfer(rspi, NULL, &xfer->rx_sg); - return 0; + return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len); } static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, @@ -862,10 +677,10 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, if (spi->mode & SPI_LOOP) { return qspi_transfer_out_in(rspi, xfer); - } else if (xfer->tx_buf && xfer->tx_nbits > SPI_NBITS_SINGLE) { + } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) { /* Quad or Dual SPI Write */ return qspi_transfer_out(rspi, xfer); - } else if (xfer->rx_buf && xfer->rx_nbits > SPI_NBITS_SINGLE) { + } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) { /* Quad or Dual SPI Read */ return qspi_transfer_in(rspi, xfer); } else { @@ -1046,65 +861,78 @@ static irqreturn_t rspi_irq_tx(int irq, void *_sr) return 0; } -static int rspi_request_dma(struct rspi_data *rspi, - struct platform_device *pdev) +static struct dma_chan *rspi_request_dma_chan(struct device *dev, + enum dma_transfer_direction dir, + unsigned int id, + dma_addr_t port_addr) { - const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev); - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dma_cap_mask_t mask; + struct dma_chan 
*chan; struct dma_slave_config cfg; int ret; - if (!res || !rspi_pd) - return 0; /* The driver assumes no error. */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); - rspi->dma_width_16bit = rspi_pd->dma_width_16bit; - - /* If the module receives data by DMAC, it also needs TX DMAC */ - if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) { - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter, - (void *)rspi_pd->dma_rx_id); - if (rspi->chan_rx) { - cfg.slave_id = rspi_pd->dma_rx_id; - cfg.direction = DMA_DEV_TO_MEM; - cfg.dst_addr = 0; - cfg.src_addr = res->start + RSPI_SPDR; - ret = dmaengine_slave_config(rspi->chan_rx, &cfg); - if (!ret) - dev_info(&pdev->dev, "Use DMA when rx.\n"); - else - return ret; - } + chan = dma_request_channel(mask, shdma_chan_filter, + (void *)(unsigned long)id); + if (!chan) { + dev_warn(dev, "dma_request_channel failed\n"); + return NULL; } - if (rspi_pd->dma_tx_id) { - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter, - (void *)rspi_pd->dma_tx_id); - if (rspi->chan_tx) { - cfg.slave_id = rspi_pd->dma_tx_id; - cfg.direction = DMA_MEM_TO_DEV; - cfg.dst_addr = res->start + RSPI_SPDR; - cfg.src_addr = 0; - ret = dmaengine_slave_config(rspi->chan_tx, &cfg); - if (!ret) - dev_info(&pdev->dev, "Use DMA when tx\n"); - else - return ret; - } + + memset(&cfg, 0, sizeof(cfg)); + cfg.slave_id = id; + cfg.direction = dir; + if (dir == DMA_MEM_TO_DEV) + cfg.dst_addr = port_addr; + else + cfg.src_addr = port_addr; + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) { + dev_warn(dev, "dmaengine_slave_config failed %d\n", ret); + dma_release_channel(chan); + return NULL; } + return chan; +} + +static int rspi_request_dma(struct device *dev, struct spi_master *master, + const struct resource *res) +{ + const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev); + + if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id) + return 0; /* The driver assumes no error. 
*/ + + master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, + rspi_pd->dma_rx_id, + res->start + RSPI_SPDR); + if (!master->dma_rx) + return -ENODEV; + + master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, + rspi_pd->dma_tx_id, + res->start + RSPI_SPDR); + if (!master->dma_tx) { + dma_release_channel(master->dma_rx); + master->dma_rx = NULL; + return -ENODEV; + } + + master->can_dma = rspi_can_dma; + dev_info(dev, "DMA available"); return 0; } static void rspi_release_dma(struct rspi_data *rspi) { - if (rspi->chan_tx) - dma_release_channel(rspi->chan_tx); - if (rspi->chan_rx) - dma_release_channel(rspi->chan_rx); + if (rspi->master->dma_tx) + dma_release_channel(rspi->master->dma_tx); + if (rspi->master->dma_rx) + dma_release_channel(rspi->master->dma_rx); } static int rspi_remove(struct platform_device *pdev) @@ -1118,23 +946,29 @@ static int rspi_remove(struct platform_device *pdev) } static const struct spi_ops rspi_ops = { - .set_config_register = rspi_set_config_register, - .transfer_one = rspi_transfer_one, - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, + .set_config_register = rspi_set_config_register, + .transfer_one = rspi_transfer_one, + .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, + .flags = SPI_MASTER_MUST_TX, + .fifo_size = 8, }; static const struct spi_ops rspi_rz_ops = { - .set_config_register = rspi_rz_set_config_register, - .transfer_one = rspi_rz_transfer_one, - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, + .set_config_register = rspi_rz_set_config_register, + .transfer_one = rspi_rz_transfer_one, + .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, + .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .fifo_size = 8, /* 8 for TX, 32 for RX */ }; static const struct spi_ops qspi_ops = { - .set_config_register = qspi_set_config_register, - .transfer_one = qspi_transfer_one, - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | - SPI_TX_DUAL | SPI_TX_QUAD | - SPI_RX_DUAL | SPI_RX_QUAD, + .set_config_register = qspi_set_config_register, + .transfer_one = qspi_transfer_one, + .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | + SPI_TX_DUAL | SPI_TX_QUAD | + SPI_RX_DUAL | SPI_RX_QUAD, + .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .fifo_size = 32, }; #ifdef CONFIG_OF @@ -1254,6 +1088,7 @@ static int rspi_probe(struct platform_device *pdev) master->prepare_message = rspi_prepare_message; master->unprepare_message = rspi_unprepare_message; master->mode_bits = ops->mode_bits; + master->flags = ops->flags; master->dev.of_node = pdev->dev.of_node; ret = platform_get_irq_byname(pdev, "rx"); @@ -1291,11 +1126,9 @@ static int rspi_probe(struct platform_device *pdev) goto error2; } - ret = rspi_request_dma(rspi, pdev); - if (ret < 0) { - dev_err(&pdev->dev, "rspi_request_dma failed.\n"); - goto error3; - } + ret = rspi_request_dma(&pdev->dev, master, res); + if (ret < 0) + dev_warn(&pdev->dev, "DMA not available, using PIO\n"); ret = devm_spi_register_master(&pdev->dev, master); if (ret < 0) { diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c index bed2338..381d4af 100644 --- a/drivers/spi/spi-s3c24xx.c +++ b/drivers/spi/spi-s3c24xx.c @@ -183,11 +183,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi) /* allocate settings on the first call */ if (!cs) { - cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL); - if (!cs) { - dev_err(&spi->dev, "no memory for controller state\n"); + cs = devm_kzalloc(&spi->dev, + sizeof(struct s3c24xx_spi_devstate), + GFP_KERNEL); + if (!cs) return -ENOMEM; - } cs->spcon = SPCON_DEFAULT; cs->hz = -1; @@ -209,11 
+209,6 @@ static int s3c24xx_spi_setup(struct spi_device *spi) return 0; } -static void s3c24xx_spi_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) { return hw->tx ? hw->tx[count] : 0; @@ -543,7 +538,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev) hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; hw->master->setup = s3c24xx_spi_setup; - hw->master->cleanup = s3c24xx_spi_cleanup; dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index f19cd97..affcfd6 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -773,7 +773,6 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) { - dev_err(&spi->dev, "could not allocate memory for controller data\n"); of_node_put(data_np); return ERR_PTR(-ENOMEM); } @@ -987,10 +986,8 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) u32 temp; sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL); - if (!sci) { - dev_err(dev, "memory allocation for spi_info failed\n"); + if (!sci) return ERR_PTR(-ENOMEM); - } if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) { dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n"); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index e850d03..45b0914 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -642,10 +642,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev) u32 num_cs = 1; info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL); - if (!info) { - dev_err(dev, "failed to allocate setup data\n"); + if (!info) return NULL; - } /* Parse the MSIOF properties */ of_property_read_u32(np, "num-cs", &num_cs); diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index 67d8909..95ac276 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/clk.h> +#include <linux/completion.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of.h> @@ -85,6 +86,7 @@ #define SIRFSOC_SPI_TX_DONE BIT(1) #define SIRFSOC_SPI_RX_OFLOW BIT(2) #define SIRFSOC_SPI_TX_UFLOW BIT(3) +#define SIRFSOC_SPI_RX_IO_DMA BIT(4) #define SIRFSOC_SPI_RX_FIFO_FULL BIT(6) #define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7) #define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8) @@ -264,41 +266,34 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) { struct sirfsoc_spi *sspi = dev_id; u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS); - - writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS); - if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) { complete(&sspi->tx_done); writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); + writel(SIRFSOC_SPI_INT_MASK_ALL, + sspi->base + SIRFSOC_SPI_INT_STATUS); return IRQ_HANDLED; } /* Error Conditions */ if (spi_stat & SIRFSOC_SPI_RX_OFLOW || spi_stat & SIRFSOC_SPI_TX_UFLOW) { + complete(&sspi->tx_done); complete(&sspi->rx_done); writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); + writel(SIRFSOC_SPI_INT_MASK_ALL, + sspi->base + SIRFSOC_SPI_INT_STATUS); + return IRQ_HANDLED; } + if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY) + complete(&sspi->tx_done); + while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) & + SIRFSOC_SPI_RX_IO_DMA)) + cpu_relax(); + complete(&sspi->rx_done); + writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); + 
writel(SIRFSOC_SPI_INT_MASK_ALL, + sspi->base + SIRFSOC_SPI_INT_STATUS); - if (spi_stat & (SIRFSOC_SPI_FRM_END - | SIRFSOC_SPI_RXFIFO_THD_REACH)) - while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS) - & SIRFSOC_SPI_FIFO_EMPTY)) && - sspi->left_rx_word) - sspi->rx_word(sspi); - - if (spi_stat & (SIRFSOC_SPI_TXFIFO_EMPTY | - SIRFSOC_SPI_TXFIFO_THD_REACH)) - while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) - & SIRFSOC_SPI_FIFO_FULL)) && - sspi->left_tx_word) - sspi->tx_word(sspi); - - /* Received all words */ - if ((sspi->left_rx_word == 0) && (sspi->left_tx_word == 0)) { - complete(&sspi->rx_done); - writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); - } return IRQ_HANDLED; } @@ -309,59 +304,51 @@ static void spi_sirfsoc_dma_fini_callback(void *data) complete(dma_complete); } -static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) +static int spi_sirfsoc_cmd_transfer(struct spi_device *spi, + struct spi_transfer *t) { struct sirfsoc_spi *sspi; int timeout = t->len * 10; - sspi = spi_master_get_devdata(spi->master); + u32 cmd; - sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage; - sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage; - sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width; - reinit_completion(&sspi->rx_done); - reinit_completion(&sspi->tx_done); - - writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); - - /* - * fill tx_buf into command register and wait for its completion - */ - if (sspi->tx_by_cmd) { - u32 cmd; - memcpy(&cmd, sspi->tx, t->len); - - if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) - cmd = cpu_to_be32(cmd) >> - ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8); - if (sspi->word_width == 2 && t->len == 4 && - (!(spi->mode & SPI_LSB_FIRST))) - cmd = ((cmd & 0xffff) << 16) | (cmd >> 16); - - writel(cmd, sspi->base + SIRFSOC_SPI_CMD); - writel(SIRFSOC_SPI_FRM_END_INT_EN, - sspi->base + SIRFSOC_SPI_INT_EN); - writel(SIRFSOC_SPI_CMD_TX_EN, - sspi->base + SIRFSOC_SPI_TX_RX_EN); + sspi = spi_master_get_devdata(spi->master); + memcpy(&cmd, sspi->tx, t->len); + if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) + cmd = cpu_to_be32(cmd) >> + ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8); + if (sspi->word_width == 2 && t->len == 4 && + (!(spi->mode & SPI_LSB_FIRST))) + cmd = ((cmd & 0xffff) << 16) | (cmd >> 16); + writel(cmd, sspi->base + SIRFSOC_SPI_CMD); + writel(SIRFSOC_SPI_FRM_END_INT_EN, + sspi->base + SIRFSOC_SPI_INT_EN); + writel(SIRFSOC_SPI_CMD_TX_EN, + sspi->base + SIRFSOC_SPI_TX_RX_EN); + if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { + dev_err(&spi->dev, "cmd transfer timeout\n"); + return 0; + } - if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { - dev_err(&spi->dev, "transfer timeout\n"); - return 0; - } + return t->len; +} - return t->len; - } +static void spi_sirfsoc_dma_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct sirfsoc_spi *sspi; + struct dma_async_tx_descriptor *rx_desc, *tx_desc; + int timeout = t->len * 10; - if (sspi->left_tx_word == 1) { - writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | - SIRFSOC_SPI_ENA_AUTO_CLR, - sspi->base + SIRFSOC_SPI_CTRL); - writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); - writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); - } else if ((sspi->left_tx_word > 1) && (sspi->left_tx_word < - SIRFSOC_SPI_DAT_FRM_LEN_MAX)) { + sspi = spi_master_get_devdata(spi->master); + writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); + writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); + writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); + writel(0, sspi->base + SIRFSOC_SPI_INT_EN); + writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); + if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) { writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | - SIRFSOC_SPI_MUL_DAT_MODE | - SIRFSOC_SPI_ENA_AUTO_CLR, + SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE, sspi->base + SIRFSOC_SPI_CTRL); writel(sspi->left_tx_word - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); @@ -373,76 +360,122 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); } - - writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); - writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); - writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); - writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); - - if (IS_DMA_VALID(t)) { - struct dma_async_tx_descriptor *rx_desc, *tx_desc; - - sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE); - rx_desc = dmaengine_prep_slave_single(sspi->rx_chan, - sspi->dst_start, t->len, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - rx_desc->callback = spi_sirfsoc_dma_fini_callback; - rx_desc->callback_param = &sspi->rx_done; - - sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE); - tx_desc = dmaengine_prep_slave_single(sspi->tx_chan, - sspi->src_start, t->len, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - tx_desc->callback = spi_sirfsoc_dma_fini_callback; - tx_desc->callback_param = &sspi->tx_done; - - dmaengine_submit(tx_desc); - dmaengine_submit(rx_desc); - dma_async_issue_pending(sspi->tx_chan); - dma_async_issue_pending(sspi->rx_chan); - } else { - /* Send the first word to trigger the whole tx/rx process */ - sspi->tx_word(sspi); - - writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN | - SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN | - SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN | - SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN); - } - - writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN); - - if (!IS_DMA_VALID(t)) { /* for PIO */ - if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) - dev_err(&spi->dev, "transfer timeout\n"); - } else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) { + sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, + (t->tx_buf != t->rx_buf) ? + DMA_FROM_DEVICE : DMA_BIDIRECTIONAL); + rx_desc = dmaengine_prep_slave_single(sspi->rx_chan, + sspi->dst_start, t->len, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + rx_desc->callback = spi_sirfsoc_dma_fini_callback; + rx_desc->callback_param = &sspi->rx_done; + + sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, + (t->tx_buf != t->rx_buf) ? 
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL); + tx_desc = dmaengine_prep_slave_single(sspi->tx_chan, + sspi->src_start, t->len, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + tx_desc->callback = spi_sirfsoc_dma_fini_callback; + tx_desc->callback_param = &sspi->tx_done; + + dmaengine_submit(tx_desc); + dmaengine_submit(rx_desc); + dma_async_issue_pending(sspi->tx_chan); + dma_async_issue_pending(sspi->rx_chan); + writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, + sspi->base + SIRFSOC_SPI_TX_RX_EN); + if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) { dev_err(&spi->dev, "transfer timeout\n"); dmaengine_terminate_all(sspi->rx_chan); } else sspi->left_rx_word = 0; - /* * we only wait tx-done event if transferring by DMA. for PIO, * we get rx data by writing tx data, so if rx is done, tx has * done earlier */ - if (IS_DMA_VALID(t)) { - if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { - dev_err(&spi->dev, "transfer timeout\n"); - dmaengine_terminate_all(sspi->tx_chan); - } - } - - if (IS_DMA_VALID(t)) { - dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE); - dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE); + if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { + dev_err(&spi->dev, "transfer timeout\n"); + dmaengine_terminate_all(sspi->tx_chan); } - + dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE); + dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE); /* TX, RX FIFO stop */ writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); - writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); - writel(0, sspi->base + SIRFSOC_SPI_INT_EN); + if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX) + writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); +} + +static void spi_sirfsoc_pio_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct sirfsoc_spi *sspi; + int timeout = t->len * 10; + + sspi = spi_master_get_devdata(spi->master); + do { + writel(SIRFSOC_SPI_FIFO_RESET, + sspi->base + SIRFSOC_SPI_RXFIFO_OP); + writel(SIRFSOC_SPI_FIFO_RESET, + sspi->base + SIRFSOC_SPI_TXFIFO_OP); + writel(SIRFSOC_SPI_FIFO_START, + sspi->base + SIRFSOC_SPI_RXFIFO_OP); + writel(SIRFSOC_SPI_FIFO_START, + sspi->base + SIRFSOC_SPI_TXFIFO_OP); + writel(0, sspi->base + SIRFSOC_SPI_INT_EN); + writel(SIRFSOC_SPI_INT_MASK_ALL, + sspi->base + SIRFSOC_SPI_INT_STATUS); + writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | + SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR, + sspi->base + SIRFSOC_SPI_CTRL); + writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width)) + - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); + writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width)) + - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); + while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) + & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word) + sspi->tx_word(sspi); + writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN | + SIRFSOC_SPI_TX_UFLOW_INT_EN | + SIRFSOC_SPI_RX_OFLOW_INT_EN, + sspi->base + SIRFSOC_SPI_INT_EN); + writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, + sspi->base + SIRFSOC_SPI_TX_RX_EN); + if (!wait_for_completion_timeout(&sspi->tx_done, timeout) || + !wait_for_completion_timeout(&sspi->rx_done, timeout)) { + dev_err(&spi->dev, "transfer timeout\n"); + break; + } + while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS) + & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word) + sspi->rx_word(sspi); + writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); + writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); + } 
while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0); +} + +static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct sirfsoc_spi *sspi; + sspi = spi_master_get_devdata(spi->master); + + sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage; + sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage; + sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width; + reinit_completion(&sspi->rx_done); + reinit_completion(&sspi->tx_done); + /* + * in the transfer, if transfer data using command register with rx_buf + * null, just fill command data into command register and wait for its + * completion. + */ + if (sspi->tx_by_cmd) + spi_sirfsoc_cmd_transfer(spi, t); + else if (IS_DMA_VALID(t)) + spi_sirfsoc_dma_transfer(spi, t); + else + spi_sirfsoc_pio_transfer(spi, t); return t->len - sspi->left_rx_word * sspi->word_width; } @@ -512,7 +545,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) break; case 12: case 16: - regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 : + regval |= (bits_per_word == 12) ? + SIRFSOC_SPI_TRAN_DAT_FORMAT_12 : SIRFSOC_SPI_TRAN_DAT_FORMAT_16; sspi->rx_word = spi_sirfsoc_rx_word_u16; sspi->tx_word = spi_sirfsoc_tx_word_u16; @@ -540,8 +574,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) regval |= SIRFSOC_SPI_CLK_IDLE_STAT; /* - * Data should be driven at least 1/2 cycle before the fetch edge to make - * sure that data gets stable at the fetch edge. + * Data should be driven at least 1/2 cycle before the fetch edge + * to make sure that data gets stable at the fetch edge. */ if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) || (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) @@ -578,11 +612,14 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) if (IS_DMA_VALID(t)) { /* Enable DMA mode for RX, TX */ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); - writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); + writel(SIRFSOC_SPI_RX_DMA_FLUSH, + sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); } else { /* Enable IO mode for RX, TX */ - writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); - writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); + writel(SIRFSOC_SPI_IO_MODE_SEL, + sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); + writel(SIRFSOC_SPI_IO_MODE_SEL, + sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); } return 0; @@ -612,7 +649,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) goto err_cs; } - master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs); + master = spi_alloc_master(&pdev->dev, + sizeof(*sspi) + sizeof(int) * num_cs); if (!master) { dev_err(&pdev->dev, "Unable to allocate SPI master\n"); return -ENOMEM; @@ -808,8 +846,7 @@ static struct platform_driver spi_sirfsoc_driver = { .remove = spi_sirfsoc_remove, }; module_platform_driver(spi_sirfsoc_driver); - MODULE_DESCRIPTION("SiRF SoC SPI master driver"); -MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, " - "Barry Song <Baohua.Song@csr.com>"); +MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>"); +MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c index 2d4010d..daf5aa1 100644 --- a/drivers/spi/spi-tle62x0.c +++ b/drivers/spi/spi-tle62x0.c @@ -253,10 +253,8 @@ static int tle62x0_probe(struct spi_device *spi) } st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL); - if (st == NULL) { - 
dev_err(&spi->dev, "no memory for device state\n"); + if (st == NULL) return -ENOMEM; - } st->us = spi; st->nr_gpio = pdata->gpio_count; diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index f406b30..f05abf8 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -1578,14 +1578,11 @@ static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct pch_pd_dev_save *pd_dev_save; pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL); - if (!pd_dev_save) { - dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__); + if (!pd_dev_save) return -ENOMEM; - } board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); if (!board_dat) { - dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__); retval = -ENOMEM; goto err_no_mem; } |