From f139f97878039f9a49db6cb555d95f6b6e9ba0f8 Mon Sep 17 00:00:00 2001
From: Stanimir Varbanov
Date: Mon, 11 Apr 2016 11:38:38 +0300
Subject: dmaengine: qcom: bam_dma: fix dma free memory on remove

Building the driver as a module and then removing the already inserted
module gives the oops below:

[ 1389.392788] Unable to handle kernel paging request at virtual address ffffffbdc000001c
[ 1389.421321] pgd = ffffffc02fa87000
[ 1389.447899] [ffffffbdc000001c] *pgd=0000000000000000, *pud=0000000000000000
[ 1389.460142] Internal error: Oops: 96000006 [#1] PREEMPT SMP
[ 1389.466963] Modules linked in: qcom_bam_dma(-)
[ 1389.486608] CPU: 2 PID: 2442 Comm: rmmod Not tainted 4.2.0+ #407
[ 1389.493885] Hardware name: Qualcomm Technologies, Inc. APQ 8016 SBC (DT)
[ 1389.501196] task: ffffffc035bae2c0 ti: ffffffc0368a8000 task.ti: ffffffc0368a8000
[ 1389.508566] PC is at __free_pages+0xc/0x40
[ 1389.515893] LR is at free_pages.part.93+0x30/0x38
[ 1389.523141] pc : [] lr : [] pstate: 80000145
[ 1389.530602] sp : ffffffc0368abc20
[ 1389.537931] x29: ffffffc0368abc20 x28: ffffffc0368a8000
[ 1389.549153] x27: 0000000000000000 x26: 0000000000000000
[ 1389.560412] x25: ffffffc000cb2000 x24: 0000000000000170
[ 1389.571530] x23: 0000000000000004 x22: ffffffc036bc5010
[ 1389.582721] x21: ffffffc036bc5010 x20: 0000000000000000
[ 1389.593981] x19: 0000000000000002 x18: 0000007fcbc8e8b0
[ 1389.605301] x17: 0000007f9b8226ec x16: ffffffc0002089e8
[ 1389.616647] x15: 0000007f9b8a0588 x14: 0ffffffffffffffc
[ 1389.628039] x13: 0000000000000030 x12: 0000000000000000
[ 1389.639436] x11: 0000000000000008 x10: ffffffc000ecc000
[ 1389.650872] x9 : ffffffc035bae2c0 x8 : ffffffc035bae9a8
[ 1389.662367] x7 : ffffffc035bae9a0 x6 : 0000000000000000
[ 1389.673906] x5 : ffffffbdc000001c x4 : 0000000080000000
[ 1389.685475] x3 : ffffffbdc0000000 x2 : 0000004080000000
[ 1389.697049] x1 : 0000000000000003 x0 : ffffffbdc0000000

The memory has already been freed by bam_free_chan(), so fix this by
skipping the already freed memory.

Signed-off-by: Stanimir Varbanov
Reviewed-by: Andy Gross
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/bam_dma.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/dma')

diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d5e0a9c..a486bc0 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1234,6 +1234,9 @@ static int bam_dma_remove(struct platform_device *pdev)
 		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
 		tasklet_kill(&bdev->channels[i].vc.task);

+		if (!bdev->channels[i].fifo_virt)
+			continue;
+
 		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
 			    bdev->channels[i].fifo_virt,
 			    bdev->channels[i].fifo_phys);
-- cgit v1.1

From f89117c0f54c235c149d31174d1dd855e04765b8 Mon Sep 17 00:00:00 2001
From: Stanimir Varbanov
Date: Mon, 11 Apr 2016 11:38:39 +0300
Subject: dmaengine: qcom: bam_dma: clear BAM interrupt only if it is raised

Currently we write the BAM_IRQ_CLR register with zero even when no
BAM_IRQ has occurred. This write has some bad side effects when the BAM
instance is for the crypto engine. In the case of the crypto engine some
of the BAM registers are xPU protected and cannot be controlled by the
driver.
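As a minimal sketch of the resulting flow (mirroring the hunk that
follows), BAM_IRQ_STTS is read and BAM_IRQ_CLR written only when the
source status actually reports a BAM-level interrupt:

	if (srcs & BAM_IRQ) {
		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
		/* keep the status read ordered against the clear write */
		mb();
		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
	}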
Signed-off-by: Stanimir Varbanov
Reviewed-by: Andy Gross
Tested-by: Pramod Gurav
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/bam_dma.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'drivers/dma')

diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index a486bc0..789d5f8 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -801,13 +801,17 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 	if (srcs & P_IRQ)
 		tasklet_schedule(&bdev->task);

-	if (srcs & BAM_IRQ)
+	if (srcs & BAM_IRQ) {
 		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

-	/* don't allow reorder of the various accesses to the BAM registers */
-	mb();
+		/*
+		 * don't allow reorder of the various accesses to the BAM
+		 * registers
+		 */
+		mb();

-	writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+	}

 	return IRQ_HANDLED;
 }
-- cgit v1.1

From 5172c9eb89d4ea41f86ff91b15b2b0dc75ded869 Mon Sep 17 00:00:00 2001
From: Stanimir Varbanov
Date: Mon, 11 Apr 2016 11:38:41 +0300
Subject: dmaengine: qcom: bam_dma: add controlled-remotely dt property

Some of the peripherals have a BAM which is controlled by a remote
processor, thus the BAM DMA driver must avoid the register writes which
initialise the BAM hw block. Those registers are protected by the xPU
block, and any writes to them will lead to a secure violation and
system reboot. Add a controlled_remotely flag in the BAM driver to
avoid the non-permitted register writes in the bam_init function.

Signed-off-by: Stanimir Varbanov
Reviewed-by: Andy Gross
Tested-by: Pramod Gurav
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/bam_dma.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'drivers/dma')

diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 789d5f8..d0f878a 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -387,6 +387,7 @@ struct bam_device {
 	/* execution environment ID, from DT */
 	u32 ee;
+	bool controlled_remotely;

 	const struct reg_offset_data *layout;

@@ -1042,6 +1043,9 @@ static int bam_init(struct bam_device *bdev)
 	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
 	bdev->num_channels = val & BAM_NUM_PIPES_MASK;

+	if (bdev->controlled_remotely)
+		return 0;
+
 	/* s/w reset bam */
 	/* after reset all pipes are disabled and idle */
 	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
@@ -1129,6 +1133,9 @@ static int bam_dma_probe(struct platform_device *pdev)
 		return ret;
 	}

+	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
+						"qcom,controlled-remotely");
+
 	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
 	if (IS_ERR(bdev->bamclk))
 		return PTR_ERR(bdev->bamclk);
-- cgit v1.1

From 2a663ed9fe88cb237d72ce869aeadbbf119ad8e4 Mon Sep 17 00:00:00 2001
From: Stanimir Varbanov
Date: Mon, 11 Apr 2016 11:38:42 +0300
Subject: dmaengine: qcom: bam_dma: use correct pipe FIFO size

The pipe FIFO size register must tell the BAM hw how many hw
descriptors can be pushed to the FIFO. Currently we program the hw with
32 KBytes, but bam_start_dma() wraps the tail written to BAM_P_EVNT_REG
at descriptor 4095, i.e. byte offset 32760. This leads to stalled
transactions when the tail wraps. Fix this by using the correct FIFO
size in the BAM_P_FIFO_SIZES register, i.e. 32K - 8.
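To see the mismatch, a quick sketch of the arithmetic, using the sizes
from the driver (a hw descriptor, struct bam_desc_hw, is 8 bytes):

	BAM_DESC_FIFO_SIZE = SZ_32K        = 32768 bytes
	MAX_DESCRIPTORS    = 32768 / 8 - 1 = 4095 descriptors
	SW tail wrap point = 4095 * 8      = 32760 bytes

The hardware was told the FIFO holds 32768 bytes while the software
tail wraps at byte 32760, so the two sides fall out of step as soon as
the ring wraps; programming 32K - 8 into BAM_P_FIFO_SIZES keeps them in
agreement.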
Signed-off-by: Stanimir Varbanov
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/bam_dma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/dma')

diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d0f878a..7e5ad1c 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -459,7 +459,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
 	 */
 	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
 			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
-	writel_relaxed(BAM_DESC_FIFO_SIZE,
+	writel_relaxed(BAM_MAX_DATA_SIZE,
 			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

 	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
-- cgit v1.1

From 5ad3f29f6a712c3a37dbda4fba3b6969857d39d9 Mon Sep 17 00:00:00 2001
From: Stanimir Varbanov
Date: Mon, 11 Apr 2016 11:38:43 +0300
Subject: dmaengine: qcom: bam_dma: rename BAM_MAX_DATA_SIZE define

The define did not have an accurate name, which causes confusion while
reading the code. The more accurate name is BAM_FIFO_SIZE.

Signed-off-by: Stanimir Varbanov
Reviewed-by: Andy Gross
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/bam_dma.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'drivers/dma')

diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 7e5ad1c..969b481 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -342,7 +342,7 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = {
 #define BAM_DESC_FIFO_SIZE	SZ_32K
 #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
-#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
+#define BAM_FIFO_SIZE	(SZ_32K - 8)

 struct bam_chan {
 	struct virt_dma_chan vc;
@@ -459,7 +459,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
 	 */
 	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
 			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
-	writel_relaxed(BAM_MAX_DATA_SIZE,
+	writel_relaxed(BAM_FIFO_SIZE,
 			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

 	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
@@ -605,7 +605,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,

 	/* calculate number of required entries */
 	for_each_sg(sgl, sg, sg_len, i)
-		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);

 	/* allocate enough room to accomodate the number of entries */
 	async_desc = kzalloc(sizeof(*async_desc) +
@@ -636,10 +636,10 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
 			desc->addr = cpu_to_le32(sg_dma_address(sg) +
 						 curr_offset);

-			if (remainder > BAM_MAX_DATA_SIZE) {
-				desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
-				remainder -= BAM_MAX_DATA_SIZE;
-				curr_offset += BAM_MAX_DATA_SIZE;
+			if (remainder > BAM_FIFO_SIZE) {
+				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
+				remainder -= BAM_FIFO_SIZE;
+				curr_offset += BAM_FIFO_SIZE;
 			} else {
 				desc->size = cpu_to_le16(remainder);
 				remainder = 0;
@@ -1174,7 +1174,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 	/* set max dma segment size */
 	bdev->common.dev = bdev->dev;
 	bdev->common.dev->dma_parms = &bdev->dma_parms;
-	ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
 	if (ret) {
 		dev_err(bdev->dev, "cannot set maximum segment size\n");
 		goto err_bam_channel_exit;
-- cgit v1.1

From d1615ca2e085222025118793b7b4af2cf4867b6e Mon Sep 17 00:00:00 2001
From: Sinan Kaya
Date: Sun, 1 May 2016 00:25:26 -0400
Subject: dmaengine: qcom_hidma: implement lower level hardware interface

This patch implements the hardware hooks for the HIDMA channel driver.
The main functions of interest are:

- hidma_ll_init
- hidma_ll_request
- hidma_ll_queue_request
- hidma_ll_hw_start

The OS layer calls the hidma_ll_init function during probe to set up
the hardware. At this moment, the number of supported descriptors is
also given. On each request, a descriptor is allocated from the free
pool and filled in with the transfer parameters. Multiple requests can
be queued into the hardware via the OS interface. When the client is
ready for requests to be executed, the start method is called.
Completions are delivered via callbacks from a tasklet.

Signed-off-by: Sinan Kaya
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/Makefile   |   2 +
 drivers/dma/qcom/hidma.c    |   8 +-
 drivers/dma/qcom/hidma.h    |  38 +-
 drivers/dma/qcom/hidma_ll.c | 872 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 895 insertions(+), 25 deletions(-)
 create mode 100644 drivers/dma/qcom/hidma_ll.c

(limited to 'drivers/dma')

diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index bfea699..6bf9267 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
 obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
 hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
+obj-$(CONFIG_QCOM_HIDMA) += hdma.o
+hdma-objs := hidma_ll.o hidma.o
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index cccc78e..af5e542 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -404,7 +404,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
 	spin_unlock_irqrestore(&mchan->lock, irqflags);

 	/* this suspends the existing transfer */
-	rc = hidma_ll_pause(dmadev->lldev);
+	rc = hidma_ll_disable(dmadev->lldev);
 	if (rc) {
 		dev_err(dmadev->ddev.dev, "channel did not pause\n");
 		goto out;
@@ -427,7 +427,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
 		list_move(&mdesc->node, &mchan->free);
 	}

-	rc = hidma_ll_resume(dmadev->lldev);
+	rc = hidma_ll_enable(dmadev->lldev);
 out:
 	pm_runtime_mark_last_busy(dmadev->ddev.dev);
 	pm_runtime_put_autosuspend(dmadev->ddev.dev);
@@ -488,7 +488,7 @@ static int hidma_pause(struct dma_chan *chan)
 	dmadev = to_hidma_dev(mchan->chan.device);
 	if (!mchan->paused) {
 		pm_runtime_get_sync(dmadev->ddev.dev);
-		if (hidma_ll_pause(dmadev->lldev))
+		if (hidma_ll_disable(dmadev->lldev))
 			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
 		mchan->paused = true;
 		pm_runtime_mark_last_busy(dmadev->ddev.dev);
@@ -507,7 +507,7 @@ static int hidma_resume(struct dma_chan *chan)
 	dmadev = to_hidma_dev(mchan->chan.device);
 	if (mchan->paused) {
 		pm_runtime_get_sync(dmadev->ddev.dev);
-		rc = hidma_ll_resume(dmadev->lldev);
+		rc = hidma_ll_enable(dmadev->lldev);
 		if (!rc)
 			mchan->paused = false;
 		else
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index 231e306..b1362ef 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -1,7 +1,7 @@
 /*
  * Qualcomm Technologies HIDMA data structures
  *
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -20,32 +20,29 @@ #include #include -#define TRE_SIZE 32 /* each TRE is 32 bytes */ -#define TRE_CFG_IDX 0 -#define TRE_LEN_IDX 1 -#define TRE_SRC_LOW_IDX 2 -#define TRE_SRC_HI_IDX 3 -#define TRE_DEST_LOW_IDX 4 -#define TRE_DEST_HI_IDX 5 - -struct hidma_tx_status { - u8 err_info; /* error record in this transfer */ - u8 err_code; /* completion code */ -}; +#define HIDMA_TRE_SIZE 32 /* each TRE is 32 bytes */ +#define HIDMA_TRE_CFG_IDX 0 +#define HIDMA_TRE_LEN_IDX 1 +#define HIDMA_TRE_SRC_LOW_IDX 2 +#define HIDMA_TRE_SRC_HI_IDX 3 +#define HIDMA_TRE_DEST_LOW_IDX 4 +#define HIDMA_TRE_DEST_HI_IDX 5 struct hidma_tre { atomic_t allocated; /* if this channel is allocated */ bool queued; /* flag whether this is pending */ u16 status; /* status */ - u32 chidx; /* index of the tre */ + u32 idx; /* index of the tre */ u32 dma_sig; /* signature of the tre */ const char *dev_name; /* name of the device */ void (*callback)(void *data); /* requester callback */ void *data; /* Data associated with this channel*/ struct hidma_lldev *lldev; /* lldma device pointer */ - u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ + u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ u32 tre_index; /* the offset where this was written*/ u32 int_flags; /* interrupt flags */ + u8 err_info; /* error record in this transfer */ + u8 err_code; /* completion code */ }; struct hidma_lldev { @@ -61,22 +58,21 @@ struct hidma_lldev { void __iomem *evca; /* Event Channel address */ struct hidma_tre **pending_tre_list; /* Pointers to pending TREs */ - struct hidma_tx_status - *tx_status_list; /* Pointers to pending TREs status*/ s32 pending_tre_count; /* Number of TREs pending */ void *tre_ring; /* TRE ring */ - dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */ + dma_addr_t tre_dma; /* TRE ring to be shared with HW */ u32 tre_ring_size; /* Byte size of the ring */ u32 tre_processed_off; /* last processed TRE */ void *evre_ring; /* EVRE ring */ - dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */ + dma_addr_t evre_dma; /* EVRE ring to be shared with HW */ u32 evre_ring_size; /* Byte size of the ring */ u32 evre_processed_off; /* last processed EVRE */ u32 tre_write_offset; /* TRE write location */ struct tasklet_struct task; /* task delivering notifications */ + struct tasklet_struct rst_task; /* task to reset HW */ DECLARE_KFIFO_PTR(handoff_fifo, struct hidma_tre *); /* pending TREs FIFO */ }; @@ -145,8 +141,8 @@ enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch); bool hidma_ll_isenabled(struct hidma_lldev *llhndl); void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); void hidma_ll_start(struct hidma_lldev *llhndl); -int hidma_ll_pause(struct hidma_lldev *llhndl); -int hidma_ll_resume(struct hidma_lldev *llhndl); +int hidma_ll_disable(struct hidma_lldev *lldev); +int hidma_ll_enable(struct hidma_lldev *llhndl); void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); int hidma_ll_setup(struct hidma_lldev *lldev); diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c new file mode 100644 index 0000000..f392900 --- /dev/null +++ b/drivers/dma/qcom/hidma_ll.c @@ -0,0 +1,872 @@ +/* + * Qualcomm Technologies HIDMA DMA engine low level code + * + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hidma.h" + +#define HIDMA_EVRE_SIZE 16 /* each EVRE is 16 bytes */ + +#define HIDMA_TRCA_CTRLSTS_REG 0x000 +#define HIDMA_TRCA_RING_LOW_REG 0x008 +#define HIDMA_TRCA_RING_HIGH_REG 0x00C +#define HIDMA_TRCA_RING_LEN_REG 0x010 +#define HIDMA_TRCA_DOORBELL_REG 0x400 + +#define HIDMA_EVCA_CTRLSTS_REG 0x000 +#define HIDMA_EVCA_INTCTRL_REG 0x004 +#define HIDMA_EVCA_RING_LOW_REG 0x008 +#define HIDMA_EVCA_RING_HIGH_REG 0x00C +#define HIDMA_EVCA_RING_LEN_REG 0x010 +#define HIDMA_EVCA_WRITE_PTR_REG 0x020 +#define HIDMA_EVCA_DOORBELL_REG 0x400 + +#define HIDMA_EVCA_IRQ_STAT_REG 0x100 +#define HIDMA_EVCA_IRQ_CLR_REG 0x108 +#define HIDMA_EVCA_IRQ_EN_REG 0x110 + +#define HIDMA_EVRE_CFG_IDX 0 + +#define HIDMA_EVRE_ERRINFO_BIT_POS 24 +#define HIDMA_EVRE_CODE_BIT_POS 28 + +#define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0) +#define HIDMA_EVRE_CODE_MASK GENMASK(3, 0) + +#define HIDMA_CH_CONTROL_MASK GENMASK(7, 0) +#define HIDMA_CH_STATE_MASK GENMASK(7, 0) +#define HIDMA_CH_STATE_BIT_POS 0x8 + +#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0 +#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1 +#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9 +#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10 +#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11 +#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14 + +#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \ + BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \ + BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \ + BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \ + BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \ + BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)) + +#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size) \ +do { \ + iter += size; \ + if (iter >= ring_size) \ + iter -= ring_size; \ +} while (0) + +#define HIDMA_CH_STATE(val) \ + ((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK) + +#define HIDMA_ERR_INT_MASK \ + (BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \ + BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \ + BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \ + BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \ + BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)) + +enum ch_command { + HIDMA_CH_DISABLE = 0, + HIDMA_CH_ENABLE = 1, + HIDMA_CH_SUSPEND = 2, + HIDMA_CH_RESET = 9, +}; + +enum ch_state { + HIDMA_CH_DISABLED = 0, + HIDMA_CH_ENABLED = 1, + HIDMA_CH_RUNNING = 2, + HIDMA_CH_SUSPENDED = 3, + HIDMA_CH_STOPPED = 4, +}; + +enum tre_type { + HIDMA_TRE_MEMCPY = 3, +}; + +enum err_code { + HIDMA_EVRE_STATUS_COMPLETE = 1, + HIDMA_EVRE_STATUS_ERROR = 4, +}; + +static int hidma_is_chan_enabled(int state) +{ + switch (state) { + case HIDMA_CH_ENABLED: + case HIDMA_CH_RUNNING: + return true; + default: + return false; + } +} + +void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch) +{ + struct hidma_tre *tre; + + if (tre_ch >= lldev->nr_tres) { + dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch); + return; + } + + tre = &lldev->trepool[tre_ch]; + if (atomic_read(&tre->allocated) != true) { + dev_err(lldev->dev, "trying to free an unused 
TRE:%d", tre_ch); + return; + } + + atomic_set(&tre->allocated, 0); +} + +int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name, + void (*callback)(void *data), void *data, u32 *tre_ch) +{ + unsigned int i; + struct hidma_tre *tre; + u32 *tre_local; + + if (!tre_ch || !lldev) + return -EINVAL; + + /* need to have at least one empty spot in the queue */ + for (i = 0; i < lldev->nr_tres - 1; i++) { + if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1)) + break; + } + + if (i == (lldev->nr_tres - 1)) + return -ENOMEM; + + tre = &lldev->trepool[i]; + tre->dma_sig = sig; + tre->dev_name = dev_name; + tre->callback = callback; + tre->data = data; + tre->idx = i; + tre->status = 0; + tre->queued = 0; + tre->err_code = 0; + tre->err_info = 0; + tre->lldev = lldev; + tre_local = &tre->tre_local[0]; + tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY; + tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8; + tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */ + *tre_ch = i; + if (callback) + callback(data); + return 0; +} + +/* + * Multiple TREs may be queued and waiting in the pending queue. + */ +static void hidma_ll_tre_complete(unsigned long arg) +{ + struct hidma_lldev *lldev = (struct hidma_lldev *)arg; + struct hidma_tre *tre; + + while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) { + /* call the user if it has been read by the hardware */ + if (tre->callback) + tre->callback(tre->data); + } +} + +static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator, + u8 err_info, u8 err_code) +{ + struct hidma_tre *tre; + unsigned long flags; + + spin_lock_irqsave(&lldev->lock, flags); + tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE]; + if (!tre) { + spin_unlock_irqrestore(&lldev->lock, flags); + dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n", + tre_iterator / HIDMA_TRE_SIZE); + return -EINVAL; + } + lldev->pending_tre_list[tre->tre_index] = NULL; + + /* + * Keep track of pending TREs that SW is expecting to receive + * from HW. We got one now. Decrement our counter. + */ + lldev->pending_tre_count--; + if (lldev->pending_tre_count < 0) { + dev_warn(lldev->dev, "tre count mismatch on completion"); + lldev->pending_tre_count = 0; + } + + spin_unlock_irqrestore(&lldev->lock, flags); + + tre->err_info = err_info; + tre->err_code = err_code; + tre->queued = 0; + + kfifo_put(&lldev->handoff_fifo, tre); + tasklet_schedule(&lldev->task); + + return 0; +} + +/* + * Called to handle the interrupt for the channel. + * Return a positive number if TRE or EVRE were consumed on this run. + * Return a positive number if there are pending TREs or EVREs. + * Return 0 if there is nothing to consume or no pending TREs/EVREs found. + */ +static int hidma_handle_tre_completion(struct hidma_lldev *lldev) +{ + u32 evre_ring_size = lldev->evre_ring_size; + u32 tre_ring_size = lldev->tre_ring_size; + u32 err_info, err_code, evre_write_off; + u32 tre_iterator, evre_iterator; + u32 num_completed = 0; + + evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); + tre_iterator = lldev->tre_processed_off; + evre_iterator = lldev->evre_processed_off; + + if ((evre_write_off > evre_ring_size) || + (evre_write_off % HIDMA_EVRE_SIZE)) { + dev_err(lldev->dev, "HW reports invalid EVRE write offset\n"); + return 0; + } + + /* + * By the time control reaches here the number of EVREs and TREs + * may not match. Only consume the ones that hardware told us. 
+ */ + while ((evre_iterator != evre_write_off)) { + u32 *current_evre = lldev->evre_ring + evre_iterator; + u32 cfg; + + cfg = current_evre[HIDMA_EVRE_CFG_IDX]; + err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS; + err_info &= HIDMA_EVRE_ERRINFO_MASK; + err_code = + (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK; + + if (hidma_post_completed(lldev, tre_iterator, err_info, + err_code)) + break; + + HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, + tre_ring_size); + HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE, + evre_ring_size); + + /* + * Read the new event descriptor written by the HW. + * As we are processing the delivered events, other events + * get queued to the SW for processing. + */ + evre_write_off = + readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); + num_completed++; + } + + if (num_completed) { + u32 evre_read_off = (lldev->evre_processed_off + + HIDMA_EVRE_SIZE * num_completed); + u32 tre_read_off = (lldev->tre_processed_off + + HIDMA_TRE_SIZE * num_completed); + + evre_read_off = evre_read_off % evre_ring_size; + tre_read_off = tre_read_off % tre_ring_size; + + writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG); + + /* record the last processed tre offset */ + lldev->tre_processed_off = tre_read_off; + lldev->evre_processed_off = evre_read_off; + } + + return num_completed; +} + +void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info, + u8 err_code) +{ + u32 tre_iterator; + u32 tre_ring_size = lldev->tre_ring_size; + int num_completed = 0; + u32 tre_read_off; + + tre_iterator = lldev->tre_processed_off; + while (lldev->pending_tre_count) { + if (hidma_post_completed(lldev, tre_iterator, err_info, + err_code)) + break; + HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, + tre_ring_size); + num_completed++; + } + tre_read_off = (lldev->tre_processed_off + + HIDMA_TRE_SIZE * num_completed); + + tre_read_off = tre_read_off % tre_ring_size; + + /* record the last processed tre offset */ + lldev->tre_processed_off = tre_read_off; +} + +static int hidma_ll_reset(struct hidma_lldev *lldev) +{ + u32 val; + int ret; + + val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + val &= ~(HIDMA_CH_CONTROL_MASK << 16); + val |= HIDMA_CH_RESET << 16; + writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + + /* + * Delay 10ms after reset to allow DMA logic to quiesce. + * Do a polled read up to 1ms and 10ms maximum. + */ + ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, + HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED, + 1000, 10000); + if (ret) { + dev_err(lldev->dev, "transfer channel did not reset\n"); + return ret; + } + + val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + val &= ~(HIDMA_CH_CONTROL_MASK << 16); + val |= HIDMA_CH_RESET << 16; + writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + + /* + * Delay 10ms after reset to allow DMA logic to quiesce. + * Do a polled read up to 1ms and 10ms maximum. + */ + ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, + HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED, + 1000, 10000); + if (ret) + return ret; + + lldev->trch_state = HIDMA_CH_DISABLED; + lldev->evch_state = HIDMA_CH_DISABLED; + return 0; +} + +/* + * Abort all transactions and perform a reset. 
+ */ +static void hidma_ll_abort(unsigned long arg) +{ + struct hidma_lldev *lldev = (struct hidma_lldev *)arg; + u8 err_code = HIDMA_EVRE_STATUS_ERROR; + u8 err_info = 0xFF; + int rc; + + hidma_cleanup_pending_tre(lldev, err_info, err_code); + + /* reset the channel for recovery */ + rc = hidma_ll_setup(lldev); + if (rc) { + dev_err(lldev->dev, "channel reinitialize failed after error\n"); + return; + } + writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); +} + +/* + * The interrupt handler for HIDMA will try to consume as many pending + * EVRE from the event queue as possible. Each EVRE has an associated + * TRE that holds the user interface parameters. EVRE reports the + * result of the transaction. Hardware guarantees ordering between EVREs + * and TREs. We use last processed offset to figure out which TRE is + * associated with which EVRE. If two TREs are consumed by HW, the EVREs + * are in order in the event ring. + * + * This handler will do a one pass for consuming EVREs. Other EVREs may + * be delivered while we are working. It will try to consume incoming + * EVREs one more time and return. + * + * For unprocessed EVREs, hardware will trigger another interrupt until + * all the interrupt bits are cleared. + * + * Hardware guarantees that by the time interrupt is observed, all data + * transactions in flight are delivered to their respective places and + * are visible to the CPU. + * + * On demand paging for IOMMU is only supported for PCIe via PRI + * (Page Request Interface) not for HIDMA. All other hardware instances + * including HIDMA work on pinned DMA addresses. + * + * HIDMA is not aware of IOMMU presence since it follows the DMA API. All + * IOMMU latency will be built into the data movement time. By the time + * interrupt happens, IOMMU lookups + data movement has already taken place. + * + * While the first read in a typical PCI endpoint ISR flushes all outstanding + * requests traditionally to the destination, this concept does not apply + * here for this HW. + */ +irqreturn_t hidma_ll_inthandler(int chirq, void *arg) +{ + struct hidma_lldev *lldev = arg; + u32 status; + u32 enable; + u32 cause; + + /* + * Fine tuned for this HW... + * + * This ISR has been designed for this particular hardware. Relaxed + * read and write accessors are used for performance reasons due to + * interrupt delivery guarantees. Do not copy this code blindly and + * expect that to work. + */ + status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); + enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + cause = status & enable; + + while (cause) { + if (cause & HIDMA_ERR_INT_MASK) { + dev_err(lldev->dev, "error 0x%x, resetting...\n", + cause); + + /* Clear out pending interrupts */ + writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + + tasklet_schedule(&lldev->rst_task); + goto out; + } + + /* + * Try to consume as many EVREs as possible. + */ + hidma_handle_tre_completion(lldev); + + /* We consumed TREs or there are pending TREs or EVREs. */ + writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + + /* + * Another interrupt might have arrived while we are + * processing this one. Read the new cause. 
+ */ + status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); + enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + cause = status & enable; + } + +out: + return IRQ_HANDLED; +} + +int hidma_ll_enable(struct hidma_lldev *lldev) +{ + u32 val; + int ret; + + val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + val &= ~(HIDMA_CH_CONTROL_MASK << 16); + val |= HIDMA_CH_ENABLE << 16; + writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + + ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, + hidma_is_chan_enabled(HIDMA_CH_STATE(val)), + 1000, 10000); + if (ret) { + dev_err(lldev->dev, "event channel did not get enabled\n"); + return ret; + } + + val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + val &= ~(HIDMA_CH_CONTROL_MASK << 16); + val |= HIDMA_CH_ENABLE << 16; + writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + + ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, + hidma_is_chan_enabled(HIDMA_CH_STATE(val)), + 1000, 10000); + if (ret) { + dev_err(lldev->dev, "transfer channel did not get enabled\n"); + return ret; + } + + lldev->trch_state = HIDMA_CH_ENABLED; + lldev->evch_state = HIDMA_CH_ENABLED; + + return 0; +} + +void hidma_ll_start(struct hidma_lldev *lldev) +{ + unsigned long irqflags; + + spin_lock_irqsave(&lldev->lock, irqflags); + writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG); + spin_unlock_irqrestore(&lldev->lock, irqflags); +} + +bool hidma_ll_isenabled(struct hidma_lldev *lldev) +{ + u32 val; + + val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + lldev->trch_state = HIDMA_CH_STATE(val); + val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + lldev->evch_state = HIDMA_CH_STATE(val); + + /* both channels have to be enabled before calling this function */ + if (hidma_is_chan_enabled(lldev->trch_state) && + hidma_is_chan_enabled(lldev->evch_state)) + return true; + + return false; +} + +void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch) +{ + struct hidma_tre *tre; + unsigned long flags; + + tre = &lldev->trepool[tre_ch]; + + /* copy the TRE into its location in the TRE ring */ + spin_lock_irqsave(&lldev->lock, flags); + tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE; + lldev->pending_tre_list[tre->tre_index] = tre; + memcpy(lldev->tre_ring + lldev->tre_write_offset, + &tre->tre_local[0], HIDMA_TRE_SIZE); + tre->err_code = 0; + tre->err_info = 0; + tre->queued = 1; + lldev->pending_tre_count++; + lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE) + % lldev->tre_ring_size; + spin_unlock_irqrestore(&lldev->lock, flags); +} + +/* + * Note that even though we stop this channel if there is a pending transaction + * in flight it will complete and follow the callback. This request will + * prevent further requests to be made. 
+ */ +int hidma_ll_disable(struct hidma_lldev *lldev) +{ + u32 val; + int ret; + + val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + lldev->evch_state = HIDMA_CH_STATE(val); + val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + lldev->trch_state = HIDMA_CH_STATE(val); + + /* already suspended by this OS */ + if ((lldev->trch_state == HIDMA_CH_SUSPENDED) || + (lldev->evch_state == HIDMA_CH_SUSPENDED)) + return 0; + + /* already stopped by the manager */ + if ((lldev->trch_state == HIDMA_CH_STOPPED) || + (lldev->evch_state == HIDMA_CH_STOPPED)) + return 0; + + val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + val &= ~(HIDMA_CH_CONTROL_MASK << 16); + val |= HIDMA_CH_SUSPEND << 16; + writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); + + /* + * Start the wait right after the suspend is confirmed. + * Do a polled read up to 1ms and 10ms maximum. + */ + ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, + HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED, + 1000, 10000); + if (ret) + return ret; + + val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + val &= ~(HIDMA_CH_CONTROL_MASK << 16); + val |= HIDMA_CH_SUSPEND << 16; + writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); + + /* + * Start the wait right after the suspend is confirmed + * Delay up to 10ms after reset to allow DMA logic to quiesce. + */ + ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, + HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED, + 1000, 10000); + if (ret) + return ret; + + lldev->trch_state = HIDMA_CH_SUSPENDED; + lldev->evch_state = HIDMA_CH_SUSPENDED; + return 0; +} + +void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, + dma_addr_t src, dma_addr_t dest, u32 len, + u32 flags) +{ + struct hidma_tre *tre; + u32 *tre_local; + + if (tre_ch >= lldev->nr_tres) { + dev_err(lldev->dev, "invalid TRE number in transfer params:%d", + tre_ch); + return; + } + + tre = &lldev->trepool[tre_ch]; + if (atomic_read(&tre->allocated) != true) { + dev_err(lldev->dev, "trying to set params on an unused TRE:%d", + tre_ch); + return; + } + + tre_local = &tre->tre_local[0]; + tre_local[HIDMA_TRE_LEN_IDX] = len; + tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src); + tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src); + tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest); + tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest); + tre->int_flags = flags; +} + +/* + * Called during initialization and after an error condition + * to restore hardware state. + */ +int hidma_ll_setup(struct hidma_lldev *lldev) +{ + int rc; + u64 addr; + u32 val; + u32 nr_tres = lldev->nr_tres; + + lldev->pending_tre_count = 0; + lldev->tre_processed_off = 0; + lldev->evre_processed_off = 0; + lldev->tre_write_offset = 0; + + /* disable interrupts */ + writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + + /* clear all pending interrupts */ + val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); + writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + + rc = hidma_ll_reset(lldev); + if (rc) + return rc; + + /* + * Clear all pending interrupts again. + * Otherwise, we observe reset complete interrupts. 
+ */ + val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); + writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + + /* disable interrupts again after reset */ + writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + + addr = lldev->tre_dma; + writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG); + writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG); + writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG); + + addr = lldev->evre_dma; + writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG); + writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG); + writel(HIDMA_EVRE_SIZE * nr_tres, + lldev->evca + HIDMA_EVCA_RING_LEN_REG); + + /* support IRQ only for now */ + val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG); + val &= ~0xF; + val |= 0x1; + writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG); + + /* clear all pending interrupts and enable them */ + writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + + return hidma_ll_enable(lldev); +} + +struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres, + void __iomem *trca, void __iomem *evca, + u8 chidx) +{ + u32 required_bytes; + struct hidma_lldev *lldev; + int rc; + size_t sz; + + if (!trca || !evca || !dev || !nr_tres) + return NULL; + + /* need at least four TREs */ + if (nr_tres < 4) + return NULL; + + /* need an extra space */ + nr_tres += 1; + + lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL); + if (!lldev) + return NULL; + + lldev->evca = evca; + lldev->trca = trca; + lldev->dev = dev; + sz = sizeof(struct hidma_tre); + lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL); + if (!lldev->trepool) + return NULL; + + required_bytes = sizeof(lldev->pending_tre_list[0]); + lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes, + GFP_KERNEL); + if (!lldev->pending_tre_list) + return NULL; + + sz = (HIDMA_TRE_SIZE + 1) * nr_tres; + lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma, + GFP_KERNEL); + if (!lldev->tre_ring) + return NULL; + + memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres); + lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres; + lldev->nr_tres = nr_tres; + + /* the TRE ring has to be TRE_SIZE aligned */ + if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) { + u8 tre_ring_shift; + + tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE; + tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift; + lldev->tre_dma += tre_ring_shift; + lldev->tre_ring += tre_ring_shift; + } + + sz = (HIDMA_EVRE_SIZE + 1) * nr_tres; + lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma, + GFP_KERNEL); + if (!lldev->evre_ring) + return NULL; + + memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres); + lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres; + + /* the EVRE ring has to be EVRE_SIZE aligned */ + if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) { + u8 evre_ring_shift; + + evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE; + evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift; + lldev->evre_dma += evre_ring_shift; + lldev->evre_ring += evre_ring_shift; + } + lldev->nr_tres = nr_tres; + lldev->chidx = chidx; + + sz = nr_tres * sizeof(struct hidma_tre *); + rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL); + if (rc) + return NULL; + + rc = hidma_ll_setup(lldev); + if (rc) + return NULL; + + spin_lock_init(&lldev->lock); + tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev); + tasklet_init(&lldev->task, 
hidma_ll_tre_complete, (unsigned long)lldev); + lldev->initialized = 1; + writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + return lldev; +} + +int hidma_ll_uninit(struct hidma_lldev *lldev) +{ + u32 required_bytes; + int rc = 0; + u32 val; + + if (!lldev) + return -ENODEV; + + if (!lldev->initialized) + return 0; + + lldev->initialized = 0; + + required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; + tasklet_kill(&lldev->task); + memset(lldev->trepool, 0, required_bytes); + lldev->trepool = NULL; + lldev->pending_tre_count = 0; + lldev->tre_write_offset = 0; + + rc = hidma_ll_reset(lldev); + + /* + * Clear all pending interrupts again. + * Otherwise, we observe reset complete interrupts. + */ + val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); + writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); + return rc; +} + +enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch) +{ + enum dma_status ret = DMA_ERROR; + struct hidma_tre *tre; + unsigned long flags; + u8 err_code; + + spin_lock_irqsave(&lldev->lock, flags); + + tre = &lldev->trepool[tre_ch]; + err_code = tre->err_code; + + if (err_code & HIDMA_EVRE_STATUS_COMPLETE) + ret = DMA_COMPLETE; + else if (err_code & HIDMA_EVRE_STATUS_ERROR) + ret = DMA_ERROR; + else + ret = DMA_IN_PROGRESS; + spin_unlock_irqrestore(&lldev->lock, flags); + + return ret; +} -- cgit v1.1 From 570d0176296f0d17c4b5ab206ad4a4bc027b863b Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Sun, 1 May 2016 00:25:27 -0400 Subject: dmaengine: qcom_hidma: add debugfs hooks Add debugfs hooks for debugging the execution behavior of the DMA channel. The debugfs hooks get initialized by the probe function and uninitialized by the remove function. A stats file is created in debugfs. The stats file will show the information about each HIDMA channel as well as each asynchronous job queued and completed at a given time. Signed-off-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/Makefile | 2 +- drivers/dma/qcom/hidma.c | 5 +- drivers/dma/qcom/hidma.h | 2 + drivers/dma/qcom/hidma_dbg.c | 217 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 224 insertions(+), 2 deletions(-) create mode 100644 drivers/dma/qcom/hidma_dbg.c (limited to 'drivers/dma') diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile index 6bf9267..4bfc38b 100644 --- a/drivers/dma/qcom/Makefile +++ b/drivers/dma/qcom/Makefile @@ -2,4 +2,4 @@ obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o obj-$(CONFIG_QCOM_HIDMA) += hdma.o -hdma-objs := hidma_ll.o hidma.o +hdma-objs := hidma_ll.o hidma.o hidma_dbg.o diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index af5e542..1ad27c2 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -1,7 +1,7 @@ /* * Qualcomm Technologies HIDMA DMA engine interface * - * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -644,6 +644,7 @@ static int hidma_probe(struct platform_device *pdev) dmadev->irq = chirq; tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); + hidma_debug_init(dmadev); dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); platform_set_drvdata(pdev, dmadev); pm_runtime_mark_last_busy(dmadev->ddev.dev); @@ -651,6 +652,7 @@ static int hidma_probe(struct platform_device *pdev) return 0; uninit: + hidma_debug_uninit(dmadev); hidma_ll_uninit(dmadev->lldev); dmafree: if (dmadev) @@ -668,6 +670,7 @@ static int hidma_remove(struct platform_device *pdev) pm_runtime_get_sync(dmadev->ddev.dev); dma_async_device_unregister(&dmadev->ddev); devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); + hidma_debug_uninit(dmadev); hidma_ll_uninit(dmadev->lldev); hidma_free(dmadev); diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index b1362ef..db413a5 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h @@ -153,4 +153,6 @@ int hidma_ll_uninit(struct hidma_lldev *llhndl); irqreturn_t hidma_ll_inthandler(int irq, void *arg); void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, u8 err_code); +int hidma_debug_init(struct hidma_dev *dmadev); +void hidma_debug_uninit(struct hidma_dev *dmadev); #endif diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c new file mode 100644 index 0000000..fa827e5 --- /dev/null +++ b/drivers/dma/qcom/hidma_dbg.c @@ -0,0 +1,217 @@ +/* + * Qualcomm Technologies HIDMA debug file + * + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#include "hidma.h" + +static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch) +{ + struct hidma_lldev *lldev = llhndl; + struct hidma_tre *tre; + u32 length; + dma_addr_t src_start; + dma_addr_t dest_start; + u32 *tre_local; + + if (tre_ch >= lldev->nr_tres) { + dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch); + return; + } + tre = &lldev->trepool[tre_ch]; + seq_printf(s, "------Channel %d -----\n", tre_ch); + seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated)); + seq_printf(s, "queued = 0x%x\n", tre->queued); + seq_printf(s, "err_info = 0x%x\n", tre->err_info); + seq_printf(s, "err_code = 0x%x\n", tre->err_code); + seq_printf(s, "status = 0x%x\n", tre->status); + seq_printf(s, "idx = 0x%x\n", tre->idx); + seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig); + seq_printf(s, "dev_name=%s\n", tre->dev_name); + seq_printf(s, "callback=%p\n", tre->callback); + seq_printf(s, "data=%p\n", tre->data); + seq_printf(s, "tre_index = 0x%x\n", tre->tre_index); + + tre_local = &tre->tre_local[0]; + src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX]; + src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start; + dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX]; + dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32); + length = tre_local[HIDMA_TRE_LEN_IDX]; + + seq_printf(s, "src=%pap\n", &src_start); + seq_printf(s, "dest=%pap\n", &dest_start); + seq_printf(s, "length = 0x%x\n", length); +} + +static void hidma_ll_devstats(struct seq_file *s, void *llhndl) +{ + struct hidma_lldev *lldev = llhndl; + + seq_puts(s, "------Device -----\n"); + seq_printf(s, "lldev init = 0x%x\n", lldev->initialized); + seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state); + seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state); + seq_printf(s, "chidx = 0x%x\n", lldev->chidx); + seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres); + seq_printf(s, "trca=%p\n", lldev->trca); + seq_printf(s, "tre_ring=%p\n", lldev->tre_ring); + seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma); + seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size); + seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off); + seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count); + seq_printf(s, "evca=%p\n", lldev->evca); + seq_printf(s, "evre_ring=%p\n", lldev->evre_ring); + seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma); + seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size); + seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off); + seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset); +} + +/* + * hidma_chan_stats: display HIDMA channel statistics + * + * Display the statistics for the current HIDMA virtual channel device. 
+ */ +static int hidma_chan_stats(struct seq_file *s, void *unused) +{ + struct hidma_chan *mchan = s->private; + struct hidma_desc *mdesc; + struct hidma_dev *dmadev = mchan->dmadev; + + pm_runtime_get_sync(dmadev->ddev.dev); + seq_printf(s, "paused=%u\n", mchan->paused); + seq_printf(s, "dma_sig=%u\n", mchan->dma_sig); + seq_puts(s, "prepared\n"); + list_for_each_entry(mdesc, &mchan->prepared, node) + hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); + + seq_puts(s, "active\n"); + list_for_each_entry(mdesc, &mchan->active, node) + hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); + + seq_puts(s, "completed\n"); + list_for_each_entry(mdesc, &mchan->completed, node) + hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); + + hidma_ll_devstats(s, mchan->dmadev->lldev); + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); + return 0; +} + +/* + * hidma_dma_info: display HIDMA device info + * + * Display the info for the current HIDMA device. + */ +static int hidma_dma_info(struct seq_file *s, void *unused) +{ + struct hidma_dev *dmadev = s->private; + resource_size_t sz; + + seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors); + seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca); + seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start); + sz = resource_size(dmadev->trca_resource); + seq_printf(s, "dev_trca_size=%pa\n", &sz); + seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca); + seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start); + sz = resource_size(dmadev->evca_resource); + seq_printf(s, "dev_evca_size=%pa\n", &sz); + return 0; +} + +static int hidma_chan_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, hidma_chan_stats, inode->i_private); +} + +static int hidma_dma_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, hidma_dma_info, inode->i_private); +} + +static const struct file_operations hidma_chan_fops = { + .open = hidma_chan_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations hidma_dma_fops = { + .open = hidma_dma_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void hidma_debug_uninit(struct hidma_dev *dmadev) +{ + debugfs_remove_recursive(dmadev->debugfs); + debugfs_remove_recursive(dmadev->stats); +} + +int hidma_debug_init(struct hidma_dev *dmadev) +{ + int rc = 0; + int chidx = 0; + struct list_head *position = NULL; + + dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); + if (!dmadev->debugfs) { + rc = -ENODEV; + return rc; + } + + /* walk through the virtual channel list */ + list_for_each(position, &dmadev->ddev.channels) { + struct hidma_chan *chan; + + chan = list_entry(position, struct hidma_chan, + chan.device_node); + sprintf(chan->dbg_name, "chan%d", chidx); + chan->debugfs = debugfs_create_dir(chan->dbg_name, + dmadev->debugfs); + if (!chan->debugfs) { + rc = -ENOMEM; + goto cleanup; + } + chan->stats = debugfs_create_file("stats", S_IRUGO, + chan->debugfs, chan, + &hidma_chan_fops); + if (!chan->stats) { + rc = -ENOMEM; + goto cleanup; + } + chidx++; + } + + dmadev->stats = debugfs_create_file("stats", S_IRUGO, + dmadev->debugfs, dmadev, + &hidma_dma_fops); + if (!dmadev->stats) { + rc = -ENOMEM; + goto cleanup; + } + + return 0; +cleanup: + hidma_debug_uninit(dmadev); + return rc; +} -- cgit v1.1 From 42d236f8a4479fefb69b20da3962a462e05a112d Mon Sep 17 00:00:00 2001 From: 
Sinan Kaya Date: Sun, 1 May 2016 00:25:28 -0400 Subject: dmaengine: qcom_hidma: add support for object hierarchy In order to create a relationship model between the channels and the management object, we are adding support for object hierarchy to the drivers. This patch simplifies the userspace application development. We will not have to traverse different firmware paths based on device tree or ACPI based kernels. No matter what flavor of kernel is used, objects will be represented as platform devices. The new layout is as follows: hidmam_10: hidma-mgmt@0x5A000000 { compatible = "qcom,hidma-mgmt-1.0"; ... hidma_10: hidma@0x5a010000 { compatible = "qcom,hidma-1.0"; ... } } The hidma_mgmt_init detects each instance of the hidma-mgmt-1.0 objects in device tree and calls into the channel driver to create platform devices for each child of the management object. Signed-off-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 39 ++++++++++++++- drivers/dma/qcom/hidma_mgmt.c | 113 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 147 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 1ad27c2..41b5c6d 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -530,6 +530,43 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg) return hidma_ll_inthandler(chirq, lldev); } +static ssize_t hidma_show_values(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct hidma_dev *mdev = platform_get_drvdata(pdev); + + buf[0] = 0; + + if (strcmp(attr->attr.name, "chid") == 0) + sprintf(buf, "%d\n", mdev->chidx); + + return strlen(buf); +} + +static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, + int mode) +{ + struct device_attribute *attrs; + char *name_copy; + + attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), + GFP_KERNEL); + if (!attrs) + return -ENOMEM; + + name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); + if (!name_copy) + return -ENOMEM; + + attrs->attr.name = name_copy; + attrs->attr.mode = mode; + attrs->show = hidma_show_values; + sysfs_attr_init(&attrs->attr); + + return device_create_file(dev->ddev.dev, attrs); +} + static int hidma_probe(struct platform_device *pdev) { struct hidma_dev *dmadev; @@ -645,6 +682,7 @@ static int hidma_probe(struct platform_device *pdev) dmadev->irq = chirq; tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); hidma_debug_init(dmadev); + hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO); dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); platform_set_drvdata(pdev, dmadev); pm_runtime_mark_last_busy(dmadev->ddev.dev); @@ -692,7 +730,6 @@ static const struct of_device_id hidma_match[] = { {.compatible = "qcom,hidma-1.0",}, {}, }; - MODULE_DEVICE_TABLE(of, hidma_match); static struct platform_driver hidma_driver = { diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index ef491b8..c0e3653 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -1,7 +1,7 @@ /* * Qualcomm Technologies HIDMA DMA engine Management interface * - * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,13 +17,14 @@ #include #include #include -#include -#include +#include +#include #include #include #include #include #include +#include #include "hidma_mgmt.h" @@ -298,5 +299,109 @@ static struct platform_driver hidma_mgmt_driver = { }, }; -module_platform_driver(hidma_mgmt_driver); +#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) +static int object_counter; + +static int __init hidma_mgmt_of_populate_channels(struct device_node *np) +{ + struct platform_device *pdev_parent = of_find_device_by_node(np); + struct platform_device_info pdevinfo; + struct of_phandle_args out_irq; + struct device_node *child; + struct resource *res; + const __be32 *cell; + int ret = 0, size, i, num; + u64 addr, addr_size; + + for_each_available_child_of_node(np, child) { + struct resource *res_iter; + struct platform_device *new_pdev; + + cell = of_get_property(child, "reg", &size); + if (!cell) { + ret = -EINVAL; + goto out; + } + + size /= sizeof(*cell); + num = size / + (of_n_addr_cells(child) + of_n_size_cells(child)) + 1; + + /* allocate a resource array */ + res = kcalloc(num, sizeof(*res), GFP_KERNEL); + if (!res) { + ret = -ENOMEM; + goto out; + } + + /* read each reg value */ + i = 0; + res_iter = res; + while (i < size) { + addr = of_read_number(&cell[i], + of_n_addr_cells(child)); + i += of_n_addr_cells(child); + + addr_size = of_read_number(&cell[i], + of_n_size_cells(child)); + i += of_n_size_cells(child); + + res_iter->start = addr; + res_iter->end = res_iter->start + addr_size - 1; + res_iter->flags = IORESOURCE_MEM; + res_iter++; + } + + ret = of_irq_parse_one(child, 0, &out_irq); + if (ret) + goto out; + + res_iter->start = irq_create_of_mapping(&out_irq); + res_iter->name = "hidma event irq"; + res_iter->flags = IORESOURCE_IRQ; + + memset(&pdevinfo, 0, sizeof(pdevinfo)); + pdevinfo.fwnode = &child->fwnode; + pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL; + pdevinfo.name = child->name; + pdevinfo.id = object_counter++; + pdevinfo.res = res; + pdevinfo.num_res = num; + pdevinfo.data = NULL; + pdevinfo.size_data = 0; + pdevinfo.dma_mask = DMA_BIT_MASK(64); + new_pdev = platform_device_register_full(&pdevinfo); + if (!new_pdev) { + ret = -ENODEV; + goto out; + } + of_dma_configure(&new_pdev->dev, child); + + kfree(res); + res = NULL; + } +out: + kfree(res); + + return ret; +} +#endif + +static int __init hidma_mgmt_init(void) +{ +#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) + struct device_node *child; + + for (child = of_find_matching_node(NULL, hidma_mgmt_match); child; + child = of_find_matching_node(child, hidma_mgmt_match)) { + /* device tree based firmware here */ + hidma_mgmt_of_populate_channels(child); + of_node_put(child); + } +#endif + platform_driver_register(&hidma_mgmt_driver); + + return 0; +} +module_init(hidma_mgmt_init); MODULE_LICENSE("GPL v2"); -- cgit v1.1
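
As a closing illustration of the HIDMA low-level flow described in the
"implement lower level hardware interface" patch above, a sketch of how
a channel driver strings the calls together; the variable names, the
dma_sig value and the flags are placeholders, while the function
signatures are the ones exported through hidma.h in this series:

	/* sketch only: one memcpy transfer through the hidma_ll API */
	u32 tre_ch;
	int rc;

	/* reserve a TRE slot and register a completion callback */
	rc = hidma_ll_request(lldev, dma_sig, "hidma-sketch",
			      my_done_callback, my_ctx, &tre_ch);
	if (rc)
		return rc;

	/* fill in source, destination and length for this TRE */
	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, 0);

	/* copy the TRE into the ring; nothing moves yet */
	hidma_ll_queue_request(lldev, tre_ch);

	/* ring the doorbell so the hardware starts consuming TREs */
	hidma_ll_start(lldev);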