Diffstat (limited to 'drivers/net/wireless/ath/ath10k/ce.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 383
1 file changed, 228 insertions(+), 155 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a79499c..d185dc0 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -329,6 +329,33 @@ exit:
 	return ret;
 }
 
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_ring *src_ring = pipe->src_ring;
+	u32 ctrl_addr = pipe->ctrl_addr;
+
+	lockdep_assert_held(&ar_pci->ce_lock);
+
+	/*
+	 * This function must be called only if there is an incomplete
+	 * scatter-gather transfer (before index register is updated)
+	 * that needs to be cleaned up.
+	 */
+	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+		return;
+
+	if (WARN_ON_ONCE(src_ring->write_index ==
+			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+		return;
+
+	src_ring->write_index--;
+	src_ring->write_index &= src_ring->nentries_mask;
+
+	src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
+
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_context,
 		   u32 buffer,
@@ -840,35 +867,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
 				   unsigned int ce_id,
-				   struct ath10k_ce_pipe *ce_state,
 				   const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_ring *src_ring;
-	unsigned int nentries = attr->src_nentries;
-	unsigned int ce_nbytes;
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-	dma_addr_t base_addr;
-	char *ptr;
-
-	nentries = roundup_pow_of_two(nentries);
-
-	if (ce_state->src_ring) {
-		WARN_ON(ce_state->src_ring->nentries != nentries);
-		return 0;
-	}
-
-	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-	if (ptr == NULL)
-		return -ENOMEM;
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
 
-	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
-	src_ring = ce_state->src_ring;
+	nentries = roundup_pow_of_two(attr->src_nentries);
 
-	ptr += sizeof(struct ath10k_ce_ring);
-	src_ring->nentries = nentries;
-	src_ring->nentries_mask = nentries - 1;
+	memset(src_ring->per_transfer_context, 0,
+	       nentries * sizeof(*src_ring->per_transfer_context));
 
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 	src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +887,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
 	src_ring->write_index &= src_ring->nentries_mask;
 
-	src_ring->per_transfer_context = (void **)ptr;
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+					 src_ring->base_addr_ce_space);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot init ce src ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, src_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+				    unsigned int ce_id,
+				    const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	memset(dest_ring->per_transfer_context, 0,
+	       nentries * sizeof(*dest_ring->per_transfer_context));
+
+	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+	dest_ring->sw_index &= dest_ring->nentries_mask;
+	dest_ring->write_index =
+		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+	dest_ring->write_index &= dest_ring->nentries_mask;
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+					  dest_ring->base_addr_ce_space);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot ce dest ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, dest_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *src_ring;
+	u32 nentries = attr->src_nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(nentries);
+
+	src_ring = kzalloc(sizeof(*src_ring) +
+			   (nentries *
+			    sizeof(*src_ring->per_transfer_context)),
+			   GFP_KERNEL);
+	if (src_ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	src_ring->nentries = nentries;
+	src_ring->nentries_mask = nentries - 1;
 
 	/*
 	 * Legacy platforms that do not support cache
 	 * coherent DMA are unsupported
 	 */
 	src_ring->base_addr_owner_space_unaligned =
-		pci_alloc_consistent(ar_pci->pdev,
-				     (nentries * sizeof(struct ce_desc) +
-				      CE_DESC_RING_ALIGN),
-				     &base_addr);
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
 	if (!src_ring->base_addr_owner_space_unaligned) {
-		kfree(ce_state->src_ring);
-		ce_state->src_ring = NULL;
-		return -ENOMEM;
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +987,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 		kmalloc((nentries * sizeof(struct ce_desc) +
 			 CE_DESC_RING_ALIGN), GFP_KERNEL);
 	if (!src_ring->shadow_base_unaligned) {
-		pci_free_consistent(ar_pci->pdev,
-				    (nentries * sizeof(struct ce_desc) +
-				     CE_DESC_RING_ALIGN),
-				    src_ring->base_addr_owner_space,
-				    src_ring->base_addr_ce_space);
-		kfree(ce_state->src_ring);
-		ce_state->src_ring = NULL;
-		return -ENOMEM;
+		dma_free_coherent(ar->dev,
+				  (nentries * sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  src_ring->base_addr_owner_space,
+				  src_ring->base_addr_ce_space);
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	src_ring->shadow_base = PTR_ALIGN(
 			src_ring->shadow_base_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
-					 src_ring->base_addr_ce_space);
-	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
-	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
-	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
-	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
-	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
-	ath10k_dbg(ATH10K_DBG_BOOT,
-		   "boot ce src ring id %d entries %d base_addr %p\n",
-		   ce_id, nentries, src_ring->base_addr_owner_space);
-
-	return 0;
+	return src_ring;
 }
 
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
-				    unsigned int ce_id,
-				    struct ath10k_ce_pipe *ce_state,
-				    const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+			  const struct ce_attr *attr)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_ce_ring *dest_ring;
-	unsigned int nentries = attr->dest_nentries;
-	unsigned int ce_nbytes;
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 nentries;
 	dma_addr_t base_addr;
-	char *ptr;
 
-	nentries = roundup_pow_of_two(nentries);
+	nentries = roundup_pow_of_two(attr->dest_nentries);
 
-	if (ce_state->dest_ring) {
-		WARN_ON(ce_state->dest_ring->nentries != nentries);
-		return 0;
-	}
-
-	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-	if (ptr == NULL)
-		return -ENOMEM;
+	dest_ring = kzalloc(sizeof(*dest_ring) +
+			    (nentries *
+			     sizeof(*dest_ring->per_transfer_context)),
+			    GFP_KERNEL);
+	if (dest_ring == NULL)
+		return ERR_PTR(-ENOMEM);
 
-	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
-	dest_ring = ce_state->dest_ring;
-
-	ptr += sizeof(struct ath10k_ce_ring);
 	dest_ring->nentries = nentries;
 	dest_ring->nentries_mask = nentries - 1;
 
-	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
-	dest_ring->sw_index &= dest_ring->nentries_mask;
-	dest_ring->write_index =
-		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
-	dest_ring->write_index &= dest_ring->nentries_mask;
-
-	dest_ring->per_transfer_context = (void **)ptr;
-
 	/*
 	 * Legacy platforms that do not support cache
 	 * coherent DMA are unsupported
 	 */
 	dest_ring->base_addr_owner_space_unaligned =
-		pci_alloc_consistent(ar_pci->pdev,
-				     (nentries * sizeof(struct ce_desc) +
-				      CE_DESC_RING_ALIGN),
-				     &base_addr);
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
 	if (!dest_ring->base_addr_owner_space_unaligned) {
-		kfree(ce_state->dest_ring);
-		ce_state->dest_ring = NULL;
-		return -ENOMEM;
+		kfree(dest_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1053,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 			dest_ring->base_addr_ce_space_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
-					  dest_ring->base_addr_ce_space);
-	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
-	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
-	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
-	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
-	ath10k_dbg(ATH10K_DBG_BOOT,
-		   "boot ce dest ring id %d entries %d base_addr %p\n",
-		   ce_id, nentries, dest_ring->base_addr_owner_space);
-
-	return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
-						   unsigned int ce_id,
-						   const struct ce_attr *attr)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
-	spin_lock_bh(&ar_pci->ce_lock);
-
-	ce_state->ar = ar;
-	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ctrl_addr;
-	ce_state->attr_flags = attr->flags;
-	ce_state->src_sz_max = attr->src_sz_max;
-
-	spin_unlock_bh(&ar_pci->ce_lock);
-
-	return ce_state;
+	return dest_ring;
 }
 
 /*
@@ -1054,11 +1063,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-				      unsigned int ce_id,
-				      const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+			const struct ce_attr *attr)
 {
-	struct ath10k_ce_pipe *ce_state;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	int ret;
 
 	/*
@@ -1074,64 +1083,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 
 	ret = ath10k_pci_wake(ar);
 	if (ret)
-		return NULL;
+		return ret;
 
-	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
-	if (!ce_state) {
-		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
-		goto out;
-	}
+	spin_lock_bh(&ar_pci->ce_lock);
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
+	spin_unlock_bh(&ar_pci->ce_lock);
 
 	if (attr->src_nentries) {
-		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
 		if (ret) {
 			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
 				   ce_id, ret);
-			ath10k_ce_deinit(ce_state);
-			ce_state = NULL;
 			goto out;
 		}
 	}
 
 	if (attr->dest_nentries) {
-		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
 		if (ret) {
 			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
 				   ce_id, ret);
-			ath10k_ce_deinit(ce_state);
-			ce_state = NULL;
 			goto out;
 		}
 	}
 
 out:
 	ath10k_pci_sleep(ar);
-	return ce_state;
+	return ret;
 }
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
+
+	ath10k_ce_deinit_src_ring(ar, ce_id);
+	ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+	ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	int ret;
+
+	if (attr->src_nentries) {
+		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+		if (IS_ERR(ce_state->src_ring)) {
+			ret = PTR_ERR(ce_state->src_ring);
+			ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->src_ring = NULL;
+			return ret;
+		}
+	}
+
+	if (attr->dest_nentries) {
+		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+								attr);
+		if (IS_ERR(ce_state->dest_ring)) {
+			ret = PTR_ERR(ce_state->dest_ring);
+			ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->dest_ring = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
-	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 
 	if (ce_state->src_ring) {
 		kfree(ce_state->src_ring->shadow_base_unaligned);
-		pci_free_consistent(ar_pci->pdev,
-				    (ce_state->src_ring->nentries *
-				     sizeof(struct ce_desc) +
-				     CE_DESC_RING_ALIGN),
-				    ce_state->src_ring->base_addr_owner_space,
-				    ce_state->src_ring->base_addr_ce_space);
+		dma_free_coherent(ar->dev,
+				  (ce_state->src_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->src_ring->base_addr_owner_space,
+				  ce_state->src_ring->base_addr_ce_space);
 		kfree(ce_state->src_ring);
 	}
 
 	if (ce_state->dest_ring) {
-		pci_free_consistent(ar_pci->pdev,
-				    (ce_state->dest_ring->nentries *
-				     sizeof(struct ce_desc) +
-				     CE_DESC_RING_ALIGN),
-				    ce_state->dest_ring->base_addr_owner_space,
-				    ce_state->dest_ring->base_addr_ce_space);
+		dma_free_coherent(ar->dev,
+				  (ce_state->dest_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->dest_ring->base_addr_owner_space,
+				  ce_state->dest_ring->base_addr_ce_space);
 		kfree(ce_state->dest_ring);
 	}
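
Note: the allocation paths above move from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API (dma_alloc_coherent()/dma_free_coherent() against ar->dev). A minimal sketch of that pattern follows; my_ring, my_desc and my_ring_alloc are illustrative placeholders, not ath10k symbols.

/* Hedged sketch of the dma_alloc_coherent() pattern used in the diff. */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_desc { u32 addr; u32 flags; };

struct my_ring {
	void *vaddr;		/* CPU-visible descriptor memory */
	dma_addr_t paddr;	/* bus address programmed into the hardware */
	unsigned int nentries;
};

static struct my_ring *my_ring_alloc(struct device *dev, unsigned int nentries)
{
	struct my_ring *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	/* dma_alloc_coherent() takes a struct device (ar->dev in the driver)
	 * and an explicit GFP mask, unlike pci_alloc_consistent(). */
	ring->vaddr = dma_alloc_coherent(dev, nentries * sizeof(struct my_desc),
					 &ring->paddr, GFP_KERNEL);
	if (!ring->vaddr) {
		kfree(ring);
		return NULL;
	}

	ring->nentries = nentries;
	return ring;
}

static void my_ring_free(struct device *dev, struct my_ring *ring)
{
	/* Size, CPU address and bus address must match the allocation. */
	dma_free_coherent(dev, ring->nentries * sizeof(struct my_desc),
			  ring->vaddr, ring->paddr);
	kfree(ring);
}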
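The new ath10k_ce_alloc_src_ring()/ath10k_ce_alloc_dest_ring() helpers also report failures through the kernel's ERR_PTR() convention rather than returning NULL. A small sketch of that caller/callee contract, using placeholder names (demo_obj, demo_alloc) rather than ath10k code:

/* Hedged sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention. */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_obj { int id; };

static struct demo_obj *demo_alloc(int id)
{
	struct demo_obj *obj;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* encode an errno in the pointer */

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->id = id;
	return obj;
}

static int demo_use(int id)
{
	struct demo_obj *obj = demo_alloc(id);

	/* Recover the errno on failure, as ath10k_ce_alloc_pipe() does
	 * with PTR_ERR() before clearing its ring pointer. */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	kfree(obj);
	return 0;
}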