diff options
Diffstat (limited to 'sys/ofed/drivers/net/mlx4/en_cq.c')
-rw-r--r-- | sys/ofed/drivers/net/mlx4/en_cq.c | 136 |
1 file changed, 94 insertions(+), 42 deletions(-)
diff --git a/sys/ofed/drivers/net/mlx4/en_cq.c b/sys/ofed/drivers/net/mlx4/en_cq.c index 9783e23..be043ce 100644 --- a/sys/ofed/drivers/net/mlx4/en_cq.c +++ b/sys/ofed/drivers/net/mlx4/en_cq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -31,12 +31,13 @@ * */ -#include "mlx4_en.h" - #include <linux/mlx4/cq.h> #include <linux/mlx4/qp.h> #include <linux/mlx4/cmd.h> +#include "mlx4_en.h" + + static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) { return; @@ -44,52 +45,72 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) int mlx4_en_create_cq(struct mlx4_en_priv *priv, - struct mlx4_en_cq *cq, - int entries, int ring, enum cq_type mode) + struct mlx4_en_cq **pcq, + int entries, int ring, enum cq_type mode, + int node) { struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq; int err; + cq = kzalloc_node(sizeof(struct mlx4_en_cq), GFP_KERNEL, node); + if (!cq) { + cq = kzalloc(sizeof(struct mlx4_en_cq), GFP_KERNEL); + if (!cq) { + en_err(priv, "Failed to allocate CW struture\n"); + return -ENOMEM; + } + } + cq->size = entries; + cq->buf_size = cq->size * mdev->dev->caps.cqe_size; + cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT, - taskqueue_thread_enqueue, &cq->tq); - if (mode == RX) { - cq->buf_size = cq->size * sizeof(struct mlx4_cqe); - cq->vector = (ring + priv->port) % - mdev->dev->caps.num_comp_vectors; + taskqueue_thread_enqueue, &cq->tq); + if (mode == RX) { TASK_INIT(&cq->cq_task, 0, mlx4_en_rx_que, cq); taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq", - if_name(priv->dev)); + if_name(priv->dev)); + } else { - cq->buf_size = sizeof(struct mlx4_cqe); - cq->vector = MLX4_LEAST_ATTACHED_VECTOR; TASK_INIT(&cq->cq_task, 0, mlx4_en_tx_que, cq); 
taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq", - if_name(priv->dev)); + if_name(priv->dev)); } cq->ring = ring; cq->is_tx = mode; - mtx_init(&cq->lock.m, "mlx4 cq", NULL, MTX_DEF); + spin_lock_init(&cq->lock); err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, cq->buf_size, 2 * PAGE_SIZE); if (err) - return err; + goto err_cq; err = mlx4_en_map_buffer(&cq->wqres.buf); if (err) - mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); - else - cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf; + goto err_res; + + cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf; + *pcq = cq; + return 0; + +err_res: + mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); +err_cq: + kfree(cq); return err; } -int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) + +int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + int cq_idx) { struct mlx4_en_dev *mdev = priv->mdev; - int err; + int err = 0; + char name[25]; + int timestamp_en = 0; cq->dev = mdev->pndev[priv->port]; cq->mcq.set_ci_db = cq->wqres.db.db; @@ -98,52 +119,83 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) *cq->mcq.arm_db = 0; memset(cq->buf, 0, cq->buf_size); - if (!cq->is_tx) - cq->size = priv->rx_ring[cq->ring].actual_size; - + if (cq->is_tx == RX) { + if (mdev->dev->caps.comp_pool) { + if (!cq->vector) { + sprintf(name, "%s-%d", if_name(priv->dev), + cq->ring); + /* Set IRQ for specific name (per ring) */ + if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) { + cq->vector = (cq->ring + 1 + priv->port) + % mdev->dev->caps.num_comp_vectors; + mlx4_warn(mdev, "Failed Assigning an EQ to " + "%s ,Falling back to legacy EQ's\n", + name); + } + } + } else { + cq->vector = (cq->ring + 1 + priv->port) % + mdev->dev->caps.num_comp_vectors; + } + } else { + struct mlx4_en_cq *rx_cq; + /* + * For TX we use the same irq per + * ring we assigned for the RX + */ + cq_idx = cq_idx % priv->rx_ring_num; + rx_cq = priv->rx_cq[cq_idx]; + cq->vector 
= rx_cq->vector; + } - err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, - cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx, 0); - if (err) { + if (!cq->is_tx) + cq->size = priv->rx_ring[cq->ring]->actual_size; + err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, + &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, + cq->vector, 0, timestamp_en); + if (err) return err; - } cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; - if (cq->is_tx) { - init_timer(&cq->timer); - cq->timer.function = mlx4_en_poll_tx_cq; - cq->timer.data = (unsigned long) cq; - } + if (cq->is_tx) { + init_timer(&cq->timer); + cq->timer.function = mlx4_en_poll_tx_cq; + cq->timer.data = (unsigned long) cq; + } + return 0; } -void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) { struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq = *pcq; taskqueue_drain(cq->tq, &cq->cq_task); taskqueue_free(cq->tq); mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); - cq->buf_size = 0; - cq->buf = NULL; - mtx_destroy(&cq->lock.m); + if (priv->mdev->dev->caps.comp_pool && cq->vector) + mlx4_release_eq(priv->mdev->dev, cq->vector); + kfree(cq); + *pcq = NULL; } void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { - struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_dev *mdev = priv->mdev; - taskqueue_drain(cq->tq, &cq->cq_task); - if (cq->is_tx) - del_timer(&cq->timer); + taskqueue_drain(cq->tq, &cq->cq_task); + if (cq->is_tx) + del_timer(&cq->timer); - mlx4_cq_free(mdev->dev, &cq->mcq); + mlx4_cq_free(mdev->dev, &cq->mcq); } + /* Set rx cq moderation parameters */ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { |