author     Upinder Malhi <umalhi@cisco.com>           2014-01-09 14:48:07 -0800
committer  Roland Dreier <roland@purestorage.com>     2014-01-14 00:44:41 -0800
commit     2183b990b67b761f81c68a18f60df028e080cf05 (patch)
tree       a4584a808fa389bd51394b4e8c1ef5fe2979fe48
parent     301a0dd68e5ddd22d992a58f466b621987d9df3b (diff)
download   op-kernel-dev-2183b990b67b761f81c68a18f60df028e080cf05.zip
           op-kernel-dev-2183b990b67b761f81c68a18f60df028e080cf05.tar.gz
IB/usnic: Push all forwarding state to usnic_fwd.[hc]
Push all of the usnic device forwarding state (such as mtu and mac) to
usnic_fwd_dev. Furthermore, usnic_fwd.h exposes an improved interface
for the rest of the usnic code. The primary improvement is that
usnic_fwd.h's flow management interface now takes high-level *filter*
and *action* structures, instead of low-level parameters such as
vnic_idx and rq_idx.
Signed-off-by: Upinder Malhi <umalhi@cisco.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
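
With this change the forwarding state lives in struct usnic_fwd_dev and is
updated only through the new accessor functions. The following is a minimal
sketch, not part of this patch, of how a consumer might feed netdev events
into those accessors; the notifier wiring and the usnic_example_lookup_ufdev()
helper are hypothetical and exist only for illustration.

#include <linux/netdevice.h>
#include <linux/notifier.h>

#include "usnic_fwd.h"

/* Hypothetical helper: map a struct net_device back to its usnic_fwd_dev. */
struct usnic_fwd_dev *usnic_example_lookup_ufdev(struct net_device *netdev);

static int usnic_example_netdev_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct usnic_fwd_dev *ufdev = usnic_example_lookup_ufdev(netdev);

	if (!ufdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		usnic_fwd_carrier_up(ufdev);
		break;
	case NETDEV_DOWN:
	case NETDEV_REBOOT:
		/*
		 * Flows on this device stop working and must be freed by
		 * their owners via usnic_fwd_dealloc_flow().
		 */
		usnic_fwd_carrier_down(ufdev);
		break;
	case NETDEV_CHANGEADDR:
		usnic_fwd_set_mac(ufdev, netdev->dev_addr);
		break;
	case NETDEV_CHANGEMTU:
		usnic_fwd_set_mtu(ufdev, netdev->mtu);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}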
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_fwd.c  | 269
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_fwd.h  |  68
2 files changed, 212 insertions(+), 125 deletions(-)
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 8e42216..33fdd77 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -23,6 +23,49 @@
 #include "usnic_fwd.h"
 #include "usnic_log.h"
 
+static int usnic_fwd_devcmd_locked(struct usnic_fwd_dev *ufdev, int vnic_idx,
+					enum vnic_devcmd_cmd cmd, u64 *a0,
+					u64 *a1)
+{
+	int status;
+	struct net_device *netdev = ufdev->netdev;
+
+	lockdep_assert_held(&ufdev->lock);
+
+	status = enic_api_devcmd_proxy_by_index(netdev,
+			vnic_idx,
+			cmd,
+			a0, a1,
+			1000);
+	if (status) {
+		if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) {
+			usnic_dbg("Dev %s vnic idx %u cmd %u already deleted",
+					ufdev->name, vnic_idx, cmd);
+		} else {
+			usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n",
+					ufdev->name, vnic_idx, cmd,
+					status);
+		}
+	} else {
+		usnic_dbg("Dev %s vnic idx %u cmd %u success",
+				ufdev->name, vnic_idx, cmd);
+	}
+
+	return status;
+}
+
+static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx,
+				enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
+{
+	int status;
+
+	spin_lock(&ufdev->lock);
+	status = usnic_fwd_devcmd_locked(ufdev, vnic_idx, cmd, a0, a1);
+	spin_unlock(&ufdev->lock);
+
+	return status;
+}
+
 struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
 {
 	struct usnic_fwd_dev *ufdev;
@@ -34,6 +77,8 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
 	ufdev->pdev = pdev;
 	ufdev->netdev = pci_get_drvdata(pdev);
 	spin_lock_init(&ufdev->lock);
+	strncpy(ufdev->name, netdev_name(ufdev->netdev),
+			sizeof(ufdev->name) - 1);
 
 	return ufdev;
 }
@@ -43,200 +88,208 @@ void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
 	kfree(ufdev);
 }
 
-static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx,
-				enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
 {
-	int status;
-	struct net_device *netdev = ufdev->netdev;
+	spin_lock(&ufdev->lock);
+	memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
+	spin_unlock(&ufdev->lock);
+}
 
+void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev)
+{
 	spin_lock(&ufdev->lock);
-	status = enic_api_devcmd_proxy_by_index(netdev,
-			vnic_idx,
-			cmd,
-			a0, a1,
-			1000);
+	ufdev->link_up = 1;
 	spin_unlock(&ufdev->lock);
-	if (status) {
-		if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) {
-			usnic_dbg("Dev %s vnic idx %u cmd %u already deleted",
-				netdev_name(netdev), vnic_idx, cmd);
-		} else {
-			usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n",
-				netdev_name(netdev), vnic_idx, cmd,
-				status);
-		}
-	} else {
-		usnic_dbg("Dev %s vnic idx %u cmd %u success",
-				netdev_name(netdev), vnic_idx,
-				cmd);
-	}
+}
 
-	return status;
+void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev)
+{
+	spin_lock(&ufdev->lock);
+	ufdev->link_up = 0;
+	spin_unlock(&ufdev->lock);
+}
+
+void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu)
+{
+	spin_lock(&ufdev->lock);
+	ufdev->mtu = mtu;
+	spin_unlock(&ufdev->lock);
+}
+
+static int usnic_fwd_dev_ready_locked(struct usnic_fwd_dev *ufdev)
+{
+	lockdep_assert_held(&ufdev->lock);
+
+	if (!ufdev->link_up)
+		return -EPERM;
+
+	return 0;
+}
+
+static void fill_tlv(struct filter_tlv *tlv, struct filter *filter,
+		struct filter_action *action)
+{
+	tlv->type = CLSF_TLV_FILTER;
+	tlv->length = sizeof(struct filter);
+	*((struct filter *)&tlv->val) = *filter;
+
+	tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) +
+			sizeof(struct filter));
+	tlv->type = CLSF_TLV_ACTION;
+	tlv->length = sizeof(struct filter_action);
+	*((struct filter_action *)&tlv->val) = *action;
 }
 
-int usnic_fwd_add_usnic_filter(struct usnic_fwd_dev *ufdev, int vnic_idx,
-				int rq_idx, struct usnic_fwd_filter *fwd_filter,
-				struct usnic_fwd_filter_hndl **filter_hndl)
+struct usnic_fwd_flow*
+usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
+				struct usnic_filter_action *uaction)
 {
-	struct filter_tlv *tlv, *tlv_va;
-	struct filter *filter;
-	struct filter_action *action;
+	struct filter_tlv *tlv;
 	struct pci_dev *pdev;
-	struct usnic_fwd_filter_hndl *usnic_filter_hndl;
-	int status;
-	u64 a0, a1;
-	u64 tlv_size;
+	struct usnic_fwd_flow *flow;
+	uint64_t a0, a1;
+	uint64_t tlv_size;
 	dma_addr_t tlv_pa;
+	int status;
 
 	pdev = ufdev->pdev;
-	tlv_size = (2*sizeof(struct filter_tlv) +
-		sizeof(struct filter) +
-		sizeof(struct filter_action));
+	tlv_size = (2*sizeof(struct filter_tlv) + sizeof(struct filter) +
+			sizeof(struct filter_action));
+
+	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+	if (!flow)
+		return ERR_PTR(-ENOMEM);
+
 	tlv = pci_alloc_consistent(pdev, tlv_size, &tlv_pa);
 	if (!tlv) {
 		usnic_err("Failed to allocate memory\n");
-		return -ENOMEM;
+		status = -ENOMEM;
+		goto out_free_flow;
 	}
 
-	usnic_filter_hndl = kzalloc(sizeof(*usnic_filter_hndl), GFP_ATOMIC);
-	if (!usnic_filter_hndl) {
-		usnic_err("Failed to allocate memory for hndl\n");
-		pci_free_consistent(pdev, tlv_size, tlv, tlv_pa);
-		return -ENOMEM;
+	fill_tlv(tlv, filter, &uaction->action);
+
+	spin_lock(&ufdev->lock);
+	status = usnic_fwd_dev_ready_locked(ufdev);
+	if (status) {
+		usnic_err("Forwarding dev %s not ready with status %d\n",
+				ufdev->name, status);
+		goto out_free_tlv;
 	}
 
-	tlv_va = tlv;
+	/* Issue Devcmd */
 	a0 = tlv_pa;
 	a1 = tlv_size;
-	memset(tlv, 0, tlv_size);
-	tlv->type = CLSF_TLV_FILTER;
-	tlv->length = sizeof(struct filter);
-	filter = (struct filter *)&tlv->val;
-	filter->type = FILTER_USNIC_ID;
-	filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
-	filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
-				FILTER_FIELD_USNIC_ID |
-				FILTER_FIELD_USNIC_PROTO;
-	filter->u.usnic.proto_version = (USNIC_ROCE_GRH_VER <<
-					 USNIC_ROCE_GRH_VER_SHIFT)
-					 | USNIC_PROTO_VER;
-	filter->u.usnic.usnic_id = fwd_filter->port_num;
-	tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) +
-			sizeof(struct filter));
-	tlv->type = CLSF_TLV_ACTION;
-	tlv->length = sizeof(struct filter_action);
-	action = (struct filter_action *)&tlv->val;
-	action->type = FILTER_ACTION_RQ_STEERING;
-	action->u.rq_idx = rq_idx;
-
-	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_ADD_FILTER, &a0, &a1);
-	pci_free_consistent(pdev, tlv_size, tlv_va, tlv_pa);
+	status = usnic_fwd_devcmd_locked(ufdev, uaction->vnic_idx,
+					CMD_ADD_FILTER, &a0, &a1);
 	if (status) {
 		usnic_err("VF %s Filter add failed with status:%d",
-			pci_name(pdev),
-			status);
-		kfree(usnic_filter_hndl);
-		return status;
+				ufdev->name, status);
+		status = -EFAULT;
+		goto out_free_tlv;
 	} else {
-		usnic_dbg("VF %s FILTER ID:%u",
-			pci_name(pdev),
-			(u32)a0);
+		usnic_dbg("VF %s FILTER ID:%llu", ufdev->name, a0);
 	}
 
-	usnic_filter_hndl->type = FILTER_USNIC_ID;
-	usnic_filter_hndl->id = (u32)a0;
-	usnic_filter_hndl->vnic_idx = vnic_idx;
-	usnic_filter_hndl->ufdev = ufdev;
-	usnic_filter_hndl->filter = fwd_filter;
+	flow->flow_id = (uint32_t) a0;
+	flow->vnic_idx = uaction->vnic_idx;
+	flow->ufdev = ufdev;
 
-	*filter_hndl = usnic_filter_hndl;
-
-	return status;
+out_free_tlv:
+	spin_unlock(&ufdev->lock);
+	pci_free_consistent(pdev, tlv_size, tlv, tlv_pa);
+	if (!status)
+		return flow;
+out_free_flow:
+	kfree(flow);
+	return ERR_PTR(status);
 }
 
-int usnic_fwd_del_filter(struct usnic_fwd_filter_hndl *filter_hndl)
+int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow)
 {
 	int status;
 	u64 a0, a1;
-	struct net_device *netdev;
 
-	netdev = filter_hndl->ufdev->netdev;
-	a0 = filter_hndl->id;
+	a0 = flow->flow_id;
 
-	status = usnic_fwd_devcmd(filter_hndl->ufdev, filter_hndl->vnic_idx,
+	status = usnic_fwd_devcmd(flow->ufdev, flow->vnic_idx,
 					CMD_DEL_FILTER, &a0, &a1);
 	if (status) {
 		if (status == ERR_EINVAL) {
 			usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d",
-				filter_hndl->id, filter_hndl->vnic_idx,
-				netdev_name(netdev), status);
-			status = 0;
-			kfree(filter_hndl);
+					flow->flow_id, flow->vnic_idx,
+					flow->ufdev->name, status);
 		} else {
 			usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d",
-				netdev_name(netdev),
-				filter_hndl->vnic_idx, filter_hndl->id,
-				status);
+					flow->ufdev->name, flow->vnic_idx,
+					flow->flow_id, status);
 		}
+		status = 0;
+		/*
+		 * Log the error and fake success to the caller because if
+		 * a flow fails to be deleted in the firmware, it is an
+		 * unrecoverable error.
+		 */
 	} else {
 		usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED",
-			netdev_name(netdev), filter_hndl->vnic_idx,
-			filter_hndl->id);
-		kfree(filter_hndl);
+				flow->ufdev->name, flow->vnic_idx,
+				flow->flow_id);
 	}
 
+	kfree(flow);
 	return status;
 }
 
-int usnic_fwd_enable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx)
+int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
 {
 	int status;
 	struct net_device *pf_netdev;
 	u64 a0, a1;
 
 	pf_netdev = ufdev->netdev;
-	a0 = rq_idx;
+	a0 = qp_idx;
 	a1 = CMD_QP_RQWQ;
 
-	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE, &a0, &a1);
-
+	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE,
+					&a0, &a1);
 	if (status) {
 		usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d",
 				netdev_name(pf_netdev),
 				vnic_idx,
-				rq_idx,
+				qp_idx,
 				status);
 	} else {
 		usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED",
 				netdev_name(pf_netdev),
-				vnic_idx, rq_idx);
+				vnic_idx, qp_idx);
 	}
 
 	return status;
 }
 
-int usnic_fwd_disable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx)
+int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
 {
 	int status;
 	u64 a0, a1;
 	struct net_device *pf_netdev;
 
 	pf_netdev = ufdev->netdev;
-	a0 = rq_idx;
+	a0 = qp_idx;
 	a1 = CMD_QP_RQWQ;
 
-	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE, &a0, &a1);
-
+	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE,
+					&a0, &a1);
 	if (status) {
 		usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d",
 				netdev_name(pf_netdev),
 				vnic_idx,
-				rq_idx,
+				qp_idx,
 				status);
 	} else {
 		usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED",
 				netdev_name(pf_netdev),
 				vnic_idx,
-				rq_idx);
+				qp_idx);
 	}
 
 	return status;
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index 6973901..b146eb9 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -20,39 +20,73 @@
 #define USNIC_FWD_H_
 
 #include <linux/if.h>
+#include <linux/netdevice.h>
 #include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/in.h>
 
 #include "usnic_abi.h"
+#include "usnic_common_pkt_hdr.h"
 #include "vnic_devcmd.h"
 
 struct usnic_fwd_dev {
 	struct pci_dev			*pdev;
	struct net_device		*netdev;
 	spinlock_t			lock;
+	/*
+	 * The following fields can be read directly off the device.
+	 * However, they should be set by a accessor function, except name,
+	 * which cannot be changed.
+	 */
+	bool				link_up;
+	char				mac[ETH_ALEN];
+	unsigned int			mtu;
+	char				name[IFNAMSIZ+1];
 };
 
-struct usnic_fwd_filter {
-	enum usnic_transport_type	transport;
-	u16				port_num;
+struct usnic_fwd_flow {
+	uint32_t			flow_id;
+	struct usnic_fwd_dev		*ufdev;
+	unsigned int			vnic_idx;
 };
 
-struct usnic_fwd_filter_hndl {
-	enum filter_type		type;
-	u32				id;
-	u32				vnic_idx;
-	struct usnic_fwd_dev		*ufdev;
-	struct list_head		link;
-	struct usnic_fwd_filter		*filter;
+struct usnic_filter_action {
+	int				vnic_idx;
+	struct filter_action		action;
 };
 
 struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
 void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
-int usnic_fwd_add_usnic_filter(struct usnic_fwd_dev *ufdev, int vnic_idx,
-				int rq_idx, struct usnic_fwd_filter *filter,
-				struct usnic_fwd_filter_hndl **filter_hndl);
-int usnic_fwd_del_filter(struct usnic_fwd_filter_hndl *filter_hndl);
-int usnic_fwd_enable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx);
-int usnic_fwd_disable_rq(struct usnic_fwd_dev *ufdev, int vnic_idx, int rq_idx);
+
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
+void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu);
+
+/*
+ * Allocate a flow on this forwarding device. Whoever calls this function,
+ * must monitor netdev events on ufdev's netdevice. If NETDEV_REBOOT or
+ * NETDEV_DOWN is seen, flow will no longer function and must be
+ * immediately freed by calling usnic_dealloc_flow.
+ */
+struct usnic_fwd_flow*
+usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
+				struct usnic_filter_action *action);
+int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow);
+int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
+int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
+
+static inline void usnic_fwd_init_usnic_filter(struct filter *filter,
+						uint32_t usnic_id)
+{
+	filter->type = FILTER_USNIC_ID;
+	filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
+	filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
+				FILTER_FIELD_USNIC_ID |
+				FILTER_FIELD_USNIC_PROTO;
+	filter->u.usnic.proto_version = (USNIC_ROCE_GRH_VER <<
+					 USNIC_ROCE_GRH_VER_SHIFT) |
+					 USNIC_PROTO_VER;
+	filter->u.usnic.usnic_id = usnic_id;
+}
 
 #endif /* !USNIC_FWD_H_ */
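
For comparison with the removed usnic_fwd_add_usnic_filter() path, the
following is a minimal, hypothetical caller of the new flow-management
interface. The function name and the vnic_idx, qp_idx and usnic_id values are
placeholders, and the error handling is a sketch rather than code from this
patch; the caller is still expected to watch ufdev's netdevice for
NETDEV_DOWN/NETDEV_REBOOT, as the comment above usnic_fwd_alloc_flow()
requires.

#include <linux/err.h>

#include "usnic_fwd.h"

static int usnic_example_open_flow(struct usnic_fwd_dev *ufdev)
{
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_fwd_flow *flow;
	int vnic_idx = 0;		/* placeholder vNIC index */
	int qp_idx = 0;			/* placeholder RQ/QP index */
	int err;

	/* Build a high-level filter instead of hand-built devcmd TLVs. */
	usnic_fwd_init_usnic_filter(&filter, 1 /* placeholder usnic_id */);

	/* Steer matching packets to this vNIC's receive queue. */
	uaction.vnic_idx = vnic_idx;
	uaction.action.type = FILTER_ACTION_RQ_STEERING;
	uaction.action.u.rq_idx = qp_idx;

	flow = usnic_fwd_alloc_flow(ufdev, &filter, &uaction);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	err = usnic_fwd_enable_qp(ufdev, vnic_idx, qp_idx);
	if (err) {
		usnic_fwd_dealloc_flow(flow);
		return err;
	}

	/*
	 * Teardown (not shown): usnic_fwd_disable_qp(), then
	 * usnic_fwd_dealloc_flow().
	 */
	return 0;
}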