author		David S. Miller <davem@davemloft.net>	2014-05-24 00:32:30 -0400
committer	David S. Miller <davem@davemloft.net>	2014-05-24 00:32:30 -0400
commit		54e5c4def0614ab540fbdf68e45342a4af141702 (patch)
tree		95a2f61c72336932e83d9e4180cd9739106d624b /drivers/infiniband
parent		be65de7174123e02477bd488db1a657caf0f9947 (diff)
parent		1ee1ceafb572f1a925809168267a7962a4289de8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/bonding/bond_alb.c
	drivers/net/ethernet/altera/altera_msgdma.c
	drivers/net/ethernet/altera/altera_sgdma.c
	net/ipv6/xfrm6_output.c

Several cases of overlapping changes.

The xfrm6_output.c has a bug fix which overlaps the renaming of
skb->local_df to skb->ignore_df.

In the Altera TSE driver cases, the register access cleanups in
net-next overlapped with bug fixes done in net.

Similarly a bug fix to send ALB packets in the bonding driver using
the right source address overlaps with cleanups in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	| 67
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	|  3
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c		|  8
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.c	| 38
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.h	|  2
5 files changed, 93 insertions, 25 deletions
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe15..199c789 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
#include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
}
#endif
+#define MLX4_IB_INVALID_MAC ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ int port)
+{
+ u64 new_smac = 0;
+ u64 release_mac = MLX4_IB_INVALID_MAC;
+ struct mlx4_ib_qp *qp;
+
+ read_lock(&dev_base_lock);
+ new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ read_unlock(&dev_base_lock);
+
+ mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+ qp = ibdev->qp1_proxy[port - 1];
+ if (qp) {
+ int new_smac_index;
+ u64 old_smac = qp->pri.smac;
+ struct mlx4_update_qp_params update_params;
+
+ if (new_smac == old_smac)
+ goto unlock;
+
+ new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+ if (new_smac_index < 0)
+ goto unlock;
+
+ update_params.smac_index = new_smac_index;
+ if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+ &update_params)) {
+ release_mac = new_smac;
+ goto unlock;
+ }
+
+ qp->pri.smac = new_smac;
+ qp->pri.smac_index = new_smac_index;
+
+ release_mac = old_smac;
+ }
+
+unlock:
+ mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+ if (release_mac != MLX4_IB_INVALID_MAC)
+ mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
static void mlx4_ib_get_dev_addr(struct net_device *dev,
struct mlx4_ib_dev *ibdev, u8 port)
{
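
The mlx4_ib_update_qps() addition above registers the new source MAC, swaps it
into the proxy QP1 under the per-port qp1_proxy_lock, and defers
mlx4_unregister_mac() for whichever MAC is no longer needed (the old one on
success, the new one on failure) until after the mutex is released. Below is a
minimal userspace sketch of that "swap under the lock, release outside the
lock" pattern; register_mac(), update_qp_smac() and struct port_state are
hypothetical stand-ins for the kernel helpers, not real APIs.

/*
 * Userspace analogue of the swap-then-release pattern used by
 * mlx4_ib_update_qps().  All names here are illustrative stubs.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_MAC ((uint64_t)-1)

struct port_state {
	pthread_mutex_t	lock;
	uint64_t	smac;		/* MAC currently programmed on the QP */
	int		smac_index;
};

static int register_mac(uint64_t mac)		/* stands in for mlx4_register_mac() */
{
	return (int)(mac & 0x7f);
}

static void unregister_mac(uint64_t mac)	/* stands in for mlx4_unregister_mac() */
{
	printf("released %llx\n", (unsigned long long)mac);
}

static int update_qp_smac(int index)		/* stands in for mlx4_update_qp() */
{
	(void)index;
	return 0;
}

static void update_port_smac(struct port_state *ps, uint64_t new_smac)
{
	uint64_t release = INVALID_MAC;		/* MAC to drop once unlocked */
	int new_index;

	pthread_mutex_lock(&ps->lock);
	if (new_smac == ps->smac)
		goto unlock;

	new_index = register_mac(new_smac);
	if (new_index < 0)
		goto unlock;

	if (update_qp_smac(new_index)) {
		release = new_smac;		/* update failed: drop the new MAC */
		goto unlock;
	}

	release = ps->smac;			/* success: drop the old MAC */
	ps->smac = new_smac;
	ps->smac_index = new_index;
unlock:
	pthread_mutex_unlock(&ps->lock);
	if (release != INVALID_MAC)
		unregister_mac(release);	/* potentially slow; done unlocked */
}

int main(void)
{
	struct port_state ps = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.smac = 0x001122334455ULL,
		.smac_index = 0,
	};

	update_port_smac(&ps, 0x665544332211ULL);
	return 0;
}
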
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
return 0;
}
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ unsigned long event)
+
{
struct mlx4_ib_iboe *iboe;
+ int update_qps_port = -1;
int port;
iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
curr_master = iboe->masters[port - 1];
+ if (dev == iboe->netdevs[port - 1] &&
+ (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+ event == NETDEV_UP || event == NETDEV_CHANGE))
+ update_qps_port = port;
+
if (curr_netdev) {
port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
spin_unlock(&iboe->lock);
+
+ if (update_qps_port > 0)
+ mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, dev, event);
return NOTIFY_DONE;
}
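
mlx4_ib_netdev_event() now passes both the net_device and the event type down
to mlx4_ib_scan_netdevs(), which uses them to decide whether the proxy QP1
source MAC needs refreshing (NETDEV_REGISTER, NETDEV_UP, NETDEV_CHANGE and
NETDEV_CHANGEADDR). For reference, a minimal netdev notifier that filters the
same events could look like the sketch below; the demo module is illustrative
only and not part of this patch.

/* Minimal netdev-event notifier sketch (illustrative, not mlx4 code). */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_CHANGEADDR:
		pr_debug("%s: event %lu may change the port MAC\n",
			 dev->name, event);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
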
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_map;
for (i = 0; i < ibdev->num_ports; ++i) {
+ mutex_init(&ibdev->qp1_proxy_lock[i]);
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (i = 1 ; i <= ibdev->num_ports ; ++i)
reset_gid_table(ibdev, i);
rtnl_lock();
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, NULL, 0);
rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522..66b0b7d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
int steer_qpn_count;
int steer_qpn_base;
int steering_support;
+ struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
+ /* lock when destroying qp1_proxy and getting netdev events */
+ struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
};
struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af..dc57482 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
+ if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+ mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ dev->qp1_proxy[mqp->port - 1] = NULL;
+ mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ }
+
pd = get_pd(mqp);
destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
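
The destroy path is the other half of the locking scheme noted in mlx4_ib.h:
before a proxy QP1 is torn down, its back-pointer is cleared under
qp1_proxy_lock so a concurrent netdev event cannot pick up a QP that is about
to be freed. A userspace sketch of the same unpublish-before-free idea, with
hypothetical names:

/*
 * Userspace sketch of unpublish-before-free; struct qp, qp1_proxy and
 * proxy_lock are illustrative, not the kernel objects.
 */
#include <pthread.h>
#include <stdlib.h>

struct qp { int dummy; };

static pthread_mutex_t proxy_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qp *qp1_proxy;		/* shared, read by event handlers */

static void destroy_qp(struct qp *qp)
{
	pthread_mutex_lock(&proxy_lock);
	if (qp1_proxy == qp)
		qp1_proxy = NULL;	/* unpublish before the free */
	pthread_mutex_unlock(&proxy_lock);

	free(qp);			/* no handler can reach it any more */
}

int main(void)
{
	struct qp *qp = malloc(sizeof(*qp));

	qp1_proxy = qp;
	destroy_qp(qp);
	return 0;
}
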
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
if (err)
return -EINVAL;
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+ dev->qp1_proxy[qp->port - 1] = qp;
}
}
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index c98fdb1..a171046 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -28,6 +28,7 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
+#include <linux/semaphore.h>
#include "isert_proto.h"
#include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
struct isert_device *device;
struct ib_device *ib_dev = cma_id->device;
int ret = 0;
- u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
+ u8 pi_support;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (!np->enabled) {
+ spin_unlock_bh(&np->np_thread_lock);
+ pr_debug("iscsi_np is not enabled, reject connect request\n");
+ return rdma_reject(cma_id, NULL, 0);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_mr;
}
+ pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
if (pi_support && !device->pi_capable) {
pr_err("Protection information requested but not supported\n");
ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_conn_dev;
mutex_lock(&isert_np->np_accept_mutex);
- list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
+ list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
- pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
- wake_up(&isert_np->np_accept_wq);
+ pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+ up(&isert_np->np_sem);
return 0;
out_conn_dev:
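
Besides switching the wakeup to up(&isert_np->np_sem), the hunk above fixes a
pair of swapped list_add_tail() arguments: the first argument is the entry
being added and the second is the list head, so the connection's
conn_accept_node is now queued onto np_accept_list rather than the list head
being spliced onto the connection. A small illustrative module showing the
<linux/list.h> calling convention (the demo structure and list are not part of
the patch):

/* Illustrative use of list_add_tail(new_entry, list_head). */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>

struct demo_conn {
	int id;
	struct list_head accept_node;	/* link within the accept list */
};

static LIST_HEAD(accept_list);		/* list head owned by the listener */

static int __init demo_list_init(void)
{
	struct demo_conn *conn = kzalloc(sizeof(*conn), GFP_KERNEL);

	if (!conn)
		return -ENOMEM;

	conn->id = 1;
	/* first argument: the new entry, second: the list to append to */
	list_add_tail(&conn->accept_node, &accept_list);
	return 0;
}

static void __exit demo_list_exit(void)
{
	struct demo_conn *conn, *tmp;

	list_for_each_entry_safe(conn, tmp, &accept_list, accept_node) {
		list_del(&conn->accept_node);
		kfree(conn);
	}
}

module_init(demo_list_init);
module_exit(demo_list_exit);
MODULE_LICENSE("GPL");
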
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
pr_err("Unable to allocate struct isert_np\n");
return -ENOMEM;
}
- init_waitqueue_head(&isert_np->np_accept_wq);
+ sema_init(&isert_np->np_sem, 0);
mutex_init(&isert_np->np_accept_mutex);
INIT_LIST_HEAD(&isert_np->np_accept_list);
init_completion(&isert_np->np_login_comp);
@@ -3048,18 +3058,6 @@ out:
}
static int
-isert_check_accept_queue(struct isert_np *isert_np)
-{
- int empty;
-
- mutex_lock(&isert_np->np_accept_mutex);
- empty = list_empty(&isert_np->np_accept_list);
- mutex_unlock(&isert_np->np_accept_mutex);
-
- return empty;
-}
-
-static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
int max_accept = 0, ret;
accept_wait:
- ret = wait_event_interruptible(isert_np->np_accept_wq,
- !isert_check_accept_queue(isert_np) ||
- np->np_thread_state == ISCSI_NP_THREAD_RESET);
+ ret = down_interruptible(&isert_np->np_sem);
if (max_accept > 5)
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+ pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
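
Taken together, the isert changes replace the np_accept_wq wait queue and its
isert_check_accept_queue() predicate with a counting semaphore:
isert_connect_request() does up(&isert_np->np_sem) once a connection is queued
on np_accept_list, and isert_accept_np() blocks in down_interruptible() until
something is pending. A userspace sketch of the same producer/consumer
handshake using POSIX semaphores (names and counts are illustrative):

/*
 * Userspace sketch of the wait-queue -> counting-semaphore conversion:
 * the producer posts once per queued request, the consumer blocks until
 * at least one is pending.  Not the isert code itself.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t pending;			/* counts queued connect requests */

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; i++) {
		sleep(1);		/* a connect request arrives */
		printf("producer: queued request %d\n", i);
		sem_post(&pending);	/* was: wake_up(&np_accept_wq) */
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;

	sem_init(&pending, 0, 0);	/* was: sema_init(&np_sem, 0) */
	pthread_create(&thr, NULL, producer, NULL);

	for (int i = 0; i < 3; i++) {
		sem_wait(&pending);	/* was: down_interruptible(&np_sem) */
		printf("consumer: accepted request %d\n", i);
	}

	pthread_join(thr, NULL);
	sem_destroy(&pending);
	return 0;
}
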
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 4c072ae..da6612e 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -182,7 +182,7 @@ struct isert_device {
};
struct isert_np {
- wait_queue_head_t np_accept_wq;
+ struct semaphore np_sem;
struct rdma_cm_id *np_cm_id;
struct mutex np_accept_mutex;
struct list_head np_accept_list;