Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 181
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 31
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 19
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 23
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 22
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 55
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 13
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 2
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 10
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 68
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wan/cosa.c | 4
-rw-r--r--  drivers/net/wireless/cw1200/debug.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 261
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sf.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 12
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 7
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_core.c | 2
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 21
-rw-r--r--  drivers/net/wireless/ti/wl18xx/event.h | 20
-rw-r--r--  drivers/net/wireless/ti/wlcore/event.c | 5
47 files changed, 649 insertions(+), 337 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a8efb18..0ab8370 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8627,6 +8627,7 @@ bnx2_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+#ifdef CONFIG_PM_SLEEP
static int
bnx2_suspend(struct device *device)
{
@@ -8665,7 +8666,6 @@ bnx2_resume(struct device *device)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b9f7022..e5d95c5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12286,7 +12286,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (tg3_flag(tp, MAX_RXPEND_64) &&
tp->rx_pending > 63)
tp->rx_pending = 63;
- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].tx_pending = ering->tx_pending;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 751d5c7..7e49c43 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
config NET_CADENCE
bool "Cadence devices"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -22,7 +22,7 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
- depends on HAS_DMA
+ depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
select MACB
---help---
If you wish to compile a kernel for the AT91RM9200 and enable
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
config MACB
tristate "Cadence MACB/GEM support"
- depends on HAS_DMA
+ depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 81e8402..8a96572 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -154,7 +154,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
req->l2t_idx = htons(e->idx);
req->vlan = htons(e->vlan);
- if (e->neigh)
+ if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
@@ -394,6 +394,8 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
if (e) {
spin_lock(&e->lock); /* avoid race with t4_l2t_free */
e->state = L2T_STATE_RESOLVING;
+ if (neigh->dev->flags & IFF_LOOPBACK)
+ memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
memcpy(e->addr, addr, addr_len);
e->ifindex = ifidx;
e->hash = hash;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fb2fe65..bba6768 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -682,7 +682,7 @@ enum {
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
- FW_MAX_SIZE = 512 * 1024,
+ FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8ccaa25..97db5a7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -374,6 +374,7 @@ enum vf_state {
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
+#define BE_FLAGS_SETUP_DONE (1 << 13)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e6df47..a186454 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2033,11 +2033,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
bool dummy_wrb;
int i, pending_txqs;
- /* Wait for a max of 200ms for all the tx-completions to arrive. */
+ /* Stop polling for compls when HW has been silent for 10ms */
do {
pending_txqs = adapter->num_tx_qs;
for_all_tx_queues(adapter, txo, i) {
+ cmpl = 0;
+ num_wrbs = 0;
txq = &txo->q;
while ((txcp = be_tx_compl_get(&txo->cq))) {
end_idx =
@@ -2050,14 +2052,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
if (cmpl) {
be_cq_notify(adapter, txo->cq.id, false, cmpl);
atomic_sub(num_wrbs, &txq->used);
- cmpl = 0;
- num_wrbs = 0;
+ timeo = 0;
}
if (atomic_read(&txq->used) == 0)
pending_txqs--;
}
- if (pending_txqs == 0 || ++timeo > 200)
+ if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
break;
mdelay(1);
@@ -2725,6 +2726,12 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *eqo;
int i;
+ /* This protection is needed as be_close() may be called even when the
+ * adapter is in cleared state (after eeh perm failure)
+ */
+ if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
+ return 0;
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3055,6 +3062,7 @@ static int be_clear(struct be_adapter *adapter)
be_clear_queues(adapter);
be_msix_disable(adapter);
+ adapter->flags &= ~BE_FLAGS_SETUP_DONE;
return 0;
}
@@ -3559,6 +3567,7 @@ static int be_setup(struct be_adapter *adapter)
adapter->phy.fc_autoneg = 1;
be_schedule_worker(adapter);
+ adapter->flags |= BE_FLAGS_SETUP_DONE;
return 0;
err:
be_clear(adapter);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d04b1c3..14786c8 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -91,7 +91,7 @@
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
-#define MVNETA_RGMII_SERDES_PROTO 0x0667
+#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -2721,29 +2721,44 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
}
/* Power up the port */
-static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
- u32 val;
+ u32 ctrl;
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- else
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+ ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ /* Even though it might look weird, when we're configured in
+ * SGMII or QSGMII mode, the RGMII bit needs to be set.
+ */
+ switch(phy_mode) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
+ ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ ctrl |= MVNETA_GMAC2_PORT_RGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
/* Cancel Port Reset */
- val &= ~MVNETA_GMAC2_PORT_RESET;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+ ctrl &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
MVNETA_GMAC2_PORT_RESET) != 0)
continue;
+
+ return 0;
}
/* Device initialization routine */
@@ -2854,7 +2869,12 @@ static int mvneta_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "can't init eth hal\n");
goto err_free_stats;
}
- mvneta_port_power_up(pp, phy_mode);
+
+ err = mvneta_port_power_up(pp, phy_mode);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't power up port\n");
+ goto err_deinit;
+ }
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 70e9532..c2cd8d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
- spin_lock_init(&cq->lock);
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f085c2d..7e4b172 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_cq *cq;
- unsigned long flags;
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
- spin_lock_irqsave(&cq->lock, flags);
- napi_synchronize(&cq->napi);
- mlx4_en_process_rx_cq(dev, cq, 0);
- spin_unlock_irqrestore(&cq->lock, flags);
+ napi_schedule(&cq->napi);
}
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ae95f..cef267e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2301,13 +2301,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- dev = &priv->dev;
+ dev = pci_get_drvdata(pdev);
+ priv = mlx4_priv(dev);
dev->pdev = pdev;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
@@ -2374,10 +2369,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
} else {
atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, total_vfs);
- atomic_dec(&pf_loading);
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
err);
+ atomic_dec(&pf_loading);
err = 0;
} else {
mlx4_warn(dev, "Running in master mode\n");
@@ -2535,8 +2530,10 @@ slave_start:
mlx4_sense_init(dev);
mlx4_start_sense(dev);
- priv->pci_dev_data = pci_dev_data;
- pci_set_drvdata(pdev, dev);
+ priv->removed = 0;
+
+ if (mlx4_is_master(dev) && dev->num_vfs)
+ atomic_dec(&pf_loading);
return 0;
@@ -2588,6 +2585,9 @@ err_rel_own:
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+ if (mlx4_is_master(dev) && dev->num_vfs)
+ atomic_dec(&pf_loading);
+
kfree(priv->dev.dev_vfs);
err_free_dev:
@@ -2604,85 +2604,110 @@ err_disable_pdev:
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct mlx4_priv *priv;
+ struct mlx4_dev *dev;
+
printk_once(KERN_INFO "%s", mlx4_version);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev = &priv->dev;
+ pci_set_drvdata(pdev, dev);
+ priv->pci_dev_data = id->driver_data;
+
return __mlx4_init_one(pdev, id->driver_data);
}
-static void mlx4_remove_one(struct pci_dev *pdev)
+static void __mlx4_remove_one(struct pci_dev *pdev)
{
struct mlx4_dev *dev = pci_get_drvdata(pdev);
struct mlx4_priv *priv = mlx4_priv(dev);
+ int pci_dev_data;
int p;
- if (dev) {
- /* in SRIOV it is not allowed to unload the pf's
- * driver while there are alive vf's */
- if (mlx4_is_master(dev)) {
- if (mlx4_how_many_lives_vf(dev))
- printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
- }
- mlx4_stop_sense(dev);
- mlx4_unregister_device(dev);
+ if (priv->removed)
+ return;
- for (p = 1; p <= dev->caps.num_ports; p++) {
- mlx4_cleanup_port_info(&priv->port[p]);
- mlx4_CLOSE_PORT(dev, p);
- }
+ pci_dev_data = priv->pci_dev_data;
- if (mlx4_is_master(dev))
- mlx4_free_resource_tracker(dev,
- RES_TR_FREE_SLAVES_ONLY);
-
- mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_qp_table(dev);
- mlx4_cleanup_srq_table(dev);
- mlx4_cleanup_cq_table(dev);
- mlx4_cmd_use_polling(dev);
- mlx4_cleanup_eq_table(dev);
- mlx4_cleanup_mcg_table(dev);
- mlx4_cleanup_mr_table(dev);
- mlx4_cleanup_xrcd_table(dev);
- mlx4_cleanup_pd_table(dev);
+ /* in SRIOV it is not allowed to unload the pf's
+ * driver while there are alive vf's */
+ if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
+ printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ mlx4_stop_sense(dev);
+ mlx4_unregister_device(dev);
- if (mlx4_is_master(dev))
- mlx4_free_resource_tracker(dev,
- RES_TR_FREE_STRUCTS_ONLY);
-
- iounmap(priv->kar);
- mlx4_uar_free(dev, &priv->driver_uar);
- mlx4_cleanup_uar_table(dev);
- if (!mlx4_is_slave(dev))
- mlx4_clear_steering(dev);
- mlx4_free_eq_table(dev);
- if (mlx4_is_master(dev))
- mlx4_multi_func_cleanup(dev);
- mlx4_close_hca(dev);
- if (mlx4_is_slave(dev))
- mlx4_multi_func_cleanup(dev);
- mlx4_cmd_cleanup(dev);
-
- if (dev->flags & MLX4_FLAG_MSI_X)
- pci_disable_msix(pdev);
- if (dev->flags & MLX4_FLAG_SRIOV) {
- mlx4_warn(dev, "Disabling SR-IOV\n");
- pci_disable_sriov(pdev);
- }
+ for (p = 1; p <= dev->caps.num_ports; p++) {
+ mlx4_cleanup_port_info(&priv->port[p]);
+ mlx4_CLOSE_PORT(dev, p);
+ }
- if (!mlx4_is_slave(dev))
- mlx4_free_ownership(dev);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev,
+ RES_TR_FREE_SLAVES_ONLY);
- kfree(dev->caps.qp0_tunnel);
- kfree(dev->caps.qp0_proxy);
- kfree(dev->caps.qp1_tunnel);
- kfree(dev->caps.qp1_proxy);
- kfree(dev->dev_vfs);
+ mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_qp_table(dev);
+ mlx4_cleanup_srq_table(dev);
+ mlx4_cleanup_cq_table(dev);
+ mlx4_cmd_use_polling(dev);
+ mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_xrcd_table(dev);
+ mlx4_cleanup_pd_table(dev);
- kfree(priv);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev,
+ RES_TR_FREE_STRUCTS_ONLY);
+
+ iounmap(priv->kar);
+ mlx4_uar_free(dev, &priv->driver_uar);
+ mlx4_cleanup_uar_table(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
+ mlx4_free_eq_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
+ mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
+ mlx4_cmd_cleanup(dev);
+
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ pci_disable_msix(pdev);
+ if (dev->flags & MLX4_FLAG_SRIOV) {
+ mlx4_warn(dev, "Disabling SR-IOV\n");
+ pci_disable_sriov(pdev);
+ dev->num_vfs = 0;
}
+
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
+
+ kfree(dev->caps.qp0_tunnel);
+ kfree(dev->caps.qp0_proxy);
+ kfree(dev->caps.qp1_tunnel);
+ kfree(dev->caps.qp1_proxy);
+ kfree(dev->dev_vfs);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ memset(priv, 0, sizeof(*priv));
+ priv->pci_dev_data = pci_dev_data;
+ priv->removed = 1;
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ __mlx4_remove_one(pdev);
+ kfree(priv);
+ pci_set_drvdata(pdev, NULL);
}
int mlx4_restart_one(struct pci_dev *pdev)
@@ -2692,7 +2717,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
int pci_dev_data;
pci_dev_data = priv->pci_dev_data;
- mlx4_remove_one(pdev);
+ __mlx4_remove_one(pdev);
return __mlx4_init_one(pdev, pci_dev_data);
}
@@ -2747,7 +2772,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- mlx4_remove_one(pdev);
+ __mlx4_remove_one(pdev);
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2755,11 +2780,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- const struct pci_device_id *id;
- int ret;
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int ret;
- id = pci_match_id(mlx4_pci_table, pdev);
- ret = __mlx4_init_one(pdev, id->driver_data);
+ ret = __mlx4_init_one(pdev, priv->pci_dev_data);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf8be41..f9c4651 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -800,6 +800,7 @@ struct mlx4_priv {
spinlock_t ctx_lock;
int pci_dev_data;
+ int removed;
struct list_head pgdir_list;
struct mutex pgdir_mutex;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7a733c2..04d9b6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -319,7 +319,6 @@ struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
int size;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b48737d..ba20c72 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2139,8 +2139,6 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
- adapter->max_tx_rings = ahw->max_tx_ques;
- adapter->max_sds_rings = ahw->max_rx_ques;
/* eSwitch capability indicates vNIC mode.
* vNIC and SRIOV are mutually exclusive operational modes.
* If SR-IOV capability is detected, SR-IOV physical function
@@ -2161,6 +2159,7 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 max_sds_rings, max_tx_rings;
int ret;
ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2173,18 +2172,21 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
- adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
- adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+ max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
- adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
return -EIO;
}
+ adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+ adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
return 0;
}
@@ -2348,15 +2350,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
goto disable_intr;
}
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
err = qlcnic_83xx_setup_mbx_intr(adapter);
if (err)
goto disable_mbx_intr;
qlcnic_83xx_clear_function_resources(adapter);
-
- INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
-
+ qlcnic_dcb_enable(adapter->dcb);
qlcnic_83xx_initialize_nic(adapter, 1);
+ qlcnic_dcb_get_info(adapter->dcb);
/* Configure default, SR-IOV or Virtual NIC mode of operation */
err = qlcnic_83xx_configure_opmode(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 64dcbf3..c1e11f5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -883,8 +883,6 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
- adapter->max_tx_rings = npar_info->max_tx_ques;
- adapter->max_sds_rings = npar_info->max_rx_ques;
}
qlcnic_free_mbx_args(&cmd);
@@ -1356,6 +1354,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg2 &= ~BIT_3;
break;
case QLCNIC_ADD_VLAN:
+ arg1 &= ~(0x0ffff << 16);
arg1 |= (BIT_2 | BIT_5);
arg1 |= (esw_cfg->vlan_id << 16);
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 7d4f549..a51fe18 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -330,8 +330,6 @@ static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
goto out_free_cfg;
}
- qlcnic_dcb_get_info(dcb);
-
return 0;
out_free_cfg:
kfree(dcb->cfg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 309d056..dbf7539 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -670,7 +670,7 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
else
num_msix += adapter->drv_tx_rings;
- if (adapter->drv_rss_rings > 0)
+ if (adapter->drv_rss_rings > 0)
num_msix += adapter->drv_rss_rings;
else
num_msix += adapter->drv_sds_rings;
@@ -686,19 +686,15 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
return -ENOMEM;
}
-restore:
for (vector = 0; vector < num_msix; vector++)
adapter->msix_entries[vector].entry = vector;
+restore:
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
- adapter->ahw->num_msix = num_msix;
- if (adapter->drv_tss_rings > 0)
- adapter->drv_tx_rings = adapter->drv_tss_rings;
+ if (err > 0) {
+ if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+ return -ENOSPC;
- if (adapter->drv_rss_rings > 0)
- adapter->drv_sds_rings = adapter->drv_rss_rings;
- } else {
netdev_info(adapter->netdev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
num_msix, err);
@@ -716,12 +712,20 @@ restore:
"Restoring %d Tx, %d SDS rings for total %d vectors.\n",
adapter->drv_tx_rings, adapter->drv_sds_rings,
num_msix);
- goto restore;
- err = -EIO;
+ goto restore;
+ } else if (err < 0) {
+ return err;
}
- return err;
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
+
+ return 0;
}
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
@@ -2528,8 +2532,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
- qlcnic_dcb_enable(adapter->dcb);
-
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -2549,7 +2551,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
+ qlcnic_dcb_enable(adapter->dcb);
+ qlcnic_dcb_get_info(adapter->dcb);
err = qlcnic_setup_intr(adapter);
+
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 14f748c..2801379 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -461,6 +461,16 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ if (pci_vfs_assigned(adapter->pdev)) {
+ netdev_err(adapter->netdev,
+ "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
+ adapter->portnum);
+ netdev_info(adapter->netdev,
+ "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+ adapter->portnum);
+ return -EPERM;
+ }
+
rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 448d156..cd346e2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -354,7 +354,7 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
int i;
- for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
@@ -720,6 +720,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
+ u8 pci_func;
int i, ret;
u32 count;
@@ -729,26 +730,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
count = size / sizeof(struct qlcnic_npar_func_cfg);
for (i = 0; i < adapter->ahw->total_nic_func; i++) {
- if (qlcnic_is_valid_nic_func(adapter, i) < 0)
- continue;
if (adapter->npars[i].pci_func >= count) {
dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
__func__, adapter->ahw->total_nic_func, count);
continue;
}
- ret = qlcnic_get_nic_info(adapter, &nic_info, i);
- if (ret)
- return ret;
if (!adapter->npars[i].eswitch_status)
continue;
- np_cfg[i].pci_func = i;
- np_cfg[i].op_mode = (u8)nic_info.op_mode;
- np_cfg[i].port_num = nic_info.phys_port;
- np_cfg[i].fw_capab = nic_info.capabilities;
- np_cfg[i].min_bw = nic_info.min_tx_bw;
- np_cfg[i].max_bw = nic_info.max_tx_bw;
- np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
- np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+ pci_func = adapter->npars[i].pci_func;
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+
+ np_cfg[pci_func].pci_func = pci_func;
+ np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+ np_cfg[pci_func].port_num = nic_info.phys_port;
+ np_cfg[pci_func].fw_capab = nic_info.capabilities;
+ np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+ np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+ np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
}
return size;
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21c20ea0..b5ed30a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -738,8 +738,11 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
/* If it was a port reset, trigger reallocation of MC resources.
* Note that on an MC reset nothing needs to be done now because we'll
* detect the MC reset later and handle it then.
+ * For an FLR, we never get an MC reset event, but the MC has reset all
+ * resources assigned to us, so we have to trigger reallocation now.
*/
- if (reset_type == RESET_TYPE_ALL && !rc)
+ if ((reset_type == RESET_TYPE_ALL ||
+ reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
efx_ef10_reset_mc_allocations(efx);
return rc;
}
@@ -2141,6 +2144,11 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
return 0;
}
+static void efx_ef10_prepare_flr(struct efx_nic *efx)
+{
+ atomic_set(&efx->active_queues, 0);
+}
+
static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
@@ -3603,6 +3611,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 57b971e..63d595f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -76,6 +76,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
@@ -83,7 +84,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
- [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -1739,7 +1740,8 @@ static void efx_start_all(struct efx_nic *efx)
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev))
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
return;
efx_start_port(efx);
@@ -2334,6 +2336,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
EFX_ASSERT_RESET_SERIALISED(efx);
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
efx_stop_all(efx);
efx_disable_interrupts(efx);
@@ -2354,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
EFX_ASSERT_RESET_SERIALISED(efx);
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
@@ -2417,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
/* Clear flags for the scopes we covered. We assume the NIC and
* driver are now quiescent so that there is no race here.
*/
- efx->reset_pending &= -(1 << (method + 1));
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
/* Reinitialise bus-mastering, which may have been turned off before
* the reset was scheduled. This is still appropriate, even in the
@@ -2546,6 +2558,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 75ef7ef..d1dbb5f 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,14 +151,16 @@ enum efx_loopback_mode {
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
*/
enum reset_type {
- RESET_TYPE_INVISIBLE = 0,
- RESET_TYPE_RECOVER_OR_ALL = 1,
- RESET_TYPE_ALL = 2,
- RESET_TYPE_WORLD = 3,
- RESET_TYPE_RECOVER_OR_DISABLE = 4,
- RESET_TYPE_DISABLE = 5,
+ RESET_TYPE_INVISIBLE,
+ RESET_TYPE_RECOVER_OR_ALL,
+ RESET_TYPE_ALL,
+ RESET_TYPE_WORLD,
+ RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_MC_BIST,
+ RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
@@ -165,7 +168,13 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
- RESET_TYPE_MC_BIST,
+ /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+ * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+ * We encode this by having its enum value be greater than
+ * RESET_TYPE_MAX_METHOD. This also prevents issuing it with
+ * efx_ioctl_reset.
+ */
+ RESET_TYPE_MCDI_TIMEOUT,
RESET_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ec20b7..fae25a4 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2696,6 +2696,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
@@ -2790,6 +2792,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index a087613..0537381 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -741,6 +741,28 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
return rc;
}
+/* Reset queue and flush accounting after FLR
+ *
+ * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
+ * mastering was disabled), in which case we don't receive (RXQ) flush
+ * completion events. This means that efx->rxq_flush_outstanding remained at 4
+ * after the FLR; also, efx->active_queues was non-zero (as no flush completion
+ * events were received, and we didn't go through efx_check_tx_flush_complete())
+ * If we don't fix this up, on the next call to efx_realloc_channels() we won't
+ * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
+ * for batched flush requests; and the efx->active_queues gets messed up because
+ * we keep incrementing for the newly initialised queues, but it never went to
+ * zero previously. Then we get a timeout every time we try to restart the
+ * queues, as it doesn't go back to zero when we should be flushing the queues.
+ */
+void efx_farch_finish_flr(struct efx_nic *efx)
+{
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ atomic_set(&efx->active_queues, 0);
+}
+
+
/**************************************************************************
*
* Event queue processing
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 7bd4b14..5239cf9 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -52,12 +52,7 @@ static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
-
-static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
-{
- EFX_BUG_ON_PARANOID(!efx->mcdi);
- return &efx->mcdi->iface;
-}
+static void efx_mcdi_abandon(struct efx_nic *efx);
int efx_mcdi_init(struct efx_nic *efx)
{
@@ -558,6 +553,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
rc = 0;
}
+ efx_mcdi_abandon(efx);
+
/* Close the race with efx_mcdi_ev_cpl() executing just too late
* and completing a request we've just cancelled, by ensuring
* that the seqno check therein fails.
@@ -672,6 +669,9 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (efx->mc_bist_for_other_fn)
return -ENETDOWN;
+ if (mcdi->mode == MCDI_MODE_FAIL)
+ return -ENETDOWN;
+
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
@@ -812,7 +812,11 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
return;
mcdi = efx_mcdi(efx);
- if (mcdi->mode == MCDI_MODE_POLL)
+ /* If already in polling mode, nothing to do.
+ * If in fail-fast state, don't switch to polled completion.
+ * FLR recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
return;
/* We can switch from event completion to polled completion, because
@@ -841,8 +845,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
mcdi = efx_mcdi(efx);
- /* We must be in polling mode so no more requests can be queued */
- BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+ /* We must be in poll or fail mode so no more requests can be queued */
+ BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
del_timer_sync(&mcdi->async_timer);
@@ -875,8 +879,11 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
return;
mcdi = efx_mcdi(efx);
-
- if (mcdi->mode == MCDI_MODE_EVENTS)
+ /* If already in event completion mode, nothing to do.
+ * If in fail-fast state, don't switch to event completion. FLR
+ * recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
return;
/* We can't switch from polled to event completion in the middle of a
@@ -966,6 +973,19 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
spin_unlock(&mcdi->iface_lock);
}
+/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
+ * to recover.
+ */
+static void efx_mcdi_abandon(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
+ return; /* it had already been done */
+ netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
+ efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -1512,6 +1532,19 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
int rc;
+ /* If MCDI is down, we can't handle_assertion */
+ if (method == RESET_TYPE_MCDI_TIMEOUT) {
+ rc = pci_reset_function(efx->pci_dev);
+ if (rc)
+ return rc;
+ /* Re-enable polled MCDI completion */
+ if (efx->mcdi) {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ mcdi->mode = MCDI_MODE_POLL;
+ }
+ return 0;
+ }
+
/* Recover from a failed assertion pre-reset */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 52931ae..56465f7 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -28,9 +28,16 @@ enum efx_mcdi_state {
MCDI_STATE_COMPLETED,
};
+/**
+ * enum efx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
+ * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
enum efx_mcdi_mode {
MCDI_MODE_POLL,
MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
};
/**
@@ -104,6 +111,12 @@ struct efx_mcdi_data {
u32 fn_flags;
};
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
+}
+
#ifdef CONFIG_SFC_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8a400a0..5bdae8e 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -972,6 +972,8 @@ struct efx_mtd_partition {
* (for Falcon architecture)
* @finish_flush: Clean up after flushing the DMA queues (for Falcon
* architecture)
+ * @prepare_flr: Prepare for an FLR
+ * @finish_flr: Clean up after an FLR
* @describe_stats: Describe statistics for ethtool
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
@@ -1100,6 +1102,8 @@ struct efx_nic_type {
int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx);
+ void (*prepare_flr)(struct efx_nic *efx);
+ void (*finish_flr)(struct efx_nic *efx);
size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index a001fae..d3ad8ed 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -757,6 +757,7 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
int efx_nic_flush_queues(struct efx_nic *efx);
void siena_prepare_flush(struct efx_nic *efx);
int efx_farch_fini_dmaq(struct efx_nic *efx);
+void efx_farch_finish_flr(struct efx_nic *efx);
void siena_finish_flush(struct efx_nic *efx);
void falcon_start_nic_stats(struct efx_nic *efx);
void falcon_stop_nic_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 23f3a6f..50ffefe 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -921,6 +921,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 430bb0d..e36f194 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -365,7 +365,7 @@ __at86rf230_read_subreg(struct at86rf230_local *lp,
dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
if (status == 0)
- *data = buf[1];
+ *data = (buf[1] & mask) >> shift;
return status;
}
@@ -1025,14 +1025,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return -EINVAL;
}
- rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
- if (rc)
- return rc;
- if (!status) {
- dev_err(&lp->spi->dev, "AVDD error\n");
- return -EINVAL;
- }
-
return 0;
}
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index e701433..9c4defd 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -32,29 +32,39 @@
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;
- int mdc, mdio;
+ int mdc, mdio, mdo;
+ int mdc_active_low, mdio_active_low, mdo_active_low;
};
static void *mdio_gpio_of_get_data(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mdio_gpio_platform_data *pdata;
+ enum of_gpio_flags flags;
int ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- ret = of_get_gpio(np, 0);
+ ret = of_get_gpio_flags(np, 0, &flags);
if (ret < 0)
return NULL;
pdata->mdc = ret;
+ pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW;
- ret = of_get_gpio(np, 1);
+ ret = of_get_gpio_flags(np, 1, &flags);
if (ret < 0)
return NULL;
pdata->mdio = ret;
+ pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+ ret = of_get_gpio_flags(np, 2, &flags);
+ if (ret > 0) {
+ pdata->mdo = ret;
+ pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ }
return pdata;
}
@@ -64,8 +74,19 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
+ if (bitbang->mdo) {
+ /* Separate output pin. Always set its value to high
+ * when changing direction. If direction is input,
+ * assume the pin serves as pull-up. If direction is
+ * output, the default value is high.
+ */
+ gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
+ return;
+ }
+
if (dir)
- gpio_direction_output(bitbang->mdio, 1);
+ gpio_direction_output(bitbang->mdio,
+ 1 ^ bitbang->mdio_active_low);
else
gpio_direction_input(bitbang->mdio);
}
@@ -75,7 +96,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- return gpio_get_value(bitbang->mdio);
+ return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
}
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -83,7 +104,10 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpio_set_value(bitbang->mdio, what);
+ if (bitbang->mdo)
+ gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
+ else
+ gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
}
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -91,7 +115,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpio_set_value(bitbang->mdc, what);
+ gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
}
static struct mdiobb_ops mdio_gpio_ops = {
@@ -110,18 +134,22 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
struct mdio_gpio_info *bitbang;
int i;
- bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL);
+ bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
if (!bitbang)
goto out;
bitbang->ctrl.ops = &mdio_gpio_ops;
bitbang->ctrl.reset = pdata->reset;
bitbang->mdc = pdata->mdc;
+ bitbang->mdc_active_low = pdata->mdc_active_low;
bitbang->mdio = pdata->mdio;
+ bitbang->mdio_active_low = pdata->mdio_active_low;
+ bitbang->mdo = pdata->mdo;
+ bitbang->mdo_active_low = pdata->mdo_active_low;
new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!new_bus)
- goto out_free_bitbang;
+ goto out;
new_bus->name = "GPIO Bitbanged MDIO",
@@ -138,11 +166,18 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
- if (gpio_request(bitbang->mdc, "mdc"))
+ if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
+ goto out_free_bus;
+
+ if (devm_gpio_request(dev, bitbang->mdio, "mdio"))
goto out_free_bus;
- if (gpio_request(bitbang->mdio, "mdio"))
- goto out_free_mdc;
+ if (bitbang->mdo) {
+ if (devm_gpio_request(dev, bitbang->mdo, "mdo"))
+ goto out_free_bus;
+ gpio_direction_output(bitbang->mdo, 1);
+ gpio_direction_input(bitbang->mdio);
+ }
gpio_direction_output(bitbang->mdc, 0);
@@ -150,12 +185,8 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
return new_bus;
-out_free_mdc:
- gpio_free(bitbang->mdc);
out_free_bus:
free_mdio_bitbang(new_bus);
-out_free_bitbang:
- kfree(bitbang);
out:
return NULL;
}
@@ -163,13 +194,8 @@ out:
static void mdio_gpio_bus_deinit(struct device *dev)
{
struct mii_bus *bus = dev_get_drvdata(dev);
- struct mdio_gpio_info *bitbang = bus->priv;
- dev_set_drvdata(dev, NULL);
- gpio_free(bitbang->mdio);
- gpio_free(bitbang->mdc);
free_mdio_bitbang(bus);
- kfree(bitbang);
}
static void mdio_gpio_bus_destroy(struct device *dev)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c55e316..82355d5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1755,8 +1755,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
if (err)
return err;
- return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
- false);
+ return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+ tos, ttl, df, false);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 84734a8..83c39e2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1521,11 +1521,7 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
cosa_putstatus(cosa, 0);
cosa_getdata8(cosa);
cosa_putstatus(cosa, SR_RST);
-#ifdef MODULE
msleep(500);
-#else
- udelay(5*100000);
-#endif
/* Disable all IRQs from the card */
cosa_putstatus(cosa, 0);
diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c
index e323b4d..34f97c3 100644
--- a/drivers/net/wireless/cw1200/debug.c
+++ b/drivers/net/wireless/cw1200/debug.c
@@ -41,6 +41,8 @@ static const char * const cw1200_debug_link_id[] = {
"REQ",
"SOFT",
"HARD",
+ "RESET",
+ "RESET_REMAP",
};
static const char *cw1200_debug_mode(int mode)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 003a546..4c2d4ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 8
-#define IWL3160_UCODE_API_MAX 8
+#define IWL7260_UCODE_API_MAX 9
+#define IWL3160_UCODE_API_MAX 9
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 8
@@ -244,3 +244,4 @@ const struct iwl_cfg iwl7265_n_cfg = {
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 685f7e8..fa858d54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -190,7 +190,7 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
cpu_to_le32(0xcc00aaaa),
cpu_to_le32(0x0000aaaa),
cpu_to_le32(0xc0004000),
- cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00004000),
cpu_to_le32(0xf0005000),
cpu_to_le32(0xf0005000),
},
@@ -213,16 +213,16 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
/* Tx Tx disabled */
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xeeaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xcc00ff28),
cpu_to_le32(0x0000aaaa),
cpu_to_le32(0xcc00aaaa),
cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xF0005000),
- cpu_to_le32(0xF0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
},
};
@@ -1262,6 +1262,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
u8 __maybe_unused lower_bound, upper_bound;
+ int ret;
u8 lut;
struct iwl_bt_coex_cmd *bt_cmd;
@@ -1318,5 +1319,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
- return 0;
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ kfree(bt_cmd);
+ return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 4dd9ff4..f0cebf1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1332,6 +1332,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
+ iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 568abd6..9f52c5b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -59,7 +59,7 @@
/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX 15
#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
-
+#define RS_IDLE_TIMEOUT (5*HZ)
static u8 rs_ht_to_legacy[] = {
[IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
@@ -142,7 +142,7 @@ enum rs_column_mode {
RS_MIMO2,
};
-#define MAX_NEXT_COLUMNS 5
+#define MAX_NEXT_COLUMNS 7
#define MAX_COLUMN_CHECKS 3
typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
@@ -212,8 +212,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_MIMO2,
- RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
},
[RS_COLUMN_LEGACY_ANT_B] = {
@@ -223,8 +225,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_MIMO2,
- RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
},
[RS_COLUMN_SISO_ANT_A] = {
@@ -235,7 +239,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_SISO_ANT_B_SGI,
- RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -249,7 +255,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -265,6 +273,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
},
.checks = {
rs_siso_allow,
@@ -281,6 +291,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_MIMO2,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
},
.checks = {
rs_siso_allow,
@@ -296,6 +308,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
},
.checks = {
rs_mimo_allow,
@@ -311,6 +325,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
},
.checks = {
rs_mimo_allow,
@@ -503,10 +519,12 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->average_tpt = IWL_INVALID_VALUE;
}
-static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
+static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl)
{
int i;
+ IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&tbl->win[i]);
}
@@ -992,6 +1010,13 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
return;
}
+#ifdef CPTCFG_MAC80211_DEBUGFS
+ /* Disable last tx check if we are debugging with fixed rate */
+ if (lq_sta->dbg_fixed_rate) {
+ IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
+ return;
+ }
+#endif
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
@@ -1034,6 +1059,18 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
mac_index++;
}
+ if (time_after(jiffies,
+ (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
+ int tid;
+ IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_stop_tx_ba_session(sta, tid);
+
+ iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
+ return;
+ }
+ lq_sta->last_tx = jiffies;
+
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
(rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
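
The idle check added above leans on the kernel's time_after() macro, which stays correct when jiffies wraps. A minimal user-space sketch of the same comparison, with made-up tick values (none of these names come from the driver):

#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a is after b", mirroring how time_after() is defined. */
static bool after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long hz = 250;               /* assume HZ = 250 ticks per second */
	unsigned long idle_timeout = 5 * hz;  /* stands in for RS_IDLE_TIMEOUT (5*HZ) */
	unsigned long last_tx = 1000;         /* tick of the previous data frame */
	unsigned long now = last_tx + 6 * hz; /* six seconds later */

	if (after(now, last_tx + idle_timeout))
		printf("Tx idle too long: rate scaling would be re-initialized\n");
	return 0;
}
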
@@ -1186,9 +1223,26 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->visited_columns = 0;
}
+static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
+ const struct rs_tx_column *column)
+{
+ switch (column->mode) {
+ case RS_LEGACY:
+ return lq_sta->max_legacy_rate_idx;
+ case RS_SISO:
+ return lq_sta->max_siso_rate_idx;
+ case RS_MIMO2:
+ return lq_sta->max_mimo2_rate_idx;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return lq_sta->max_legacy_rate_idx;
+}
+
static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- const struct rs_tx_column *column,
- u32 bw)
+ const struct rs_tx_column *column,
+ u32 bw)
{
/* Used to choose among HT tables */
const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
@@ -1438,7 +1492,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
IWL_DEBUG_RATE(mvm,
"LQ: stay in table clear win\n");
- rs_rate_scale_clear_tbl_windows(tbl);
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
}
}
@@ -1446,8 +1500,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
* bitmaps and stats in active table (this will become the new
* "search" table). */
if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
- IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
- rs_rate_scale_clear_tbl_windows(tbl);
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
}
}
}
@@ -1485,14 +1538,14 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
{
- int i, j, n;
+ int i, j, max_rate;
enum rs_column next_col_id;
const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
const struct rs_tx_column *next_col;
allow_column_func_t allow_func;
u8 valid_ants = mvm->fw->valid_tx_ant;
const u16 *expected_tpt_tbl;
- s32 tpt, max_expected_tpt;
+ u16 tpt, max_expected_tpt;
for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
next_col_id = curr_col->next_columns[i];
@@ -1535,11 +1588,11 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(!expected_tpt_tbl))
continue;
- max_expected_tpt = 0;
- for (n = 0; n < IWL_RATE_COUNT; n++)
- if (expected_tpt_tbl[n] > max_expected_tpt)
- max_expected_tpt = expected_tpt_tbl[n];
+ max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
+ if (WARN_ON_ONCE(max_rate == IWL_RATE_INVALID))
+ continue;
+ max_expected_tpt = expected_tpt_tbl[max_rate];
if (tpt >= max_expected_tpt) {
IWL_DEBUG_RATE(mvm,
"Skip column %d: can't beat current TPT. Max expected %d current %d\n",
@@ -1547,14 +1600,15 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
continue;
}
+ IWL_DEBUG_RATE(mvm,
+ "Found potential column %d. Max expected %d current %d\n",
+ next_col_id, max_expected_tpt, tpt);
break;
}
if (i == MAX_NEXT_COLUMNS)
return RS_COLUMN_INVALID;
- IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
-
return next_col_id;
}
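
With rs_get_max_allowed_rate() in place, a candidate column is judged by the expected throughput at the highest rate this peer actually supports, not by the table's absolute peak. A stand-alone illustration with invented numbers of why that can rule out a column the old code would have tried:

#include <stdio.h>

#define RATE_COUNT 10

int main(void)
{
	/* invented expected-throughput table for one candidate column */
	unsigned short expected_tpt[RATE_COUNT] =
		{ 10, 20, 40, 60, 80, 100, 120, 140, 160, 180 };
	int max_allowed_rate = 5;        /* peer tops out at index 5 */
	unsigned int current_tpt = 110;  /* measured throughput in the current column */

	/* old behaviour: compare against the table peak (180) and keep the column;
	 * new behaviour: compare against what this peer can actually reach */
	unsigned short max_expected = expected_tpt[max_allowed_rate];

	if (current_tpt >= max_expected)
		printf("Skip column: can't beat current TPT (max expected %u, current %u)\n",
		       max_expected, current_tpt);
	return 0;
}
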
@@ -1640,85 +1694,76 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
{
enum rs_action action = RS_ACTION_STAY;
- /* Too many failures, decrease rate */
if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
IWL_DEBUG_RATE(mvm,
- "decrease rate because of low SR\n");
- action = RS_ACTION_DOWNSCALE;
- /* No throughput measured yet for adjacent rates; try increase. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
- IWL_DEBUG_RATE(mvm,
- "Good SR and no high rate measurement. "
- "Increase rate\n");
- action = RS_ACTION_UPSCALE;
- } else if (low != IWL_RATE_INVALID) {
- IWL_DEBUG_RATE(mvm,
- "Remain in current rate\n");
- action = RS_ACTION_STAY;
- }
+ "Decrease rate because of low SR\n");
+ return RS_ACTION_DOWNSCALE;
}
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it!
- */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt)) {
+ if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE) &&
+ (high != IWL_RATE_INVALID)) {
IWL_DEBUG_RATE(mvm,
- "Both high and low are worse. "
- "Maintain rate\n");
- action = RS_ACTION_STAY;
+ "No data about high/low rates. Increase rate\n");
+ return RS_ACTION_UPSCALE;
}
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance.
- */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- IWL_DEBUG_RATE(mvm,
- "Higher rate is better and good "
- "SR. Increate rate\n");
- action = RS_ACTION_UPSCALE;
- } else {
- IWL_DEBUG_RATE(mvm,
- "Higher rate isn't better OR "
- "no good SR. Maintain rate\n");
- action = RS_ACTION_STAY;
- }
+ if ((high_tpt == IWL_INVALID_VALUE) &&
+ (high != IWL_RATE_INVALID) &&
+ (low_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "No data about high rate and low rate is worse. Increase rate\n");
+ return RS_ACTION_UPSCALE;
+ }
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(mvm,
- "Lower rate is better. "
- "Decrease rate\n");
- action = RS_ACTION_DOWNSCALE;
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- IWL_DEBUG_RATE(mvm,
- "Lower rate isn't better and "
- "good SR. Increase rate\n");
- action = RS_ACTION_UPSCALE;
- }
- }
+ if ((high_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Higher rate is better. Increase rate\n");
+ return RS_ACTION_UPSCALE;
}
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it.
- */
- if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low])))) {
+ if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Both high and low are worse. Maintain rate\n");
+ return RS_ACTION_STAY;
+ }
+
+ if ((low_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Lower rate is better\n");
+ action = RS_ACTION_DOWNSCALE;
+ goto out;
+ }
+
+ if ((low_tpt == IWL_INVALID_VALUE) &&
+ (low != IWL_RATE_INVALID)) {
IWL_DEBUG_RATE(mvm,
- "Sanity check failed. Maintain rate\n");
- action = RS_ACTION_STAY;
+ "No data about lower rate\n");
+ action = RS_ACTION_DOWNSCALE;
+ goto out;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Maintain rate\n");
+
+out:
+ if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
+ if (sr >= RS_SR_NO_DECREASE) {
+ IWL_DEBUG_RATE(mvm,
+ "SR is above NO DECREASE. Avoid downscale\n");
+ action = RS_ACTION_STAY;
+ } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
+ IWL_DEBUG_RATE(mvm,
+ "Current TPT is higher than max expected in low rate. Avoid downscale\n");
+ action = RS_ACTION_STAY;
+ } else {
+ IWL_DEBUG_RATE(mvm, "Decrease rate\n");
+ }
}
return action;
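
Read top to bottom, the rewritten function is now an early-return ladder. The condensed stand-alone restatement below (an invented helper, not the driver function) keeps the order of the tests but omits the rate-index availability checks and the final no-decrease guard; low and high are the measured throughputs of the neighbouring rates, or -1 when unmeasured:

#include <stdio.h>

#define INVALID (-1)

/* Condensed restatement of the new decision order: poor success ratio ->
 * down; nothing measured -> up; only a worse lower rate measured -> up;
 * higher rate measured better -> up; both neighbours worse -> stay;
 * lower rate better or unmeasured -> down; otherwise stay. */
static const char *pick_action(int sr, int cur, int low, int high)
{
	if (sr <= 1920 || cur == 0)                       /* RS_SR_FORCE_DECREASE */
		return "downscale (low success ratio)";
	if (low == INVALID && high == INVALID)
		return "upscale (no neighbour data)";
	if (high == INVALID && low != INVALID && low < cur)
		return "upscale (only the lower rate measured, and it is worse)";
	if (high != INVALID && high > cur)
		return "upscale (higher rate measured better)";
	if (low != INVALID && high != INVALID && low < cur && high < cur)
		return "stay (both neighbours worse)";
	if (low != INVALID && low > cur)
		return "downscale (lower rate measured better)";
	if (low == INVALID)
		return "downscale (lower rate unmeasured)";
	return "stay";
}

int main(void)
{
	printf("%s\n", pick_action(9000, 100, 80, 130)); /* upscale */
	printf("%s\n", pick_action(9000, 100, 80, 90));  /* stay */
	return 0;
}
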
@@ -1792,6 +1837,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
"Aggregation changed: prev %d current %d. Update expected TPT table\n",
prev_agg, lq_sta->is_agg);
rs_set_expected_tpt_table(lq_sta, tbl);
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
}
/* current tx rate */
@@ -2021,7 +2067,7 @@ lq_update:
if (lq_sta->search_better_tbl) {
/* Access the "search" table, clear its history. */
tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- rs_rate_scale_clear_tbl_windows(tbl);
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
/* Use new "search" start rate */
index = tbl->rate.index;
@@ -2042,8 +2088,18 @@ lq_update:
* stay with best antenna legacy modulation for a while
* before next round of mode comparisons. */
tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) {
+ if (is_legacy(&tbl1->rate)) {
IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
+
+ if (tid != IWL_MAX_TID_COUNT) {
+ tid_data = &sta_priv->tid_data[tid];
+ if (tid_data->state != IWL_AGG_OFF) {
+ IWL_DEBUG_RATE(mvm,
+ "Stop aggregation on tid %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ }
rs_set_stay_in_table(mvm, 1, lq_sta);
} else {
/* If we're in an HT mode, and all 3 mode switch actions
@@ -2342,9 +2398,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->lq.sta_id = sta_priv->sta_id;
for (j = 0; j < LQ_SIZE; j++)
- rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]);
+ rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
lq_sta->flush_timer = 0;
+ lq_sta->last_tx = jiffies;
IWL_DEBUG_RATE(mvm,
"LQ: *** rate scale station global init for station %d ***\n",
@@ -2388,11 +2445,22 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->is_vht = true;
}
- IWL_DEBUG_RATE(mvm,
- "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
+ lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate,
+ BITS_PER_LONG);
+ lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate,
+ BITS_PER_LONG);
+ lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate,
+ BITS_PER_LONG);
+
+ IWL_DEBUG_RATE(mvm, "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d\n",
+ lq_sta->active_legacy_rate,
lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate,
lq_sta->is_vht);
+ IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
+ lq_sta->max_legacy_rate_idx,
+ lq_sta->max_siso_rate_idx,
+ lq_sta->max_mimo2_rate_idx);
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
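
The three max_*_rate_idx values are derived with find_last_bit() now that the rate masks are unsigned long bitmaps. A stand-alone single-word equivalent, using an invented mask:

#include <stdio.h>

/* Roughly what find_last_bit() computes for a single word: the index of the
 * highest set bit, or 'size' when the bitmap is empty. */
static unsigned long last_bit(unsigned long mask, unsigned long size)
{
	for (unsigned long i = size; i-- > 0; )
		if (mask & (1UL << i))
			return i;
	return size;
}

int main(void)
{
	unsigned long active_siso_rate = 0x0ff0;  /* invented rate bitmap */

	/* highest supported SISO rate index -> 11 */
	printf("max siso rate idx: %lu\n", last_bit(active_siso_rate, 64));
	return 0;
}
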
@@ -2547,6 +2615,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
if (is_siso(&rate)) {
num_rates = RS_SECONDARY_SISO_NUM_RATES;
num_retries = RS_SECONDARY_SISO_RETRIES;
+ lq_cmd->mimo_delim = index;
} else if (is_legacy(&rate)) {
num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
num_retries = RS_LEGACY_RETRIES_PER_RATE;
@@ -2749,7 +2818,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
return -ENOMEM;
desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
- desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
+ desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
lq_sta->total_failed, lq_sta->total_success,
lq_sta->active_legacy_rate);
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 3332b39..0acfac9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -156,6 +156,7 @@ enum {
#define IWL_RATE_HIGH_TH 10880 /* 85% */
#define IWL_RATE_INCREASE_TH 6400 /* 50% */
#define RS_SR_FORCE_DECREASE 1920 /* 15% */
+#define RS_SR_NO_DECREASE 10880 /* 85% */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
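
These thresholds, including the new RS_SR_NO_DECREASE, are in the driver's fixed-point success-ratio scale where 12800 corresponds to 100% (128 per percentage point), which is what the inline comments translate to. A quick stand-alone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* success ratio is stored as 128 * percent, so 12800 == 100% */
	int thresholds[] = { 10880, 6400, 1920 };  /* HIGH_TH / NO_DECREASE, INCREASE_TH, FORCE_DECREASE */

	for (int i = 0; i < 3; i++)
		printf("%d -> %d%%\n", thresholds[i], thresholds[i] / 128);
	return 0;
}
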
@@ -310,13 +311,20 @@ struct iwl_lq_sta {
u32 visited_columns; /* Bitmask marking which Tx columns were
* explored during a search cycle
*/
+ u64 last_tx;
bool is_vht;
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
- u16 active_legacy_rate;
- u16 active_siso_rate;
- u16 active_mimo2_rate;
+ unsigned long active_legacy_rate;
+ unsigned long active_siso_rate;
+ unsigned long active_mimo2_rate;
+
+ /* Highest rate per Tx mode */
+ u8 max_legacy_rate_idx;
+ u8 max_siso_rate_idx;
+ u8 max_mimo2_rate_idx;
+
s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter;
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 8401627..88809b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -274,7 +274,8 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
return -EINVAL;
if (changed_vif->type != NL80211_IFTYPE_STATION) {
new_state = SF_UNINIT;
- } else if (changed_vif->bss_conf.assoc) {
+ } else if (changed_vif->bss_conf.assoc &&
+ changed_vif->bss_conf.dtim_period) {
mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
sta_id = mvmvif->ap_sta_id;
new_state = SF_FULL_ON;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index edb015c..3d1d57f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -373,12 +373,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 77db088..9c771b3 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -292,6 +292,12 @@ process_start:
while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
mwifiex_handle_rx_packet(adapter, skb);
+ /* Check for event */
+ if (adapter->event_received) {
+ adapter->event_received = false;
+ mwifiex_process_event(adapter);
+ }
+
/* Check for Cmd Resp */
if (adapter->cmd_resp_received) {
adapter->cmd_resp_received = false;
@@ -304,12 +310,6 @@ process_start:
}
}
- /* Check for event */
- if (adapter->event_received) {
- adapter->event_received = false;
- mwifiex_process_event(adapter);
- }
-
/* Check if we need to confirm Sleep Request
received previously */
if (adapter->ps_state == PS_STATE_PRE_SLEEP) {
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 8942706..536c14a 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -60,9 +60,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
int status;
/* Wait for completion */
- status = wait_event_interruptible(adapter->cmd_wait_q.wait,
- *(cmd_queued->condition));
- if (status) {
+ status = wait_event_interruptible_timeout(adapter->cmd_wait_q.wait,
+ *(cmd_queued->condition),
+ (12 * HZ));
+ if (status <= 0) {
dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
mwifiex_cancel_all_pending_cmd(adapter);
return status;
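
wait_event_interruptible_timeout() returns 0 when the timeout (here 12*HZ) expires, a negative errno-style value when a signal interrupts the wait, and the remaining jiffies (positive) when the condition became true, which is why the check becomes "status <= 0". A stand-alone sketch of interpreting such a result (not mwifiex code):

#include <stdio.h>

/* Sketch: classify a wait_event_interruptible_timeout()-style return value. */
static void handle_wait_result(long status)
{
	if (status == 0)
		printf("timed out waiting for the command response\n");
	else if (status < 0)
		printf("interrupted by a signal (%ld)\n", status);
	else
		printf("condition met with %ld jiffies to spare\n", status);
}

int main(void)
{
	handle_wait_result(0);     /* timeout */
	handle_wait_result(-512);  /* e.g. -ERESTARTSYS */
	handle_wait_result(300);   /* success before the timeout */
	return 0;
}
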
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 1a8d321..cf61d6e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -88,7 +88,7 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
bool recontend_queue = false;
u32 q_len = 0;
u8 q_num = INVALID_QUEUE;
- u8 ii, min = 0;
+ u8 ii = 0, min = 0;
if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
if (!common->mgmt_q_block)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 7369429..1b28cda 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -841,16 +841,6 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
rsi_dbg(MGMT_TX_ZONE,
"%s: Sending scan req frame\n", __func__);
- skb = dev_alloc_skb(FRAME_DESC_SZ);
- if (!skb) {
- rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
- __func__);
- return -ENOMEM;
- }
-
- memset(skb->data, 0, FRAME_DESC_SZ);
- mgmt_frame = (struct rsi_mac_frame *)skb->data;
-
if (common->band == IEEE80211_BAND_5GHZ) {
if ((channel >= 36) && (channel <= 64))
channel = ((channel - 32) / 4);
@@ -868,6 +858,16 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
}
}
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
mgmt_frame->desc_word[4] = cpu_to_le16(channel);
@@ -966,6 +966,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
if (!selected_rates) {
rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
__func__);
+ dev_kfree_skb(skb);
return -ENOMEM;
}
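
The added dev_kfree_skb() closes a leak: once the frame skb is allocated, the kzalloc-failure path must release it before returning -ENOMEM. A stand-alone user-space analogy of that error-path pattern (identifiers invented):

#include <stdlib.h>

/* Analogy of the fixed error path: if the second allocation fails, the
 * first buffer must be freed before bailing out. */
static int build_request(void)
{
	char *frame = calloc(1, 64);             /* stands in for dev_alloc_skb() */
	if (!frame)
		return -1;

	unsigned short *rates = calloc(40, sizeof(*rates));
	if (!rates) {
		free(frame);                     /* mirrors the added dev_kfree_skb() */
		return -1;
	}

	/* ... fill in frame and rates ... */
	free(rates);
	free(frame);
	return 0;
}

int main(void)
{
	return build_request() ? 1 : 0;
}
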
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 398f3d2..a76e98e 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -68,6 +68,26 @@ struct wl18xx_event_mailbox {
/* bitmap of inactive stations (by HLID) */
__le32 inactive_sta_bitmap;
+
+ /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */
+ u8 rx_ba_role_id;
+ u8 rx_ba_link_id;
+ u8 rx_ba_win_size;
+ u8 padding;
+
+ /* smart config */
+ u8 sc_ssid_len;
+ u8 sc_pwd_len;
+ u8 sc_token_len;
+ u8 padding1;
+ u8 sc_ssid[32];
+ u8 sc_pwd[32];
+ u8 sc_token[32];
+
+ /* smart config sync channel */
+ u8 sc_sync_channel;
+ u8 sc_sync_band;
+ u8 padding2[2];
} __packed;
int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1f9a360..16d1028 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -158,6 +158,11 @@ EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
void wlcore_event_dummy_packet(struct wl1271 *wl)
{
+ if (wl->plt) {
+ wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
+ return;
+ }
+
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
wl1271_tx_dummy_packet(wl);
}