Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 3
-rw-r--r--  drivers/net/can/dev.c | 4
-rw-r--r--  drivers/net/can/m_can/Kconfig | 1
-rw-r--r--  drivers/net/can/m_can/m_can.c | 219
-rw-r--r--  drivers/net/can/rcar_can.c | 1
-rw-r--r--  drivers/net/can/sja1000/kvaser_pci.c | 5
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 3
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 3
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 1
-rw-r--r--  drivers/net/can/xilinx_can.c | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 58
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 18
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 11
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 5
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 7
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 33
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 136
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 20
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 11
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 39
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 6
-rw-r--r--  drivers/net/ethernet/qualcomm/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 3
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 20
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 61
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 52
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 13
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 62
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 2
-rw-r--r--  drivers/net/ieee802154/fakehard.c | 13
-rw-r--r--  drivers/net/macvtap.c | 2
-rw-r--r--  drivers/net/phy/dp83640.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 36
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 40
-rw-r--r--  drivers/net/ppp/pptp.c | 4
-rw-r--r--  drivers/net/tun.c | 28
-rw-r--r--  drivers/net/usb/asix_devices.c | 14
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 37
-rw-r--r--  drivers/net/vxlan.c | 41
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 9
-rw-r--r--  drivers/net/wireless/b43/phy_common.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/of.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/pcie.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 6
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 7
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 50
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 39
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/hw.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/phy.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/sw.c | 16
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 5
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 15
88 files changed, 1112 insertions, 498 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c9ac06c..a5115fb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2471,7 +2471,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
bond_slave_state_change(bond);
if (BOND_MODE(bond) == BOND_MODE_XOR)
bond_update_slave_arr(bond, NULL);
- } else if (do_failover) {
+ }
+ if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 02492d2..2cfe501 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -110,7 +110,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
long rate;
u64 v64;
- /* Use CIA recommended sample points */
+ /* Use CiA recommended sample points */
if (bt->sample_point) {
sampl_pt = bt->sample_point;
} else {
@@ -382,7 +382,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
BUG_ON(idx >= priv->echo_skb_max);
if (priv->echo_skb[idx]) {
- kfree_skb(priv->echo_skb[idx]);
+ dev_kfree_skb_any(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
}
}
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index fca5482..04f20dd 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,4 +1,5 @@
config CAN_M_CAN
+ depends on HAS_IOMEM
tristate "Bosch M_CAN devices"
---help---
Say Y here if you want to support for Bosch M_CAN controller.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 10d571e..d7bc462 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -105,14 +105,36 @@ enum m_can_mram_cfg {
MRAM_CFG_NUM,
};
+/* Fast Bit Timing & Prescaler Register (FBTP) */
+#define FBTR_FBRP_MASK 0x1f
+#define FBTR_FBRP_SHIFT 16
+#define FBTR_FTSEG1_SHIFT 8
+#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT)
+#define FBTR_FTSEG2_SHIFT 4
+#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT)
+#define FBTR_FSJW_SHIFT 0
+#define FBTR_FSJW_MASK 0x3
+
/* Test Register (TEST) */
#define TEST_LBCK BIT(4)
/* CC Control Register(CCCR) */
-#define CCCR_TEST BIT(7)
-#define CCCR_MON BIT(5)
-#define CCCR_CCE BIT(1)
-#define CCCR_INIT BIT(0)
+#define CCCR_TEST BIT(7)
+#define CCCR_CMR_MASK 0x3
+#define CCCR_CMR_SHIFT 10
+#define CCCR_CMR_CANFD 0x1
+#define CCCR_CMR_CANFD_BRS 0x2
+#define CCCR_CMR_CAN 0x3
+#define CCCR_CME_MASK 0x3
+#define CCCR_CME_SHIFT 8
+#define CCCR_CME_CAN 0
+#define CCCR_CME_CANFD 0x1
+#define CCCR_CME_CANFD_BRS 0x2
+#define CCCR_TEST BIT(7)
+#define CCCR_MON BIT(5)
+#define CCCR_CCE BIT(1)
+#define CCCR_INIT BIT(0)
+#define CCCR_CANFD 0x10
/* Bit Timing & Prescaler Register (BTP) */
#define BTR_BRP_MASK 0x3ff
@@ -204,6 +226,7 @@ enum m_can_mram_cfg {
/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define M_CAN_RXESC_8BYTES 0x0
+#define M_CAN_RXESC_64BYTES 0x777
/* Tx Buffer Configuration(TXBC) */
#define TXBC_NDTB_OFF 16
@@ -211,6 +234,7 @@ enum m_can_mram_cfg {
/* Tx Buffer Element Size Configuration(TXESC) */
#define TXESC_TBDS_8BYTES 0x0
+#define TXESC_TBDS_64BYTES 0x7
/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_OFF 16
@@ -219,11 +243,11 @@ enum m_can_mram_cfg {
/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE 4
#define XIDF_ELEMENT_SIZE 8
-#define RXF0_ELEMENT_SIZE 16
-#define RXF1_ELEMENT_SIZE 16
+#define RXF0_ELEMENT_SIZE 72
+#define RXF1_ELEMENT_SIZE 72
#define RXB_ELEMENT_SIZE 16
#define TXE_ELEMENT_SIZE 8
-#define TXB_ELEMENT_SIZE 16
+#define TXB_ELEMENT_SIZE 72
/* Message RAM Elements */
#define M_CAN_FIFO_ID 0x0
@@ -231,11 +255,17 @@ enum m_can_mram_cfg {
#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2))
/* Rx Buffer Element */
+/* R0 */
#define RX_BUF_ESI BIT(31)
#define RX_BUF_XTD BIT(30)
#define RX_BUF_RTR BIT(29)
+/* R1 */
+#define RX_BUF_ANMF BIT(31)
+#define RX_BUF_EDL BIT(21)
+#define RX_BUF_BRS BIT(20)
/* Tx Buffer Element */
+/* R0 */
#define TX_BUF_XTD BIT(30)
#define TX_BUF_RTR BIT(29)
@@ -296,6 +326,7 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv,
if (enable) {
/* enable m_can configuration */
m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
+ udelay(5);
/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
} else {
@@ -326,41 +357,67 @@ static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
m_can_write(priv, M_CAN_ILE, 0x0);
}
-static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf,
- u32 rxfs)
+static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
+ struct net_device_stats *stats = &dev->stats;
struct m_can_priv *priv = netdev_priv(dev);
- u32 id, fgi;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 id, fgi, dlc;
+ int i;
/* calculate the fifo get index for where to read data */
fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
+ dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
+ if (dlc & RX_BUF_EDL)
+ skb = alloc_canfd_skb(dev, &cf);
+ else
+ skb = alloc_can_skb(dev, (struct can_frame **)&cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ if (dlc & RX_BUF_EDL)
+ cf->len = can_dlc2len((dlc >> 16) & 0x0F);
+ else
+ cf->len = get_can_dlc((dlc >> 16) & 0x0F);
+
id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
if (id & RX_BUF_XTD)
cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
cf->can_id = (id >> 18) & CAN_SFF_MASK;
- if (id & RX_BUF_RTR) {
+ if (id & RX_BUF_ESI) {
+ cf->flags |= CANFD_ESI;
+ netdev_dbg(dev, "ESI Error\n");
+ }
+
+ if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) {
cf->can_id |= CAN_RTR_FLAG;
} else {
- id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
- cf->can_dlc = get_can_dlc((id >> 16) & 0x0F);
- *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi,
- M_CAN_FIFO_DATA(0));
- *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi,
- M_CAN_FIFO_DATA(1));
+ if (dlc & RX_BUF_BRS)
+ cf->flags |= CANFD_BRS;
+
+ for (i = 0; i < cf->len; i += 4)
+ *(u32 *)(cf->data + i) =
+ m_can_fifo_read(priv, fgi,
+ M_CAN_FIFO_DATA(i / 4));
}
/* acknowledge rx fifo 0 */
m_can_write(priv, M_CAN_RXF0A, fgi);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+
+ netif_receive_skb(skb);
}
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
struct m_can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
- struct can_frame *frame;
u32 pkts = 0;
u32 rxfs;
@@ -374,18 +431,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
if (rxfs & RXFS_RFL)
netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
- skb = alloc_can_skb(dev, &frame);
- if (!skb) {
- stats->rx_dropped++;
- return pkts;
- }
-
- m_can_read_fifo(dev, frame, rxfs);
-
- stats->rx_packets++;
- stats->rx_bytes += frame->can_dlc;
-
- netif_receive_skb(skb);
+ m_can_read_fifo(dev, rxfs);
quota--;
pkts++;
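
The rewritten m_can_read_fifo() above allocates either a CAN FD or a classic CAN skb depending on the RX_BUF_EDL bit and converts the 4-bit DLC field with can_dlc2len() for FD frames versus get_can_dlc() for classic frames. A minimal user-space sketch of that length mapping, assuming only the ISO 11898-1 DLC table (the helpers below are local stand-ins, not the kernel API):

#include <stdio.h>

/* CAN FD DLC (0..15) to payload length in bytes */
static const unsigned char fd_dlc2len[16] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64
};

/* Classic CAN clamps DLC values above 8 to 8 bytes */
static unsigned char classic_dlc2len(unsigned char dlc)
{
	return dlc > 8 ? 8 : dlc;
}

int main(void)
{
	unsigned char dlc;

	for (dlc = 0; dlc < 16; dlc++)
		printf("DLC %2u -> FD %2u bytes, classic %u bytes\n",
		       dlc, fd_dlc2len[dlc], classic_dlc2len(dlc));
	return 0;
}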
@@ -481,11 +527,23 @@ static int m_can_handle_lec_err(struct net_device *dev,
return 1;
}
+static int __m_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct m_can_priv *priv = netdev_priv(dev);
+ unsigned int ecr;
+
+ ecr = m_can_read(priv, M_CAN_ECR);
+ bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
+ bec->txerr = ecr & ECR_TEC_MASK;
+
+ return 0;
+}
+
static int m_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
struct m_can_priv *priv = netdev_priv(dev);
- unsigned int ecr;
int err;
err = clk_prepare_enable(priv->hclk);
@@ -498,9 +556,7 @@ static int m_can_get_berr_counter(const struct net_device *dev,
return err;
}
- ecr = m_can_read(priv, M_CAN_ECR);
- bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
- bec->txerr = ecr & ECR_TEC_MASK;
+ __m_can_get_berr_counter(dev, bec);
clk_disable_unprepare(priv->cclk);
clk_disable_unprepare(priv->hclk);
@@ -544,7 +600,7 @@ static int m_can_handle_state_change(struct net_device *dev,
if (unlikely(!skb))
return 0;
- m_can_get_berr_counter(dev, &bec);
+ __m_can_get_berr_counter(dev, &bec);
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
@@ -596,14 +652,14 @@ static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
if ((psr & PSR_EP) &&
(priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
- netdev_dbg(dev, "entered error warning state\n");
+ netdev_dbg(dev, "entered error passive state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_ERROR_PASSIVE);
}
if ((psr & PSR_BO) &&
(priv->can.state != CAN_STATE_BUS_OFF)) {
- netdev_dbg(dev, "entered error warning state\n");
+ netdev_dbg(dev, "entered error bus off state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_BUS_OFF);
}
@@ -615,7 +671,7 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
if (irqstatus & IR_WDI)
netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
- if (irqstatus & IR_BEU)
+ if (irqstatus & IR_ELO)
netdev_err(dev, "Error Logging Overflow\n");
if (irqstatus & IR_BEU)
netdev_err(dev, "Bit Error Uncorrected\n");
@@ -733,10 +789,23 @@ static const struct can_bittiming_const m_can_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const m_can_data_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -747,7 +816,17 @@ static int m_can_set_bittiming(struct net_device *dev)
reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
(tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
m_can_write(priv, M_CAN_BTP, reg_btp);
- netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+ reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) |
+ (tseg1 << FBTR_FTSEG1_SHIFT) |
+ (tseg2 << FBTR_FTSEG2_SHIFT);
+ m_can_write(priv, M_CAN_FBTP, reg_btp);
+ }
return 0;
}
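
With CAN_CTRLMODE_FD enabled, m_can_set_bittiming() above additionally packs the data-phase (fast) bit timing into M_CAN_FBTP using the FBTR_* shifts introduced at the top of this patch. A small stand-alone sketch of that packing, assuming the shift values from the defines above (the struct is a simplified stand-in for struct can_bittiming):

#include <stdint.h>
#include <stdio.h>

#define FBTR_FBRP_SHIFT   16
#define FBTR_FTSEG1_SHIFT 8
#define FBTR_FTSEG2_SHIFT 4
#define FBTR_FSJW_SHIFT   0

struct fd_data_timing {		/* simplified stand-in */
	unsigned int brp, sjw, prop_seg, phase_seg1, phase_seg2;
};

static uint32_t pack_fbtp(const struct fd_data_timing *dbt)
{
	/* hardware fields are encoded as "value - 1" */
	uint32_t brp = dbt->brp - 1;
	uint32_t sjw = dbt->sjw - 1;
	uint32_t tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
	uint32_t tseg2 = dbt->phase_seg2 - 1;

	return (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) |
	       (tseg1 << FBTR_FTSEG1_SHIFT) | (tseg2 << FBTR_FTSEG2_SHIFT);
}

int main(void)
{
	struct fd_data_timing dbt = {
		.brp = 1, .sjw = 1, .prop_seg = 1,
		.phase_seg1 = 6, .phase_seg2 = 2,	/* illustrative values */
	};

	printf("FBTP = 0x%08x\n", pack_fbtp(&dbt));
	return 0;
}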
@@ -767,8 +846,8 @@ static void m_can_chip_config(struct net_device *dev)
m_can_config_endisable(priv, true);
- /* RX Buffer/FIFO Element Size 8 bytes data field */
- m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES);
+ /* RX Buffer/FIFO Element Size 64 bytes data field */
+ m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
/* Accept Non-matching Frames Into FIFO 0 */
m_can_write(priv, M_CAN_GFC, 0x0);
@@ -777,8 +856,8 @@ static void m_can_chip_config(struct net_device *dev)
m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
priv->mcfg[MRAM_TXB].off);
- /* only support 8 bytes firstly */
- m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES);
+ /* support 64 bytes payload */
+ m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);
m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
priv->mcfg[MRAM_TXE].off);
@@ -793,7 +872,8 @@ static void m_can_chip_config(struct net_device *dev)
RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);
cccr = m_can_read(priv, M_CAN_CCCR);
- cccr &= ~(CCCR_TEST | CCCR_MON);
+ cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
+ (CCCR_CME_MASK << CCCR_CME_SHIFT));
test = m_can_read(priv, M_CAN_TEST);
test &= ~TEST_LBCK;
@@ -805,6 +885,9 @@ static void m_can_chip_config(struct net_device *dev)
test |= TEST_LBCK;
}
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
+
m_can_write(priv, M_CAN_CCCR, cccr);
m_can_write(priv, M_CAN_TEST, test);
@@ -869,11 +952,13 @@ static struct net_device *alloc_m_can_dev(void)
priv->dev = dev;
priv->can.bittiming_const = &m_can_bittiming_const;
+ priv->can.data_bittiming_const = &m_can_data_bittiming_const;
priv->can.do_set_mode = m_can_set_mode;
priv->can.do_get_berr_counter = m_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_BERR_REPORTING;
+ CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_FD;
return dev;
}
@@ -956,8 +1041,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_priv *priv = netdev_priv(dev);
- struct can_frame *cf = (struct can_frame *)skb->data;
- u32 id;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 id, cccr;
+ int i;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -976,11 +1062,28 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
/* message ram configuration */
m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
- m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16);
- m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0));
- m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4));
+ m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16);
+
+ for (i = 0; i < cf->len; i += 4)
+ m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4),
+ *(u32 *)(cf->data + i));
+
can_put_echo_skb(skb, dev, 0);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ cccr = m_can_read(priv, M_CAN_CCCR);
+ cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
+ if (can_is_canfd_skb(skb)) {
+ if (cf->flags & CANFD_BRS)
+ cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT;
+ else
+ cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT;
+ } else {
+ cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
+ }
+ m_can_write(priv, M_CAN_CCCR, cccr);
+ }
+
/* enable first TX buffer to start transfer */
m_can_write(priv, M_CAN_TXBTIE, 0x1);
m_can_write(priv, M_CAN_TXBAR, 0x1);
@@ -992,6 +1095,7 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
.ndo_start_xmit = m_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int register_m_can_dev(struct net_device *dev)
@@ -1009,7 +1113,7 @@ static int m_can_of_parse_mram(struct platform_device *pdev,
struct resource *res;
void __iomem *addr;
u32 out_val[MRAM_CFG_LEN];
- int ret;
+ int i, start, end, ret;
/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
@@ -1060,6 +1164,15 @@ static int m_can_of_parse_mram(struct platform_device *pdev,
priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = priv->mcfg[MRAM_SIDF].off;
+ end = priv->mcfg[MRAM_TXB].off +
+ priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+ for (i = start; i < end; i += 4)
+ writel(0x0, priv->mram_base + i);
+
return 0;
}
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 1abe133..9718248 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -628,6 +628,7 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_open = rcar_can_open,
.ndo_stop = rcar_can_close,
.ndo_start_xmit = rcar_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 8ff3424..15c00fa 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -214,7 +214,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
struct net_device *dev;
struct sja1000_priv *priv;
struct kvaser_pci *board;
- int err, init_step;
+ int err;
dev = alloc_sja1000dev(sizeof(struct kvaser_pci));
if (dev == NULL)
@@ -235,7 +235,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
if (channel == 0) {
board->xilinx_ver =
ioread8(board->res_addr + XILINX_VERINT) >> 4;
- init_step = 2;
/* Assert PTADR# - we're in passive mode so the other bits are
not important */
@@ -264,8 +263,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
- init_step = 4;
-
dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n",
priv->reg_base, board->conf_addr, dev->irq);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 00f2534..29d3f09 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -434,10 +434,9 @@ static void ems_usb_read_bulk_callback(struct urb *urb)
if (urb->actual_length > CPC_HEADER_SIZE) {
struct ems_cpc_msg *msg;
u8 *ibuf = urb->transfer_buffer;
- u8 msg_count, again, start;
+ u8 msg_count, start;
msg_count = ibuf[0] & ~0x80;
- again = ibuf[0] & 0x80;
start = CPC_HEADER_SIZE;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index b7c9e8b..c063a54 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -464,7 +464,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
{
struct esd_tx_urb_context *context = urb->context;
struct esd_usb2_net_priv *priv;
- struct esd_usb2 *dev;
struct net_device *netdev;
size_t size = sizeof(struct esd_usb2_msg);
@@ -472,7 +471,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
priv = context->priv;
netdev = priv->netdev;
- dev = priv->usb2;
/* free up our allocated buffer */
usb_free_coherent(urb->dev, size,
@@ -1143,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
}
}
unlink_all_urbs(dev);
+ kfree(dev);
}
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 04b0f84..009acc8 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -718,6 +718,7 @@ static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 5e8b560..8a998e3 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -300,7 +300,8 @@ static int xcan_set_bittiming(struct net_device *ndev)
static int xcan_chip_start(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 err, reg_msr, reg_sr_mask;
+ u32 reg_msr, reg_sr_mask;
+ int err;
unsigned long timeout;
/* Check if it is in reset mode */
@@ -961,6 +962,7 @@ static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
.ndo_start_xmit = xcan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
/**
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b962596..4f4c2a7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+ reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
+ core_writel(priv, reg, CORE_WATCHDOG_CTRL);
+
+ do {
+ reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+ if (!(reg & SOFTWARE_RESET))
+ break;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
*base = of_iomap(dn, i);
if (*base == NULL) {
pr_err("unable to find register: %s\n", reg_names[i]);
- return -ENODEV;
+ ret = -ENOMEM;
+ goto out_unmap;
}
base++;
}
+ ret = bcm_sf2_sw_rst(priv);
+ if (ret) {
+ pr_err("unable to software reset switch: %d\n", ret);
+ goto out_unmap;
+ }
+
/* Disable all interrupts and request them */
intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,7 +514,8 @@ out_free_irq0:
out_unmap:
base = &priv->core;
for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- iounmap(*base);
+ if (*base)
+ iounmap(*base);
base++;
}
return ret;
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
return 0;
}
-static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
-{
- unsigned int timeout = 1000;
- u32 reg;
-
- reg = core_readl(priv, CORE_WATCHDOG_CTRL);
- reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
- core_writel(priv, reg, CORE_WATCHDOG_CTRL);
-
- do {
- reg = core_readl(priv, CORE_WATCHDOG_CTRL);
- if (!(reg & SOFTWARE_RESET))
- break;
-
- usleep_range(1000, 2000);
- } while (timeout-- > 0);
-
- if (timeout == 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 63ea194..7ba83ff 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+ if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+ return false;
+
+ if (ioread32(p->ring_csr_addr + SRST_ADDR))
+ return false;
+
+ return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
u32 val;
+ if (!xgene_ring_mgr_init(pdata))
+ return -ENODEV;
+
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
val |= SCAN_AUTO_INCR;
MGMT_CLOCK_SEL_SET(&val, 1);
xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+ return 0;
}
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 3855858..ec45f32 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
+#define CLKEN_ADDR 0xc208
+#define SRST_ADDR 0xc200
+
#define MAC_ADDR_REG_OFFSET 0x00
#define MAC_COMMAND_REG_OFFSET 0x04
#define MAC_WRITE_REG_OFFSET 0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3c208cc..1236696 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
- u8 cpu_bufnum = 0, eth_bufnum = 0;
- u8 bp_bufnum = 0x20;
- u16 ring_id, ring_num = 0;
+ u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+ u8 bp_bufnum = START_BP_BUFNUM;
+ u16 ring_id, ring_num = START_RING_NUM;
int ret;
/* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
u16 dst_ring_num;
int ret;
- pdata->port_ops->reset(pdata);
+ ret = pdata->port_ops->reset(pdata);
+ if (ret)
+ return ret;
ret = xgene_enet_create_desc_rings(ndev);
if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
return ret;
err:
+ unregister_netdev(ndev);
free_netdev(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 874e5a0..f9958fa 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
+#define START_ETH_BUFNUM 2
+#define START_BP_BUFNUM 0x22
+#define START_RING_NUM 8
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
};
struct xgene_port_ops {
- void (*reset)(struct xgene_enet_pdata *pdata);
+ int (*reset)(struct xgene_enet_pdata *pdata);
void (*cle_bypass)(struct xgene_enet_pdata *pdata,
u32 dst_ring_num, u16 bufpool_id);
void (*shutdown)(struct xgene_enet_pdata *pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index c22f326..f5d4f68 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
xgene_sgmac_rxtx(p, TX_EN, false);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
+ if (!xgene_ring_mgr_init(p))
+ return -ENODEV;
+
clk_prepare_enable(p->clk);
clk_disable_unprepare(p->clk);
clk_prepare_enable(p->clk);
xgene_enet_ecc_init(p);
xgene_enet_config_ring_if_assoc(p);
+
+ return 0;
}
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 67d0720..a18a9d1 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
+ if (!xgene_ring_mgr_init(pdata))
+ return -ENODEV;
+
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
xgene_enet_ecc_init(pdata);
xgene_enet_config_ring_if_assoc(pdata);
+
+ return 0;
}
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3a6778a..531bb7c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* We just need one DMA descriptor which is DMA-able, since writing to
* the port will allocate a new descriptor in its internal linked-list
*/
- p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+ GFP_KERNEL);
if (!p) {
netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
if (!(reg & TDMA_DISABLED))
netdev_warn(priv->netdev, "TDMA not stopped!\n");
+ /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+ * fail, so by checking this pointer we know whether the TX ring was
+ * fully initialized or not.
+ */
+ if (!ring->cbs)
+ return;
+
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = NULL;
if (ring->desc_dma) {
- dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+ dma_free_coherent(kdev, sizeof(struct dma_desc),
+ ring->desc_cpu, ring->desc_dma);
ring->desc_dma = 0;
}
ring->size = 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fdc9ec0..da1a250 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
+ /* Re-configure the port multiplexer towards the PHY device */
+ bcmgenet_mii_config(priv->dev, false);
+
+ phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+
bcmgenet_netif_start(dev);
return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
bcmgenet_netif_stop(dev);
+ /* Really kill the PHY state machine and disconnect from it */
+ phy_disconnect(priv->phydev);
+
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev);
+ bcmgenet_mii_config(priv->dev, false);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbf524e..31b2da5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);
/* Wake-on-LAN routines */
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ff799a..933cd7e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
/* setup netdev link state when PHY link status change and
* update UMAC and RGMII block when link up
*/
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
}
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
return -EINVAL;
}
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+ if (init)
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev);
+ ret = bcmgenet_mii_config(dev, true);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index dbb41c1..77f8f83 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
if (tnapi->rx_rcb)
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+ if (tnapi->prodring.rx_std &&
+ tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
tg3_free_rings(tp);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6fe300e..4fe3360 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
app.protocol = dcb->app_priority[i].protocolid;
if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+ app.priority = dcb->app_priority[i].user_prio_map;
app.selector = dcb->app_priority[i].sel_field + 1;
- err = dcb_ieee_setapp(dev, &app);
+ err = dcb_ieee_delapp(dev, &app);
} else {
app.selector = !!(dcb->app_priority[i].sel_field);
err = dcb_setapp(dev, &app);
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
case CXGB4_DCB_INPUT_FW_ENABLED: {
/* we're going to use Firmware DCB */
dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
- dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+ dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+ dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+ else
+ dcb->supported |= DCB_CAP_DCBX_VER_CEE;
break;
}
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
*up_tc_map = (1 << tc);
/* prio_type is link strict */
- *prio_type = 0x2;
+ if (*pgid != 0xF)
+ *prio_type = 0x2;
}
static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
u8 *prio_type, u8 *pgid, u8 *bw_per,
u8 *up_tc_map)
{
- return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+ /* tc 0 is written at MSB position */
+ return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+ up_tc_map, 1);
}
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
u8 *prio_type, u8 *pgid, u8 *bw_per,
u8 *up_tc_map)
{
- return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+ /* tc 0 is written at MSB position */
+ return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+ up_tc_map, 0);
}
static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
struct fw_port_cmd pcmd;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = pi->adapter;
+ int fw_tc = 7 - tc;
u32 _pgid;
int err;
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
}
_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
- _pgid &= ~(0xF << (tc * 4));
- _pgid |= pgid << (tc * 4);
+ _pgid &= ~(0xF << (fw_tc * 4));
+ _pgid |= pgid << (fw_tc * 4);
pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
priority >= CXGB4_MAX_PRIORITY)
*pfccfg = 0;
else
- *pfccfg = (pi->dcb.pfcen >> priority) & 1;
+ *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
}
/* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
if (pfccfg)
- pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+ pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
else
- pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+ pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -1071,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
- pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF;
+ pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;
INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
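
Most of the cxgb4_dcb.c hunks above correct an index direction: the firmware keeps traffic class 0 in the most significant nibble of the PGID word and priority 0 in the most significant bit of pfcen, so a host index i must be mapped to firmware position 7 - i. A user-space sketch of the nibble mapping, assuming only that layout (names and the sample PGID value are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Firmware keeps TC 0 in the most significant nibble of the PGID word */
static unsigned int get_prio_pg(uint32_t pgid, unsigned int tc)
{
	unsigned int fw_tc = 7 - tc;

	return (pgid >> (fw_tc * 4)) & 0xF;
}

static uint32_t set_prio_pg(uint32_t pgid, unsigned int tc, unsigned int pg)
{
	unsigned int fw_tc = 7 - tc;

	pgid &= ~(0xFu << (fw_tc * 4));
	return pgid | ((uint32_t)pg << (fw_tc * 4));
}

int main(void)
{
	uint32_t pgid = 0x01234567;	/* illustrative firmware PGID word */

	printf("tc0 -> pg %u\n", get_prio_pg(pgid, 0));		/* 0 (MSB nibble) */
	printf("tc7 -> pg %u\n", get_prio_pg(pgid, 7));		/* 7 (LSB nibble) */
	printf("set tc0=0xF -> 0x%08x\n", set_prio_pg(pgid, 0, 0xF));
	return 0;
}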
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 5e1b314..39f2b13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_conm_ctrl;
+ u32 sge_control, sge_control2, sge_conm_ctrl;
+ unsigned int ingpadboundary, ingpackboundary;
int ret, egress_threshold;
/*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
sge_control = t4_read_reg(adap, SGE_CONTROL);
s->pktshift = PKTSHIFT_GET(sge_control);
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
- s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+ if (is_t4(adap->params.chip)) {
+ s->fl_align = ingpadboundary;
+ } else {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ s->fl_align = max(ingpadboundary, ingpackboundary);
+ }
if (adap->flags & USING_SOFT_PARAMS)
ret = t4_sge_init_soft(adap);
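
The t4_sge_init() change above derives the free-list alignment from two register fields: on T4 it is simply the ingress padding boundary, while on T5 it is the maximum of the padding boundary and the separately programmed packing boundary, where the encoded packing value 0 means 16 bytes rather than 1 << 5. A user-space sketch of that calculation, assuming the shift constants from t4_regs.h in this patch (the helper takes raw field values directly):

#include <stdbool.h>
#include <stdio.h>

#define X_INGPADBOUNDARY_SHIFT  5
#define INGPACKBOUNDARY_SHIFT_X 5
#define INGPACKBOUNDARY_16B_X   0

static unsigned int fl_align(bool is_t4, unsigned int padbound_field,
			     unsigned int packbound_field)
{
	unsigned int pad = 1u << (padbound_field + X_INGPADBOUNDARY_SHIFT);
	unsigned int pack;

	if (is_t4)
		return pad;

	/* T5: the encoded value 0 means 16 bytes, not 1 << 5 */
	if (packbound_field == INGPACKBOUNDARY_16B_X)
		pack = 16;
	else
		pack = 1u << (packbound_field + INGPACKBOUNDARY_SHIFT_X);

	return pad > pack ? pad : pack;
}

int main(void)
{
	printf("T4, pad=0         -> %u\n", fl_align(true, 0, 0));	/* 32 */
	printf("T5, pad=0, pack=0 -> %u\n", fl_align(false, 0, 0));	/* 32 */
	printf("T5, pad=0, pack=1 -> %u\n", fl_align(false, 0, 1));	/* 64 */
	return 0;
}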
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9d9d74..163a2a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
HOSTPAGESIZEPF6(sge_hps) |
HOSTPAGESIZEPF7(sge_hps));
- t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE_MASK,
- INGPADBOUNDARY(fl_align_log - 5) |
- EGRSTATUSPAGESIZE(stat_len != 64));
-
+ if (is_t4(adap->params.chip)) {
+ t4_set_reg_field(adap, SGE_CONTROL,
+ INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE_MASK,
+ INGPADBOUNDARY(fl_align_log - 5) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+ } else {
+ /* T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good. For T5 the smallest
+ * Padding Boundary which we can select is 32 bytes which is
+ * larger than any known Memory Controller Line Size so we'll
+ * use that.
+ *
+ * T5 has a different interpretation of the "0" value for the
+ * Packing Boundary. This corresponds to 16 bytes instead of
+ * the expected 32 bytes. We never have a Packing Boundary
+ * less than 32 bytes so we can't use that special value but
+ * on the other hand, if we wanted 32 bytes, the best we can
+ * really do is 64 bytes.
+ */
+ if (fl_align <= 32) {
+ fl_align = 64;
+ fl_align_log = 6;
+ }
+ t4_set_reg_field(adap, SGE_CONTROL,
+ INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE_MASK,
+ INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, SGE_CONTROL2_A,
+ INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+ INGPACKBOUNDARY_V(fl_align_log -
+ INGPACKBOUNDARY_SHIFT_X));
+ }
/*
* Adjust various SGE Free List Host Buffer Sizes.
*
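
On the write side, t4_fixup_host_params() above has to work around the same quirk: T5 cannot express a 32-byte packing boundary with the usual 1 << (field + 5) encoding because field value 0 already means 16 bytes, so a 32-byte request is bumped to 64 and the register field is programmed as fl_align_log - 5. A small sketch of that encoding step, assuming only the behaviour described in the comment above (function name is local, not the driver's):

#include <stdio.h>

#define INGPACKBOUNDARY_SHIFT_X 5

/* Pick the T5 packing-boundary field for a requested alignment; the
 * encoding "0" means 16 bytes, so a 32-byte request is rounded up to 64.
 */
static unsigned int t5_pack_field(unsigned int fl_align, unsigned int *chosen)
{
	unsigned int fl_align_log = 0;

	if (fl_align <= 32)
		fl_align = 64;
	while ((1u << fl_align_log) < fl_align)
		fl_align_log++;

	*chosen = fl_align;
	return fl_align_log - INGPACKBOUNDARY_SHIFT_X;
}

int main(void)
{
	unsigned int chosen, field;

	field = t5_pack_field(32, &chosen);
	printf("requested 32 -> programmed %u, field %u\n", chosen, field);
	field = t5_pack_field(128, &chosen);
	printf("requested 128 -> programmed %u, field %u\n", chosen, field);
	return 0;
}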
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1024db..8d2de10 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
#define X_INGPADBOUNDARY_SHIFT 5
#define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A 0x1124
#define DCASYSTYPE 0x00080000U
#define RXPKTCPLMODE_MASK 0x00040000U
#define RXPKTCPLMODE_SHIFT 18
@@ -106,6 +107,7 @@
#define PKTSHIFT_SHIFT 10
#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define INGPCIEBOUNDARY_32B_X 0
#define INGPCIEBOUNDARY_MASK 0x00000380U
#define INGPCIEBOUNDARY_SHIFT 7
#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
>> INGPADBOUNDARY_SHIFT)
+#define INGPACKBOUNDARY_16B_X 0
+#define INGPACKBOUNDARY_SHIFT_X 5
+
+#define INGPACKBOUNDARY_S 16
+#define INGPACKBOUNDARY_M 0x7U
+#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
+#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
+ & INGPACKBOUNDARY_M)
#define EGRPCIEBOUNDARY_MASK 0x0000000eU
#define EGRPCIEBOUNDARY_SHIFT 1
#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c..3d06e77 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
+ /* Decoded Adapter Parameters.
+ */
+ u32 fl_pg_order; /* large page allocation size */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_starve_thres; /* Free List starvation threshold */
+
/*
* Reverse maps from Absolute Queue IDs to associated queue pointers.
* The absolute Queue IDs are in a compact range which start at a
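
The new struct sge members above replace the file-scope FL_PG_ORDER/STAT_LEN/PKTSHIFT/FL_ALIGN globals that cxgb4vf/sge.c removes further down: decoding the SGE parameters once per adapter instead of once per module lets adapters with different settings coexist. A tiny sketch of that pattern, with purely illustrative names:

#include <stdio.h>

/* decoded parameters live in per-device state, not in globals */
struct sge_decoded {
	unsigned int stat_len;
	unsigned int pktshift;
	unsigned int fl_align;
};

struct adapter_ctx {
	const char *name;
	struct sge_decoded sge;
};

static void rx_path(const struct adapter_ctx *adap)
{
	/* every consumer takes the adapter and reads adap->sge.* */
	printf("%s: fl_align %u, pktshift %u, stat_len %u\n",
	       adap->name, adap->sge.fl_align, adap->sge.pktshift,
	       adap->sge.stat_len);
}

int main(void)
{
	struct adapter_ctx vf0 = { "vf0", { 64, 2, 32 } };
	struct adapter_ctx vf1 = { "vf1", { 128, 2, 64 } };

	rx_path(&vf0);
	rx_path(&vf1);
	return 0;
}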
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6..fdd078d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
#include "../cxgb4/t4_msg.h"
/*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER; /* large page allocation size */
-static u32 STAT_LEN; /* length of status page at ring end */
-static u32 PKTSHIFT; /* padding between CPL and packet data */
-static u32 FL_ALIGN; /* response queue message alignment */
-
-/*
* Constants ...
*/
enum {
@@ -102,12 +94,6 @@ enum {
MAX_TIMER_TX_RECLAIM = 100,
/*
- * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
- * timer will attempt to refill it.
- */
- FL_STARVE_THRES = 4,
-
- /*
* Suspend an Ethernet TX queue with fewer available descriptors than
* this. We always want to have room for a maximum sized packet:
* inline immediate data + MAX_SKB_FRAGS. This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
/**
* fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
* @fl: the Free List
*
* Tests specified Free List to see whether the number of buffers
* available to the hardware has fallen below our "starvation"
* threshold.
*/
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+ const struct sge_fl *fl)
{
- return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+ const struct sge *s = &adapter->sge;
+
+ return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
/**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
/**
* get_buf_size - return the size of an RX Free List buffer.
+ * @adapter: pointer to the associated adapter
* @sdesc: pointer to the software buffer descriptor
*/
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+ const struct rx_sw_desc *sdesc)
{
- return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
- ? (PAGE_SIZE << FL_PG_ORDER)
- : PAGE_SIZE;
+ const struct sge *s = &adapter->sge;
+
+ return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+ ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}
/**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ get_buf_size(adapter, sdesc),
+ PCI_DMA_FROMDEVICE);
put_page(sdesc->page);
sdesc->page = NULL;
if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ get_buf_size(adapter, sdesc),
+ PCI_DMA_FROMDEVICE);
sdesc->page = NULL;
if (++fl->cidx == fl->size)
fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
int n, gfp_t gfp)
{
+ struct sge *s = &adapter->sge;
struct page *page;
dma_addr_t dma_addr;
unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
* If we don't support large pages, drop directly into the small page
* allocation code.
*/
- if (FL_PG_ORDER == 0)
+ if (s->fl_pg_order == 0)
goto alloc_small_pages;
while (n) {
page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- FL_PG_ORDER);
+ s->fl_pg_order);
if (unlikely(!page)) {
/*
* We've failed in our attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
fl->large_alloc_failed++;
break;
}
- poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+ poison_buf(page, PAGE_SIZE << s->fl_pg_order);
dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
- PAGE_SIZE << FL_PG_ORDER,
+ PAGE_SIZE << s->fl_pg_order,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
/*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
* because DMA mapping resources are typically
* critical resources once they become scarce.
*/
- __free_pages(page, FL_PG_ORDER);
+ __free_pages(page, s->fl_pg_order);
goto out;
}
dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
fl->pend_cred += cred;
ring_fl_db(adapter, fl);
- if (unlikely(fl_starving(fl))) {
+ if (unlikely(fl_starving(adapter, fl))) {
smp_wmb();
set_bit(fl->cntxt_id, adapter->sge.starving_fl);
}
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
const struct cpl_rx_pkt *pkt)
{
+ struct adapter *adapter = rxq->rspq.adapter;
+ struct sge *s = &adapter->sge;
int ret;
struct sk_buff *skb;
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
return;
}
- copy_frags(skb, gl, PKTSHIFT);
- skb->len = gl->tot_len - PKTSHIFT;
+ copy_frags(skb, gl, s->pktshift);
+ skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
(rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = rspq->adapter;
+ struct sge *s = &adapter->sge;
/*
* If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
rxq->stats.rx_drops++;
return 0;
}
- __skb_pull(skb, PKTSHIFT);
+ __skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
static int process_responses(struct sge_rspq *rspq, int budget)
{
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = rspq->adapter;
+ struct sge *s = &adapter->sge;
int budget_left = budget;
while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
BUG_ON(frag >= MAX_SKB_FRAGS);
BUG_ON(rxq->fl.avail == 0);
sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
- bufsz = get_buf_size(sdesc);
+ bufsz = get_buf_size(adapter, sdesc);
fp->page = sdesc->page;
fp->offset = rspq->offset;
fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
*/
ret = rspq->handler(rspq, rspq->cur_desc, &gl);
if (likely(ret == 0))
- rspq->offset += ALIGN(fp->size, FL_ALIGN);
+ rspq->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
* schedule napi but the FL is no longer starving.
* No biggie.
*/
- if (fl_starving(fl)) {
+ if (fl_starving(adapter, fl)) {
struct sge_eth_rxq *rxq;
rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
int intr_dest,
struct sge_fl *fl, rspq_handler_t hnd)
{
+ struct sge *s = &adapter->sge;
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
sizeof(__be64), sizeof(struct rx_sw_desc),
- &fl->addr, &fl->sdesc, STAT_LEN);
+ &fl->addr, &fl->sdesc, s->stat_len);
if (!fl->desc) {
ret = -ENOMEM;
goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
* free list ring) in Egress Queue Units.
*/
flsz = (fl->size / FL_PER_EQ_UNIT +
- STAT_LEN / EQ_UNIT);
+ s->stat_len / EQ_UNIT);
/*
* Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *devq,
unsigned int iqid)
{
+ struct sge *s = &adapter->sge;
int ret, nentries;
struct fw_eq_eth_cmd cmd, rpl;
struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
* Calculate the size of the hardware TX Queue (including the Status
* Page on the end of the TX Queue) in units of TX Descriptors.
*/
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
/*
* Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
sizeof(struct tx_desc),
sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
if (!txq->q.desc)
return -ENOMEM;
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
*/
static void free_txq(struct adapter *adapter, struct sge_txq *tq)
{
+ struct sge *s = &adapter->sge;
+
dma_free_coherent(adapter->pdev_dev,
- tq->size * sizeof(*tq->desc) + STAT_LEN,
+ tq->size * sizeof(*tq->desc) + s->stat_len,
tq->desc, tq->phys_addr);
tq->cntxt_id = 0;
tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
struct sge_fl *fl)
{
+ struct sge *s = &adapter->sge;
unsigned int flid = fl ? fl->cntxt_id : 0xffff;
t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
if (fl) {
free_rx_bufs(adapter, fl, fl->avail);
dma_free_coherent(adapter->pdev_dev,
- fl->size * sizeof(*fl->desc) + STAT_LEN,
+ fl->size * sizeof(*fl->desc) + s->stat_len,
fl->desc, fl->addr);
kfree(fl->sdesc);
fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
u32 fl0 = sge_params->sge_fl_buffer_size[0];
u32 fl1 = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
+ unsigned int ingpadboundary, ingpackboundary;
/*
* Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
* Now translate the adapter parameters into our internal forms.
*/
if (fl1)
- FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
- STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
- ? 128 : 64);
- PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
- FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
- SGE_INGPADBOUNDARY_SHIFT);
+ s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+ ? 128 : 64);
+ s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+ * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
+ */
+ ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+ if (is_t4(adapter->params.chip)) {
+ s->fl_align = ingpadboundary;
+ } else {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ s->fl_align = max(ingpadboundary, ingpackboundary);
+ }
+
+ /* A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ s->fl_starve_thres
+ = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
/*
* Set up tasklet timers.
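The padding/packing arithmetic added above is easier to see outside the diff. Below is a rough standalone sketch, not part of the patch; the shift constants and field values are assumptions chosen purely to illustrate how a T5 Padding Boundary and Packing Boundary combine into the free-list alignment, with T4 falling back to the padding value alone.

#include <stdio.h>

/* Illustrative only: these shifts mirror the idea of X_INGPADBOUNDARY_SHIFT
 * and INGPACKBOUNDARY_SHIFT_X but are assumptions for this sketch. */
#define PAD_SHIFT	5
#define PACK_SHIFT	5

static unsigned int fl_align(unsigned int padfield, unsigned int packfield,
			     int is_t4)
{
	unsigned int pad = 1u << (padfield + PAD_SHIFT);
	unsigned int pack;

	if (is_t4)
		return pad;		/* T4: single control field */

	/* T5: a packing field of 0 means 16 bytes, not 1 << PACK_SHIFT */
	pack = packfield ? 1u << (packfield + PACK_SHIFT) : 16;

	return pad > pack ? pad : pack;	/* alignment is the max of the two */
}

int main(void)
{
	printf("T4 pad=1          -> %u\n", fl_align(1, 0, 1));
	printf("T5 pad=1, pack=0  -> %u\n", fl_align(1, 0, 0));
	printf("T5 pad=0, pack=2  -> %u\n", fl_align(0, 2, 0));
	return 0;
}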
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 95df61d..4b6a6d1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -134,11 +134,13 @@ struct dev_params {
*/
struct sge_params {
u32 sge_control; /* padding, boundaries, lengths, etc. */
+ u32 sge_control2; /* T5: more of the same */
u32 sge_host_page_size; /* RDMA page sizes */
u32 sge_queues_per_page; /* RDMA queues/page */
u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
+ u32 sge_congestion_control; /* congestion thresholds, etc. */
u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
u32 sge_timer_value_2_and_3;
u32 sge_timer_value_4_and_5;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index e984fdc..1e896b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
sge_params->sge_timer_value_2_and_3 = vals[5];
sge_params->sge_timer_value_4_and_5 = vals[6];
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately with the Padding Boundary in SGE_CONTROL and the Packing
+ * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
+ * SGE_CONTROL2 in order to determine how ingress packet data will be
+ * laid out in Packed Buffer Mode. Unfortunately, older versions of
+ * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
+ * failure grabbing it we throw an error since we can't figure out the
+ * right value.
+ */
+ if (!is_t4(adapter->params.chip)) {
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_err(adapter->pdev_dev,
+ "Unable to get SGE Control2; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_params->sge_control2 = vals[0];
+ }
+
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
- v = t4vf_query_params(adapter, 1, params, vals);
+ params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+ v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
sge_params->sge_ingress_rx_threshold = vals[0];
+ sge_params->sge_congestion_control = vals[1];
return 0;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 180e53f..73cf165 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
struct vnic_rq_buf *buf = rq->to_use;
if (buf->os_buf) {
- buf = buf->next;
- rq->to_use = buf;
- rq->ring.desc_avail--;
- if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
- /* Adding write memory barrier prevents compiler and/or
- * CPU reordering, thus avoiding descriptor posting
- * before descriptor is initialized. Otherwise, hardware
- * can read stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &rq->ctrl->posted_index);
- }
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
return 0;
}
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
enic->rq_truncated_pkts++;
}
+ pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
+ buf->os_buf = NULL;
return;
}
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
+ pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
+ buf->os_buf = NULL;
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9a18e79..597c463 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
return -EOPNOTSUPP;
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
mode = nla_get_u16(attr);
if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
return -EINVAL;
@@ -4421,6 +4426,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
"Disabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
}
+
+static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+ return vxlan_gso_check(skb);
+}
#endif
static const struct net_device_ops be_netdev_ops = {
@@ -4450,6 +4460,7 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_BE2NET_VXLAN
.ndo_add_vxlan_port = be_add_vxlan_port,
.ndo_del_vxlan_port = be_del_vxlan_port,
+ .ndo_gso_check = be_gso_check,
#endif
};
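Both added checks here (and the matching ones in the ixgbe hunk further down) follow the same rule for netlink parsing: an optional attribute must be checked for presence and for length before its payload is read. A hedged standalone sketch of that defensive read; the struct and helper are invented for illustration and are not the kernel's nla_* API.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Toy stand-in for a netlink attribute: a pointer plus a length. */
struct attr {
	const void *data;
	int len;
};

/* Read a u16 payload only if the attribute exists and is big enough,
 * mirroring the !br_spec and nla_len() checks added above. */
static int read_u16_attr(const struct attr *a, uint16_t *out)
{
	if (!a || !a->data || a->len < (int)sizeof(*out))
		return -1;			/* -EINVAL in the drivers */
	memcpy(out, a->data, sizeof(*out));
	return 0;
}

int main(void)
{
	uint16_t mode, raw = 4;			/* arbitrary example value */
	struct attr ok  = { &raw, sizeof(raw) };
	struct attr bad = { &raw, 1 };		/* too short to hold a u16 */

	printf("%d %d\n", read_u16_attr(&ok, &mode), read_u16_attr(&bad, &mode));
	return 0;
}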
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 50a851d..3dca494 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
return bufaddr;
}
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+ int i;
+ unsigned int *src = src_buf;
+ unsigned int *dst = dst_buf;
+
+ for (i = 0; i < len; i += 4, src++, dst++)
+ *dst = swab32p(src);
+}
+
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
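The new swap_buffer2() helper simply byte-swaps the frame in 32-bit words while it is copied, so the copybreak path below can service FEC_QUIRK_SWAP_FRAME parts in a single pass. A minimal userspace sketch of the same operation, using a compiler builtin in place of the kernel's swab32p() (illustration only):

#include <stdint.h>
#include <stdio.h>

/* Copy src into dst while byte-swapping each 32-bit word. */
static void swap_copy32(void *dst_buf, const void *src_buf, int len)
{
	const uint32_t *src = src_buf;
	uint32_t *dst = dst_buf;
	int i;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = __builtin_bswap32(*src);
}

int main(void)
{
	uint32_t src[2] = { 0x44332211u, 0x88776655u };
	uint32_t dst[2];

	swap_copy32(dst, src, sizeof(src));
	printf("%08x %08x\n", dst[0], dst[1]);	/* 11223344 55667788 */
	return 0;
}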
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
}
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length)
+ struct bufdesc *bdp, u32 length, bool swap)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, length);
+ if (!swap)
+ memcpy(new_skb->data, (*skb)->data, length);
+ else
+ swap_buffer2(new_skb->data, (*skb)->data, length);
*skb = new_skb;
return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
u16 vlan_tag;
int index = 0;
bool is_copybreak;
+ bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+ is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
+ need_swap);
if (!is_copybreak) {
skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, pkt_len - 4);
data = skb->data;
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (!is_copybreak && need_swap)
swap_buffer(data, pkt_len);
/* Extract the enhanced buffer descriptor */
@@ -3343,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
}
rtnl_unlock();
- fec_enet_clk_enable(ndev, false);
- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
return ret;
}
- pinctrl_pm_select_default_state(&fep->pdev->dev);
- ret = fec_enet_clk_enable(ndev, true);
- if (ret)
- goto failed_clk;
-
rtnl_lock();
if (netif_running(ndev)) {
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret) {
+ rtnl_unlock();
+ goto failed_clk;
+ }
fec_restart(ndev);
netif_tx_lock_bh(ndev);
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a2d72a8..487cd9c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
/* igb_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
- kfree_rcu(q_vector, rcu);
+ if (q_vector)
+ kfree_rcu(q_vector, rcu);
}
/**
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter)
adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
for (i = 0; i < adapter->num_q_vectors; i++) {
- napi_synchronize(&(adapter->q_vector[i]->napi));
- napi_disable(&(adapter->q_vector[i]->napi));
+ if (adapter->q_vector[i]) {
+ napi_synchronize(&adapter->q_vector[i]->napi);
+ napi_disable(&adapter->q_vector[i]->napi);
+ }
}
@@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_free_tx_resources(adapter->tx_ring[i]);
+ if (adapter->tx_ring[i])
+ igb_free_tx_resources(adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
@@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_clean_tx_ring(adapter->tx_ring[i]);
+ if (adapter->tx_ring[i])
+ igb_clean_tx_ring(adapter->tx_ring[i]);
}
/**
@@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_free_rx_resources(adapter->rx_ring[i]);
+ if (adapter->rx_ring[i])
+ igb_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_clean_rx_ring(adapter->rx_ring[i]);
+ if (adapter->rx_ring[i])
+ igb_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -7404,6 +7411,8 @@ static int igb_resume(struct device *dev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ if (!pci_device_is_present(pdev))
+ return -ENODEV;
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d2df4e3..cc51554 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* if SR-IOV and VMDQ are disabled - otherwise ensure
* that hardware VLAN filters remain enabled.
*/
- if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
- IXGBE_FLAG_SRIOV_ENABLED)))
+ if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED))
vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
} else {
if (netdev->flags & IFF_ALLMULTI) {
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
return -EOPNOTSUPP;
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
mode = nla_get_u16(attr);
if (mode == BRIDGE_MODE_VEPA) {
reg = 0;
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
+ bool disable_dev = false;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -8369,13 +8375,14 @@ err_sw_init:
iounmap(adapter->io_addr);
kfree(adapter->mac_table);
err_ioremap:
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
- if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ if (!adapter || disable_dev)
pci_disable_device(pdev);
return err;
}
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ bool disable_dev;
ixgbe_dbg_adapter_exit(adapter);
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
kfree(adapter->mac_table);
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
- if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ if (disable_dev)
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index d47b19f..28b81ae0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
**/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
- s32 status;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
hw->phy.ops.write_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, autoneg_reg);
-
- return status;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b151a94..d44560d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
- struct sk_buff *skb;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++;
txq->tx_desc_count--;
- skb = NULL;
- if (cmd_sts & TX_LAST_DESC)
- skb = __skb_dequeue(&txq->tx_skb);
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr))
+ dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+ desc->byte_cnt, DMA_TO_DEVICE);
+
+ if (cmd_sts & TX_ENABLE_INTERRUPT) {
+ struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+ if (!WARN_ON(!skb))
+ dev_kfree_skb(skb);
+ }
if (cmd_sts & ERROR_SUMMARY) {
netdev_info(mp->dev, "tx error\n");
mp->dev->stats.tx_errors++;
}
- if (!IS_TSO_HEADER(txq, desc->buf_ptr))
- dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
- dev_kfree_skb(skb);
}
__netif_tx_unlock_bh(nq);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ece83f1..fdf3e38 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
{
struct mvpp2_prs_entry *pe;
int tid_aux, tid;
+ int ret = 0;
pe = mvpp2_prs_vlan_find(priv, tpid, ai);
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
break;
}
- if (tid <= tid_aux)
- return -EINVAL;
+ if (tid <= tid_aux) {
+ ret = -EINVAL;
+ goto error;
+ }
memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_hw_write(priv, pe);
+error:
kfree(pe);
- return 0;
+ return ret;
}
/* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
unsigned int port_map)
{
struct mvpp2_prs_entry *pe;
- int tid_aux, tid, ai;
+ int tid_aux, tid, ai, ret = 0;
pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
/* Set ai value for new double vlan entry */
ai = mvpp2_prs_double_vlan_ai_free_get(priv);
- if (ai < 0)
- return ai;
+ if (ai < 0) {
+ ret = ai;
+ goto error;
+ }
/* Get first single/triple vlan tid */
for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
break;
}
- if (tid >= tid_aux)
- return -ERANGE;
+ if (tid >= tid_aux) {
+ ret = -ERANGE;
+ goto error;
+ }
memset(pe, 0, sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_tcam_port_map_set(pe, port_map);
mvpp2_prs_hw_write(priv, pe);
+error:
kfree(pe);
- return 0;
+ return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset */
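The mvpp2 changes above convert early returns that leaked the allocated parser entry into a single cleanup label. Stripped of the driver specifics, the idiom looks like the sketch below; the error codes and names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Single-exit cleanup: every failure path jumps to one label that frees
 * the allocation, so no early return can leak it. */
static int do_work(int fail_step)
{
	int ret = 0;
	int *entry = malloc(sizeof(*entry));

	if (!entry)
		return -1;

	if (fail_step == 1) {
		ret = -22;		/* an -EINVAL-style failure */
		goto error;
	}
	if (fail_step == 2) {
		ret = -34;		/* an -ERANGE-style failure */
		goto error;
	}

	*entry = 42;			/* the "real" work */

error:
	free(entry);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", do_work(0), do_work(1), do_work(2));
	return 0;
}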
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f3032fe..4d69e38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
#ifdef CONFIG_MLX4_EN_VXLAN
- if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
vxlan_get_rx_port(dev);
#endif
priv->port_up = true;
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 1);
out:
- if (ret)
+ if (ret) {
en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+ return;
+ }
+
+ /* set offloads */
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
int ret;
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
+ /* unset offloads */
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+ priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2342,6 +2355,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
+
+static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+ return vxlan_gso_check(skb);
+}
#endif
static const struct net_device_ops mlx4_netdev_ops = {
@@ -2373,6 +2391,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_gso_check = mlx4_en_gso_check,
#endif
};
@@ -2403,6 +2422,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
+#ifdef CONFIG_MLX4_EN_VXLAN
+ .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
+ .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_gso_check = mlx4_en_gso_check,
+#endif
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2568,13 +2592,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
- if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL;
- }
-
mdev->pndev[port] = dev;
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5d2498d..cd5cf6d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE:
- count = get_param_l(&in_param);
+ count = get_param_l(&in_param) & 0xffffff;
align = get_param_h(&in_param);
err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a278238..ad2c96a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
+ eq->irqn = vecidx;
+ eq->dev = dev;
+ eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
eq->name, eq);
if (err)
goto err_eq;
- eq->irqn = vecidx;
- eq->dev = dev;
- eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-
err = mlx5_debug_eq_add(dev, eq);
if (err)
goto err_irq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d8e8e4..71b10b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
dev->profile = &profile[prof_sel];
dev->event = mlx5_core_event;
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
err = mlx5_dev_init(dev, pdev);
if (err) {
dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
goto out;
}
- INIT_LIST_HEAD(&priv->ctx_list);
- spin_lock_init(&priv->ctx_lock);
err = mlx5_register_device(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 0b2a1cc..6130375 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
if (test_bit(__NX_RESETTING, &adapter->state))
goto reschedule;
- if (test_bit(__NX_DEV_UP, &adapter->state)) {
+ if (test_bit(__NX_DEV_UP, &adapter->state) &&
+ !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
if (!adapter->has_link_events) {
netxen_nic_handle_phy_intr(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index f5e29f7..a913b3a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -503,6 +503,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
+
+static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+ return vxlan_gso_check(skb);
+}
#endif
static const struct net_device_ops qlcnic_netdev_ops = {
@@ -526,6 +531,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#ifdef CONFIG_QLCNIC_VXLAN
.ndo_add_vxlan_port = qlcnic_add_vxlan_port,
.ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+ .ndo_gso_check = qlcnic_gso_check,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index f3a4714..9a49f42 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -5,7 +5,6 @@
config NET_VENDOR_QUALCOMM
bool "Qualcomm devices"
default y
- depends on SPI_MASTER && OF_GPIO
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
config QCA7000
tristate "Qualcomm Atheros QCA7000 support"
- depends on SPI_MASTER && OF_GPIO
+ depends on SPI_MASTER && OF
---help---
This SPI protocol driver supports the Qualcomm Atheros QCA7000.
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 002d4cd..a77f05c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
EFX_MAX_CHANNELS,
resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
- BUG_ON(efx->max_channels == 0);
+ if (WARN_ON(efx->max_channels == 0))
+ return -EIO;
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 2c62208..6cc3cf6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
const struct of_device_id *match = NULL;
struct smc_local *lp;
struct net_device *ndev;
- struct resource *res, *ires;
+ struct resource *res;
unsigned int __iomem *addr;
unsigned long irq_flags = SMC_IRQ_FLAGS;
+ unsigned long irq_resflags;
int ret;
ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
goto out_free_netdev;
}
- ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!ires) {
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
ret = -ENODEV;
goto out_release_io;
}
-
- ndev->irq = ires->start;
-
- if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
- irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+ /*
+ * If this platform does not specify any special irqflags, or if
+ * the resource supplies a trigger, override the irqflags with
+ * the trigger flags from the resource.
+ */
+ irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+ if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+ irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
ret = smc_request_attrib(pdev, ndev);
if (ret)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index affb29d..77ed745 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
spin_unlock(&pdata->mac_lock);
}
+static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ /* If the internal PHY is in General Power-Down mode, everything except
+ * the management interface is powered down and stays in that condition
+ * as long as PHY register bit 0.11 is HIGH.
+ *
+ * In that case, clear bit 0.11 so the PHY powers up and we can access
+ * the PHY registers.
+ */
+ rc = phy_read(pdata->phy_dev, MII_BMCR);
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /* If the PHY general power-down bit is not set, it is not necessary to
+ * disable the general power-down mode.
+ */
+ if (rc & BMCR_PDOWN) {
+ rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ usleep_range(1000, 1500);
+ }
+
+ return 0;
+}
+
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
{
int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
return rc;
}
- /*
- * If energy is detected the PHY is already awake so is not necessary
- * to disable the energy detect power-down mode.
- */
- if ((rc & MII_LAN83C185_EDPWRDOWN) &&
- !(rc & MII_LAN83C185_ENERGYON)) {
+ /* Only disable if energy detect mode is already enabled */
+ if (rc & MII_LAN83C185_EDPWRDOWN) {
/* Disable energy detect mode for this SMSC Transceivers */
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
}
-
- mdelay(1);
+ /* Allow PHY to wakeup */
+ mdelay(2);
}
return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
/* Only enable if energy detect mode is already disabled */
if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
- mdelay(100);
/* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
}
-
- mdelay(1);
}
return 0;
}
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
int ret;
/*
+ * Make sure to power-up the PHY chip before doing a reset, otherwise
+ * the reset fails.
+ */
+ ret = smsc911x_phy_general_power_up(pdata);
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
+ return ret;
+ }
+
+ /*
* LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
* is initialized in an Energy Detect Power-Down mode that prevents
* the MAC chip from being software reset. So we have to wake up the PHY
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f77a46..18c46bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
char *phy_bus_name = priv->plat->phy_bus_name;
+ unsigned long flags;
bool ret = false;
/* Using PCS we cannot deal with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
* changed).
* In that case the driver disable own timers.
*/
+ spin_lock_irqsave(&priv->lock, flags);
if (priv->eee_active) {
pr_debug("stmmac: disable EEE\n");
del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
tx_lpi_timer);
}
priv->eee_active = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
goto out;
}
/* Activate the EEE and start timers */
+ spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
- pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
ret = true;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
}
out:
return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
/* At this stage, it may be necessary to set up the EEE or adjust some
* MAC-related HW registers.
*/
priv->eee_enabled = stmmac_eee_init(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
/**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
}
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
- int i)
+ int i, gfp_t flags)
{
struct sk_buff *skb;
skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
- GFP_KERNEL);
+ flags);
if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i);
+ ret = stmmac_init_rx_buffers(priv, p, i, flags);
if (ret)
goto err_init_rx_buffers;
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_desc_rings(dev);
- if (ret < 0) {
- pr_err("%s: DMA descriptors initialization failed\n", __func__);
- return ret;
- }
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
}
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
- priv->eee_enabled = stmmac_eee_init(priv);
-
- stmmac_init_tx_coalesce(priv);
-
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
priv->rx_riwt = MAX_DMA_RIWT;
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
goto dma_desc_error;
}
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ goto init_error;
+ }
+
ret = stmmac_hw_setup(dev);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_init_tx_coalesce(priv);
+
if (priv->phydev)
phy_start(priv->phydev);
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
unsigned int enh_desc = priv->plat->enh_desc;
+ spin_lock(&priv->tx_lock);
+
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+ spin_unlock(&priv->tx_lock);
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- spin_lock(&priv->tx_lock);
-
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
dma_map_err:
+ spin_unlock(&priv->tx_lock);
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- spin_lock(&priv->lock);
priv->hw->mac->set_filter(priv->hw, dev);
- spin_unlock(&priv->lock);
}
/**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
stmmac_set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case PWM is off */
- clk_disable_unprepare(priv->stmmac_clk);
+ clk_disable(priv->stmmac_clk);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
- clk_prepare_enable(priv->stmmac_clk);
+ clk_enable(priv->stmmac_clk);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
+ init_dma_desc_rings(ndev, GFP_ATOMIC);
stmmac_hw_setup(ndev);
+ stmmac_init_tx_coalesce(priv);
napi_enable(&priv->napi);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index db56fa7..5b0da39 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
*/
plat->maxmtu = JUMBO_LEN;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -270,6 +264,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
return PTR_ERR(addr);
plat_dat = dev_get_platdata(&pdev->dev);
+
+ /* Set default value for multicast hash bins */
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat_dat->unicast_filter_entries = 1;
+
if (pdev->dev.of_node) {
if (!plat_dat)
plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525..9c014803b0 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
HMD(("init rxring, "));
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
+ u32 mapping;
skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hp->dma_dev, mapping)) {
+ dev_kfree_skb_any(skb);
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+ continue;
+ }
hme_write_rxd(hp, &hb->happy_meal_rxd[i],
(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
- dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(skb, RX_OFFSET);
}
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb = hp->rx_skbs[elem];
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
+ u32 mapping;
/* Now refill the entry, if we can. */
new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
drops++;
goto drop_it;
}
+ skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, new_skb->data,
+ RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ dev_kfree_skb_any(new_skb);
+ drops++;
+ goto drop_it;
+ }
+
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
- dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(new_skb, RX_OFFSET);
/* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+ u32 first_len, u32 first_entry, u32 entry)
+{
+ struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+ dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+ first_entry = NEXT_TX(first_entry);
+ while (first_entry != entry) {
+ struct happy_meal_txd *this = &txbase[first_entry];
+ u32 addr, len;
+
+ addr = hme_read_desc32(hp, &this->tx_addr);
+ len = hme_read_desc32(hp, &this->tx_flags);
+ len &= TXFLAG_SIZE;
+ dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+ }
+}
+
static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+ goto out_dma_error;
tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
(tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+ goto out_dma_error;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ unmap_partial_tx_skb(hp, first_mapping, first_len,
+ first_entry, entry);
+ goto out_dma_error;
+ }
this_txflags = tx_flags;
if (frag == skb_shinfo(skb)->nr_frags - 1)
this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
+
+out_dma_error:
+ hp->tx_skbs[hp->tx_new] = NULL;
+ spin_unlock_irq(&hp->happy_lock);
+
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
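unmap_partial_tx_skb() exists because a DMA mapping can fail on any fragment, and every mapping made so far has to be unwound before the packet is dropped. A toy sketch of that unwind pattern follows; map_fragment() and unmap_fragment() are stand-ins, not the driver's API.

#include <stdio.h>

static int map_fragment(int i)
{
	return i == 3 ? -1 : 0;		/* pretend fragment 3 fails to map */
}

static void unmap_fragment(int i)
{
	printf("unmapped fragment %d\n", i);
}

/* Map nfrags fragments; on failure, unmap the ones already mapped. */
static int map_all(int nfrags)
{
	int i;

	for (i = 0; i < nfrags; i++) {
		if (map_fragment(i) < 0) {
			while (--i >= 0)
				unmap_fragment(i);
			return -1;	/* caller drops the packet */
		}
	}
	return 0;
}

int main(void)
{
	printf("map_all -> %d\n", map_all(5));
	return 0;
}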
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d879448..c560f9a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -129,9 +129,9 @@ do { \
#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_ALE_VLAN_AWARE 1
-#define CPSW_FIFO_NORMAL_MODE (0 << 15)
-#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
-#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+#define CPSW_FIFO_NORMAL_MODE (0 << 16)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
#define CPSW_INTPACEEN (0x3f << 16)
#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 3ae8387..097ebe7 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
{
if (!ale)
return -EINVAL;
- cpsw_ale_stop(ale);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
kfree(ale);
return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index ab92f67..4a4388b 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
switch (ptp_class & PTP_CLASS_PMASK) {
case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
break;
case PTP_CLASS_IPV6:
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 9ce854f..6cbc56a 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -377,17 +377,20 @@ static int ieee802154fake_probe(struct platform_device *pdev)
err = wpan_phy_register(phy);
if (err)
- goto out;
+ goto err_phy_reg;
err = register_netdev(dev);
- if (err < 0)
- goto out;
+ if (err)
+ goto err_netdev_reg;
dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
return 0;
-out:
- unregister_netdev(dev);
+err_netdev_reg:
+ wpan_phy_unregister(phy);
+err_phy_reg:
+ free_netdev(dev);
+ wpan_phy_free(phy);
return err;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6f226de..880cc09 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+ if (vlan_tx_tag_present(skb))
+ vnet_hdr->csum_start += VLAN_HLEN;
vnet_hdr->csum_offset = skb->csum_offset;
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2954052..e22e602 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
switch (type & PTP_CLASS_PMASK) {
case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
break;
case PTP_CLASS_IPV6:
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
switch (type & PTP_CLASS_PMASK) {
case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
break;
case PTP_CLASS_IPV6:
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
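The cpts.c and dp83640.c hunks make the same fix: the IPv4 header length (the IHL nibble) is now read relative to the running offset rather than at a fixed distance from the start of the frame, which matters once a VLAN tag sits in front of the IP header. A standalone sketch of the offset arithmetic; the constants and frame layout are simplified for illustration.

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14
#define UDP_HLEN 8

/* IPv4 header length: the low nibble of the first header byte (IHL),
 * in 32-bit words. */
static unsigned int ipv4_hlen(const uint8_t *ip_hdr)
{
	return (ip_hdr[0] & 0x0f) * 4;
}

int main(void)
{
	/* Minimal frame: Ethernet header followed by an IPv4 header with
	 * version 4 and IHL 5 (20 bytes). */
	uint8_t frame[ETH_HLEN + 20] = { 0 };

	frame[ETH_HLEN] = 0x45;
	printf("PTP offset = %u\n",
	       ETH_HLEN + ipv4_hlen(frame + ETH_HLEN) + UDP_HLEN);
	return 0;
}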
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1dfffdc..767cd11 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii_data = if_mii(ifr);
u16 val = mii_data->val_in;
+ bool change_autoneg = false;
switch (cmd) {
case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mii_data->phy_id == phydev->addr) {
switch (mii_data->reg_num) {
case MII_BMCR:
- if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
+ if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ change_autoneg = true;
phydev->autoneg = AUTONEG_DISABLE;
- else
+ if (val & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ if (val & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (val & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+ } else {
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ change_autoneg = true;
phydev->autoneg = AUTONEG_ENABLE;
- if (!phydev->autoneg && (val & BMCR_FULLDPLX))
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
- if (!phydev->autoneg && (val & BMCR_SPEED1000))
- phydev->speed = SPEED_1000;
- else if (!phydev->autoneg &&
- (val & BMCR_SPEED100))
- phydev->speed = SPEED_100;
+ }
break;
case MII_ADVERTISE:
- phydev->advertising = val;
+ phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+ change_autoneg = true;
break;
default:
/* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mii_data->reg_num == MII_BMCR &&
val & BMCR_RESET)
return phy_init_hw(phydev);
+
+ if (change_autoneg)
+ return phy_start_aneg(phydev);
+
return 0;
case SIOCSHWTSTAMP:
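The reworked MII_BMCR branch decodes a forced-mode control word into speed and duplex and only restarts autonegotiation when the mode actually changed. The decode itself is plain bit testing; a hedged standalone sketch, with the bit values as commonly defined in linux/mii.h:

#include <stdio.h>

#define BMCR_SPEED1000	0x0040
#define BMCR_FULLDPLX	0x0100
#define BMCR_SPEED100	0x2000

/* Decode a forced-mode BMCR value into speed and duplex. Illustration only. */
static void decode_bmcr(unsigned short val, int *speed, int *full_duplex)
{
	*full_duplex = !!(val & BMCR_FULLDPLX);

	if (val & BMCR_SPEED1000)
		*speed = 1000;
	else if (val & BMCR_SPEED100)
		*speed = 100;
	else
		*speed = 10;
}

int main(void)
{
	int speed, duplex;

	decode_bmcr(BMCR_SPEED100 | BMCR_FULLDPLX, &speed, &duplex);
	printf("speed=%d duplex=%s\n", speed, duplex ? "full" : "half");
	return 0;
}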
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 68c3a3f..794a473 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
+ struct bpf_prog *pass_filter = NULL;
struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
- ppp_lock(ppp);
- if (ppp->pass_filter) {
- bpf_prog_destroy(ppp->pass_filter);
- ppp->pass_filter = NULL;
+ err = 0;
+ if (fprog.filter)
+ err = bpf_prog_create(&pass_filter, &fprog);
+ if (!err) {
+ ppp_lock(ppp);
+ if (ppp->pass_filter)
+ bpf_prog_destroy(ppp->pass_filter);
+ ppp->pass_filter = pass_filter;
+ ppp_unlock(ppp);
}
- if (fprog.filter != NULL)
- err = bpf_prog_create(&ppp->pass_filter,
- &fprog);
- else
- err = 0;
kfree(code);
- ppp_unlock(ppp);
}
break;
}
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
+ struct bpf_prog *active_filter = NULL;
struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
- ppp_lock(ppp);
- if (ppp->active_filter) {
- bpf_prog_destroy(ppp->active_filter);
- ppp->active_filter = NULL;
+ err = 0;
+ if (fprog.filter)
+ err = bpf_prog_create(&active_filter, &fprog);
+ if (!err) {
+ ppp_lock(ppp);
+ if (ppp->active_filter)
+ bpf_prog_destroy(ppp->active_filter);
+ ppp->active_filter = active_filter;
+ ppp_unlock(ppp);
}
- if (fprog.filter != NULL)
- err = bpf_prog_create(&ppp->active_filter,
- &fprog);
- else
- err = 0;
kfree(code);
- ppp_unlock(ppp);
}
break;
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1aff970..1dc628f 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
int len = sizeof(struct sockaddr_pppox);
struct sockaddr_pppox sp;
- sp.sa_family = AF_PPPOX;
+ memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
+
+ sp.sa_family = AF_PPPOX;
sp.sa_protocol = PX_PROTO_PPTP;
sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7302398..9dd3746 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
int vlan_offset = 0, copied;
+ int vlan_hlen = 0;
+ int vnet_hdr_sz = 0;
+
+ if (vlan_tx_tag_present(skb))
+ vlan_hlen = VLAN_HLEN;
+
+ if (tun->flags & TUN_VNET_HDR)
+ vnet_hdr_sz = tun->vnet_hdr_sz;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
return -EINVAL;
- if (len < skb->len) {
+ if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
/* Packet will be stripped */
pi.flags |= TUN_PKT_STRIP;
}
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
total += sizeof(pi);
}
- if (tun->flags & TUN_VNET_HDR) {
+ if (vnet_hdr_sz) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= tun->vnet_hdr_sz) < 0)
+ if ((len -= vnet_hdr_sz) < 0)
return -EINVAL;
if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = skb_checksum_start_offset(skb);
+ gso.csum_start = skb_checksum_start_offset(skb) +
+ vlan_hlen;
gso.csum_offset = skb->csum_offset;
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
sizeof(gso))))
return -EFAULT;
- total += tun->vnet_hdr_sz;
+ total += vnet_hdr_sz;
}
copied = total;
- total += skb->len;
- if (!vlan_tx_tag_present(skb)) {
- len = min_t(int, skb->len, len);
- } else {
+ len = min_t(int, skb->len + vlan_hlen, len);
+ total += skb->len + vlan_hlen;
+ if (vlan_hlen) {
int copy, ret;
struct {
__be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
- len = min_t(int, skb->len + VLAN_HLEN, len);
- total += VLAN_HLEN;
copy = min_t(int, vlan_offset, len);
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
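The tun_put_user() changes are bookkeeping: the truncation check and the returned total must both count the optional virtio-net header and a VLAN tag that gets re-inserted in front of the payload. A small arithmetic sketch; the sizes below are illustrative rather than taken from the driver.

#include <stdio.h>

#define VLAN_HLEN 4	/* 802.1Q tag re-inserted in front of the payload */

/* How many bytes one packet needs in the user buffer, counting the
 * optional packet-info header, the optional virtio-net header and a
 * re-inserted VLAN tag. */
static int bytes_needed(int skb_len, int has_vlan_tag, int vnet_hdr_sz,
			int pi_sz)
{
	int vlan_hlen = has_vlan_tag ? VLAN_HLEN : 0;

	return pi_sz + vnet_hdr_sz + skb_len + vlan_hlen;
}

int main(void)
{
	int need = bytes_needed(1500, 1, 10, 4);
	int buf_len = 1514;

	/* Comparing only skb_len, as the old code did, would miss this
	 * truncation. */
	printf("need=%d buf=%d truncated=%s\n", need, buf_len,
	       need > buf_len ? "yes" : "no");
	return 0;
}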
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c05f6c..816d511 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
}
- ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
- if (ret < 0)
- return ret;
-
- msleep(150);
-
- ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
- if (ret < 0)
- return ret;
-
- msleep(150);
-
- ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+ ax88772_reset(dev);
/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 22756db..b8a82b8 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -780,6 +780,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ec2a8b4..b0bc8ea 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1673,6 +1673,40 @@ static const struct attribute_group virtio_net_mrg_rx_group = {
};
#endif
+static bool virtnet_fail_on_feature(struct virtio_device *vdev,
+ unsigned int fbit,
+ const char *fname, const char *dname)
+{
+ if (!virtio_has_feature(vdev, fbit))
+ return false;
+
+ dev_err(&vdev->dev, "device advertises feature %s but not %s",
+ fname, dname);
+
+ return true;
+}
+
+#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
+ virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
+
+static bool virtnet_validate_features(struct virtio_device *vdev)
+{
+ if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
+ (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
+ "VIRTIO_NET_F_CTRL_VQ"))) {
+ return false;
+ }
+
+ return true;
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err;
@@ -1680,6 +1714,9 @@ static int virtnet_probe(struct virtio_device *vdev)
struct virtnet_info *vi;
u16 max_queue_pairs;
+ if (!virtnet_validate_features(vdev))
+ return -EINVAL;
+
/* Find if host supports multiqueue virtio_net device */
err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
struct virtio_net_config,
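virtnet_validate_features() rejects devices that advertise control-path features without the control virtqueue those features depend on. The shape of that check, reduced to a toy feature mask (the bit assignments are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

enum {
	FEAT_CTRL_VQ   = 1u << 0,
	FEAT_CTRL_RX   = 1u << 1,
	FEAT_CTRL_VLAN = 1u << 2,
	FEAT_MQ        = 1u << 3,
};

/* Every control-path feature depends on the control virtqueue. */
static bool validate_features(unsigned int features)
{
	unsigned int need_ctrl_vq = FEAT_CTRL_RX | FEAT_CTRL_VLAN | FEAT_MQ;

	if ((features & need_ctrl_vq) && !(features & FEAT_CTRL_VQ)) {
		fprintf(stderr, "control feature advertised without CTRL_VQ\n");
		return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", validate_features(FEAT_CTRL_RX));		/* 0 */
	printf("%d\n", validate_features(FEAT_CTRL_RX | FEAT_CTRL_VQ));	/* 1 */
	return 0;
}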
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca30982..be4649a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -67,12 +67,6 @@
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
-/* VXLAN protocol header */
-struct vxlanhdr {
- __be32 vx_flags;
- __be32 vx_vni;
-};
-
/* UDP port for VXLAN traffic.
* The IANA assigned port is 4789, but the Linux default is 8472
* for compatibility with early adopters.
@@ -275,13 +269,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
-/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port */
+static struct vxlan_sock *vxlan_find_sock(struct net *net,
+ sa_family_t family, __be16 port)
{
struct vxlan_sock *vs;
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
- if (inet_sk(vs->sock->sk)->inet_sport == port)
+ if (inet_sk(vs->sock->sk)->inet_sport == port &&
+ inet_sk(vs->sock->sk)->sk.sk_family == family)
return vs;
}
return NULL;
@@ -300,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
}
/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+ sa_family_t family, __be16 port)
{
struct vxlan_sock *vs;
- vs = vxlan_find_sock(net, port);
+ vs = vxlan_find_sock(net, family, port);
if (!vs)
return NULL;
@@ -621,6 +618,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
int err = -ENOSYS;
+ udp_tunnel_gro_complete(skb, nhoff);
+
eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
type = eh->h_proto;
@@ -1771,7 +1770,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+ dst->sa.sa_family, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1825,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
dst_release(ndst);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+ dst->sa.sa_family, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1986,15 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs;
+ bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
+ vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+ vxlan->dst_port);
if (vs) {
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
@@ -2303,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
if (ipv6) {
udp_conf.family = AF_INET6;
udp_conf.use_udp6_tx_checksums =
- !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+ !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
udp_conf.use_udp6_rx_checksums =
- !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+ !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
} else {
udp_conf.family = AF_INET;
udp_conf.local_ip.s_addr = INADDR_ANY;
@@ -2382,6 +2385,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
+ bool ipv6 = flags & VXLAN_F_IPV6;
vs = vxlan_socket_create(net, port, rcv, data, flags);
if (!IS_ERR(vs))
@@ -2391,7 +2395,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
return vs;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(net, port);
+ vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
if (vs) {
if (vs->rcv == rcv)
atomic_inc(&vs->refcnt);
@@ -2550,7 +2554,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
- if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
+ if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+ vxlan->dst_port)) {
pr_info("duplicate VNI %u\n", vni);
return -EEXIST;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 697c4ae..1e8ea5e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -664,6 +664,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
ah->enabled_cals |= TX_CL_CAL;
else
ah->enabled_cals &= ~TX_CL_CAL;
+
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
+ if (ah->is_clk_25mhz) {
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+ REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
+ REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
+ } else {
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+ REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
+ REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
+ }
+ udelay(100);
+ }
}
static void ar9003_hw_prog_ini(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8be4b14..2ad6057 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -861,19 +861,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(RTC_PLL_SETTLE_DELAY);
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
-
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
- if (ah->is_clk_25mhz) {
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
- REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
- REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
- } else {
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
- REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
- REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
- }
- udelay(100);
- }
}
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 30c66df..4f18a6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -974,9 +974,8 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
struct ath_vif *avp;
/*
- * Pick the MAC address of the first interface as the new hardware
- * MAC address. The hardware will use it together with the BSSID mask
- * when matching addresses.
+ * The hardware will use primary station addr together with the
+ * BSSID mask when matching addresses.
*/
memset(iter_data, 0, sizeof(*iter_data));
memset(&iter_data->mask, 0xff, ETH_ALEN);
@@ -1205,6 +1204,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
list_add_tail(&avp->list, &avp->chanctx->vifs);
}
+ ath9k_calculate_summary_state(sc, avp->chanctx);
+
ath9k_assign_hw_queues(hw, vif);
an->sc = sc;
@@ -1274,6 +1275,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath_tx_node_cleanup(sc, &avp->mcast_node);
+ ath9k_calculate_summary_state(sc, avp->chanctx);
+
mutex_unlock(&sc->mutex);
}
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 1dfc682..ee27b06 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -300,9 +300,7 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg)
{
- assert_mac_suspended(dev);
- dev->phy.ops->phy_write(dev, destreg,
- dev->phy.ops->phy_read(dev, srcreg));
+ b43_phy_write(dev, destreg, b43_phy_read(dev, srcreg));
}
void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index f05f527..927bffd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -40,8 +40,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
return;
irq = irq_of_parse_and_map(np, 0);
- if (irq < 0) {
- brcmf_err("interrupt could not be mapped: err=%d\n", irq);
+ if (!irq) {
+ brcmf_err("interrupt could not be mapped\n");
devm_kfree(dev, sdiodev->pdata);
return;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 8c0632e..16fef33 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -19,10 +19,10 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
-#include <linux/unaligned/access_ok.h>
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#include <soc.h>
#include <chipcommon.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index dc13591..875d114 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -669,10 +669,12 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
goto finalize;
}
- if (!brcmf_usb_ioctl_resp_wait(devinfo))
+ if (!brcmf_usb_ioctl_resp_wait(devinfo)) {
+ usb_kill_urb(devinfo->ctl_urb);
ret = -ETIMEDOUT;
- else
+ } else {
memcpy(buffer, tmpbuf, buflen);
+ }
finalize:
kfree(tmpbuf);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 28fa25b..39b45c0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -299,6 +299,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
primary_offset = ch->center_freq1 - ch->chan->center_freq;
switch (ch->width) {
case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
ch_inf.bw = BRCMU_CHAN_BW_20;
WARN_ON(primary_offset != 0);
break;
@@ -323,6 +324,10 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
ch_inf.sb = BRCMU_CHAN_SB_LU;
}
break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
default:
WARN_ON_ONCE(1);
}
@@ -333,6 +338,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
case IEEE80211_BAND_5GHZ:
ch_inf.band = BRCMU_CHAN_BAND_5G;
break;
+ case IEEE80211_BAND_60GHZ:
default:
WARN_ON_ONCE(1);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 4f6e668..b894a84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
* @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
* @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
* which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
};
/* The default calibrate table size if not specified by firmware file */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index e0d9f19..eb03943 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (WARN_ON_ONCE(mvm->init_ucode_complete))
+ if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto out;
}
+ mvm->calibrating = true;
+
/* Send TX valid antennas before triggering calibrations */
ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
MVM_UCODE_CALIB_TIMEOUT);
if (!ret)
mvm->init_ucode_complete = true;
+
+ if (ret && iwl_mvm_is_radio_killed(mvm)) {
+ IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+ ret = 1;
+ }
goto out;
error:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
+ mvm->calibrating = false;
if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 585fe5b..b6d2683 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->scan_status = IWL_MVM_SCAN_NONE;
mvm->ps_disabled = false;
+ mvm->calibrating = false;
/* just in case one was running */
ieee80211_remain_on_channel_expired(mvm->hw);
@@ -2447,9 +2448,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- /* Use aux roc framework (HS20) */
- ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
- vif, duration);
+ if (mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+ /* Use aux roc framework (HS20) */
+ ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
+ vif, duration);
+ goto out_unlock;
+ }
+ IWL_ERR(mvm, "hotspot not supported\n");
+ ret = -EINVAL;
goto out_unlock;
case NL80211_IFTYPE_P2P_DEVICE:
/* handle below */
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b153ced..845429c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -548,6 +548,7 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
bool init_ucode_complete;
+ bool calibrating;
u32 error_event_table;
u32 log_event_table;
u32 umac_error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 48cb25a..5b719ee 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
mvm->sf_state = SF_UNINIT;
mvm->low_latency_agg_frame_limit = 6;
+ mvm->cur_ucode = IWL_UCODE_INIT;
mutex_init(&mvm->mutex);
mutex_init(&mvm->d0i3_suspend_mutex);
@@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ bool calibrating = ACCESS_ONCE(mvm->calibrating);
if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
- return state && mvm->cur_ucode != IWL_UCODE_INIT;
+ /* iwl_run_init_mvm_ucode is waiting for results, abort it */
+ if (calibrating)
+ iwl_abort_notification_waits(&mvm->notif_wait);
+
+ /*
+ * Stop the device if we run OPERATIONAL firmware or if we are in the
+ * middle of the calibrations.
+ */
+ return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index b280d5d..7554f70 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -602,16 +602,6 @@ static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
SCAN_COMPLETE_NOTIFICATION };
int ret;
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return 0;
-
- if (iwl_mvm_is_radio_killed(mvm)) {
- ieee80211_scan_completed(mvm->hw, true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- return 0;
- }
-
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
scan_abort_notif,
ARRAY_SIZE(scan_abort_notif),
@@ -1400,6 +1390,16 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ return 0;
+ }
+
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
return iwl_mvm_scan_offload_stop(mvm, true);
return iwl_mvm_cancel_regular_scan(mvm);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 3781b02..dd2f3f8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
clear_bit(STATUS_INT_ENABLED, &trans->status);
- clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
clear_bit(STATUS_TPOWER_PMI, &trans->status);
clear_bit(STATUS_RFKILL, &trans->status);
@@ -1894,8 +1894,7 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
int reg;
__le32 *val;
- prph_len += sizeof(*data) + sizeof(*prph) +
- num_bytes_in_chunk;
+ prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
(*data)->len = cpu_to_le32(sizeof(*prph) +
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index babbdc1..c9ad4cf 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
if (err != 0) {
printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
err);
- goto failed_hw;
+ goto failed_bind;
}
skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
return idx;
failed_hw:
+ device_release_driver(data->dev);
+failed_bind:
device_unregister(data->dev);
failed_drvdata:
ieee80211_free_hw(hw);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 8e68f87..66ff364 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
skb_trim(skb, frame_length);
}
-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
+/*
+ * H/W needs L2 padding between the header and the payload if header size
+ * is not 4 bytes aligned.
+ */
+void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
- unsigned int payload_length = skb->len - header_length;
- unsigned int header_align = ALIGN_SIZE(skb, 0);
- unsigned int payload_align = ALIGN_SIZE(skb, header_length);
- unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
+ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
- /*
- * Adjust the header alignment if the payload needs to be moved more
- * than the header.
- */
- if (payload_align > header_align)
- header_align += 4;
-
- /* There is nothing to do if no alignment is needed */
- if (!header_align)
+ if (!l2pad)
return;
- /* Reserve the amount of space needed in front of the frame */
- skb_push(skb, header_align);
-
- /*
- * Move the header.
- */
- memmove(skb->data, skb->data + header_align, header_length);
-
- /* Move the payload, if present and if required */
- if (payload_length && payload_align)
- memmove(skb->data + header_length + l2pad,
- skb->data + header_length + l2pad + payload_align,
- payload_length);
-
- /* Trim the skb to the correct size */
- skb_trim(skb, header_length + l2pad + payload_length);
+ skb_push(skb, l2pad);
+ memmove(skb->data, skb->data + l2pad, hdr_len);
}
-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
+void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
- /*
- * L2 padding is only present if the skb contains more than just the
- * IEEE 802.11 header.
- */
- unsigned int l2pad = (skb->len > header_length) ?
- L2PAD_SIZE(header_length) : 0;
+ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
if (!l2pad)
return;
- memmove(skb->data + l2pad, skb->data, header_length);
+ memmove(skb->data + l2pad, skb->data, hdr_len);
skb_pull(skb, l2pad);
}
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 25daa87..846a2e6 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -842,7 +842,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
break;
}
/* handle command packet here */
- if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
+ if (rtlpriv->cfg->ops->rx_command_packet &&
+ rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
dev_kfree_skb_any(skb);
goto end;
}
@@ -1127,9 +1128,14 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
__skb_queue_tail(&ring->queue, pskb);
- rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
- &temp_one);
-
+ if (rtlpriv->use_new_trx_flow) {
+ temp_one = 4;
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true,
+ HW_DESC_OWN, (u8 *)&temp_one);
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
+ &temp_one);
+ }
return;
}
@@ -1370,9 +1376,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
ring->desc = NULL;
if (rtlpriv->use_new_trx_flow) {
pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->desc) * ring->entries,
+ sizeof(*ring->buffer_desc) * ring->entries,
ring->buffer_desc, ring->buffer_desc_dma);
- ring->desc = NULL;
+ ring->buffer_desc = NULL;
}
}
@@ -1543,7 +1549,6 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
- ring->idx = (ring->idx + 1) % ring->entries;
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
@@ -2244,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
/*like read eeprom and so on */
rtlpriv->cfg->ops->read_eeprom_info(hw);
+ if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+ err = -ENODEV;
+ goto fail3;
+ }
+ rtlpriv->cfg->ops->init_sw_leds(hw);
+
+ /*aspm */
+ rtl_pci_init_aspm(hw);
+
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
@@ -2259,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
goto fail3;
}
- if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
- err = -ENODEV;
- goto fail3;
- }
- rtlpriv->cfg->ops->init_sw_leds(hw);
-
- /*aspm */
- rtl_pci_init_aspm(hw);
-
err = ieee80211_register_hw(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 00e0670..5761d5b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1201,6 +1201,9 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
}
+ if (type != NL80211_IFTYPE_AP &&
+ rtlpriv->mac80211.link_state < MAC80211_LINKED)
+ bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK;
rtl_write_byte(rtlpriv, (MSR), bt_msr);
temp = rtl_read_dword(rtlpriv, TCR);
@@ -1262,6 +1265,7 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]);
/* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */
rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F);
+ rtlpci->irq_enabled = true;
}
void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
@@ -1276,8 +1280,7 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
rtlpci = rtl_pcidev(rtl_pcipriv(hw));
rtl_write_dword(rtlpriv, INTA_MASK, 0);
rtl_write_dword(rtlpriv, INTA_MASK + 4, 0);
-
- synchronize_irq(rtlpci->pdev->irq);
+ rtlpci->irq_enabled = false;
}
static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 77c5b5f..4b4612f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -399,6 +399,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
case 2:
currentcmd = &postcommoncmd[*step];
break;
+ default:
+ return true;
}
if (currentcmd->cmdid == CMDID_END) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index aadba29..fb00386 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -236,6 +236,19 @@ static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw)
}
}
+static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue,
+ u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8)rtl92se_get_desc(entry, true, HW_DESC_OWN);
+
+ if (own)
+ return false;
+ return true;
+}
+
static struct rtl_hal_ops rtl8192se_hal_ops = {
.init_sw_vars = rtl92s_init_sw_vars,
.deinit_sw_vars = rtl92s_deinit_sw_vars,
@@ -269,6 +282,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
.led_control = rtl92se_led_control,
.set_desc = rtl92se_set_desc,
.get_desc = rtl92se_get_desc,
+ .is_tx_desc_closed = rtl92se_is_tx_desc_closed,
.tx_polling = rtl92se_tx_polling,
.enable_hw_sec = rtl92se_enable_hw_security_config,
.set_key = rtl92se_set_key,
@@ -306,6 +320,8 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
.maps[MAC_RCR_ACRC32] = RCR_ACRC32,
.maps[MAC_RCR_ACF] = RCR_ACF,
.maps[MAC_RCR_AAP] = RCR_AAP,
+ .maps[MAC_HIMR] = INTA_MASK,
+ .maps[MAC_HIMRE] = INTA_MASK + 4,
.maps[EFUSE_TEST] = REG_EFUSE_TEST,
.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 310d316..8ec8200 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC)
macid = sta->aid + 1;
if (wirelessmode == WIRELESS_MODE_N_5G ||
- wirelessmode == WIRELESS_MODE_AC_5G)
- ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ];
+ wirelessmode == WIRELESS_MODE_AC_5G ||
+ wirelessmode == WIRELESS_MODE_A)
+ ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
else
ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 4e56a27..fab0d4b 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,7 +39,7 @@ struct backend_info {
static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
-static void backend_create_xenvif(struct backend_info *be);
+static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void set_backend_state(struct backend_info *be,
enum xenbus_state state);
@@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
be->state = XenbusStateInitWait;
/* This kicks hotplug scripts, so do it immediately. */
- backend_create_xenvif(be);
+ err = backend_create_xenvif(be);
+ if (err)
+ goto fail;
return 0;
@@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
}
-static void backend_create_xenvif(struct backend_info *be)
+static int backend_create_xenvif(struct backend_info *be)
{
int err;
long handle;
struct xenbus_device *dev = be->dev;
if (be->vif != NULL)
- return;
+ return 0;
err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
if (err != 1) {
xenbus_dev_fatal(dev, err, "reading handle");
- return;
+ return (err < 0) ? err : -EINVAL;
}
be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -417,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be)
err = PTR_ERR(be->vif);
be->vif = NULL;
xenbus_dev_fatal(dev, err, "creating interface");
- return;
+ return err;
}
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+ return 0;
}
static void backend_disconnect(struct backend_info *be)