-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h  |  2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c     | 28
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c     | 45
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c    | 19
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h         |  6
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c               | 98
7 files changed, 127 insertions, 90 deletions
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 29a0927..34c28aa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -365,6 +365,8 @@
 #define MAC_HWF0R_TXCOESEL_WIDTH        1
 #define MAC_HWF0R_VLHASH_INDEX          4
 #define MAC_HWF0R_VLHASH_WIDTH          1
+#define MAC_HWF1R_ADDR64_INDEX          14
+#define MAC_HWF1R_ADDR64_WIDTH          2
 #define MAC_HWF1R_ADVTHWORD_INDEX       13
 #define MAC_HWF1R_ADVTHWORD_WIDTH       1
 #define MAC_HWF1R_DBGMEMA_INDEX         19
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 400757b..80dd7a9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1068,7 +1068,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
         rdesc->desc3 = 0;

         /* Make sure ownership is written to the descriptor */
-        wmb();
+        dma_wmb();
 }

 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
@@ -1124,12 +1124,12 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
          * is written to the descriptor(s) before setting the OWN bit
          * for the descriptor
          */
-        wmb();
+        dma_wmb();

         XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

         /* Make sure ownership is written to the descriptor */
-        wmb();
+        dma_wmb();
 }

 static void xgbe_rx_desc_init(struct xgbe_channel *channel)
@@ -1358,18 +1358,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
         struct xgbe_prv_data *pdata = channel->pdata;
         struct xgbe_ring_data *rdata;

+        /* Make sure everything is written before the register write */
+        wmb();
+
         /* Issue a poll command to Tx DMA by writing address
          * of next immediate free descriptor */
         rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
         XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                           lower_32_bits(rdata->rdesc_dma));

-        /* Start the Tx coalescing timer */
+        /* Start the Tx timer */
         if (pdata->tx_usecs && !channel->tx_timer_active) {
                 channel->tx_timer_active = 1;
-                hrtimer_start(&channel->tx_timer,
-                              ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
-                              HRTIMER_MODE_REL);
+                mod_timer(&channel->tx_timer,
+                          jiffies + usecs_to_jiffies(pdata->tx_usecs));
         }

         ring->tx.xmit_more = 0;
@@ -1565,7 +1567,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
          * is written to the descriptor(s) before setting the OWN bit
          * for the first descriptor
          */
-        wmb();
+        dma_wmb();

         /* Set OWN bit for the first descriptor */
         rdata = XGBE_GET_DESC_DATA(ring, start_index);
@@ -1577,7 +1579,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 #endif

         /* Make sure ownership is written to the descriptor */
-        wmb();
+        dma_wmb();

         ring->cur = cur_index + 1;
         if (!packet->skb->xmit_more ||
@@ -1613,7 +1615,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                 return 1;

         /* Make sure descriptor fields are read after reading the OWN bit */
-        rmb();
+        dma_rmb();

 #ifdef XGMAC_ENABLE_RX_DESC_DUMP
         xgbe_dump_rx_desc(ring, rdesc, ring->cur);
@@ -2004,7 +2006,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
         for (i = 0; i < pdata->tx_q_count; i++)
                 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

-        netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+        netdev_notice(pdata->netdev,
+                      "%d Tx hardware queues, %d byte fifo per queue\n",
                       pdata->tx_q_count, ((fifo_size + 1) * 256));
 }

@@ -2019,7 +2022,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
         for (i = 0; i < pdata->rx_q_count; i++)
                 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

-        netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+        netdev_notice(pdata->netdev,
+                      "%d Rx hardware queues, %d byte fifo per queue\n",
                       pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 885b02b..347fe24 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -411,11 +411,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
         return IRQ_HANDLED;
 }

-static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+static void xgbe_tx_timer(unsigned long data)
 {
-        struct xgbe_channel *channel = container_of(timer,
-                                                    struct xgbe_channel,
-                                                    tx_timer);
+        struct xgbe_channel *channel = (struct xgbe_channel *)data;
         struct xgbe_prv_data *pdata = channel->pdata;
         struct napi_struct *napi;

@@ -437,8 +435,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
         channel->tx_timer_active = 0;

         DBGPR("<--xgbe_tx_timer\n");
-
-        return HRTIMER_NORESTART;
 }

 static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
@@ -454,9 +450,8 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
                         break;

                 DBGPR("  %s adding tx timer\n", channel->name);
-                hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
-                             HRTIMER_MODE_REL);
-                channel->tx_timer.function = xgbe_tx_timer;
+                setup_timer(&channel->tx_timer, xgbe_tx_timer,
+                            (unsigned long)channel);
         }

         DBGPR("<--xgbe_init_tx_timers\n");
@@ -475,8 +470,7 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
                         break;

                 DBGPR("  %s deleting tx timer\n", channel->name);
-                channel->tx_timer_active = 0;
-                hrtimer_cancel(&channel->tx_timer);
+                del_timer_sync(&channel->tx_timer);
         }

         DBGPR("<--xgbe_stop_tx_timers\n");
@@ -519,6 +513,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                                                 RXFIFOSIZE);
         hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                 TXFIFOSIZE);
+        hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
         hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
         hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
         hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
@@ -553,6 +548,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                 break;
         }

+        /* Translate the address width setting into actual number */
+        switch (hw_feat->dma_width) {
+        case 0:
+                hw_feat->dma_width = 32;
+                break;
+        case 1:
+                hw_feat->dma_width = 40;
+                break;
+        case 2:
+                hw_feat->dma_width = 48;
+                break;
+        default:
+                hw_feat->dma_width = 32;
+        }
+
         /* The Queue, Channel and TC counts are zero based so increment them
          * to get the actual number
          */
@@ -692,6 +702,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
         DBGPR("-->xgbe_init_rx_coalesce\n");

         pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+        pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
         pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

         hw_if->config_rx_coalesce(pdata);
@@ -1800,6 +1811,9 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
                 ring->dirty++;
         }

+        /* Make sure everything is written before the register write */
+        wmb();
+
         /* Update the Rx Tail Pointer Register with address of
          * the last cleaned entry */
         rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
@@ -1807,16 +1821,15 @@
                           lower_32_bits(rdata->rdesc_dma));
 }

-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
                                        struct xgbe_ring_data *rdata,
                                        unsigned int *len)
 {
-        struct net_device *netdev = pdata->netdev;
         struct sk_buff *skb;
         u8 *packet;
         unsigned int copy_len;

-        skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
+        skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
         if (!skb)
                 return NULL;

@@ -1863,7 +1876,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)

                 /* Make sure descriptor fields are read after reading the OWN
                  * bit */
-                rmb();
+                dma_rmb();

 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
                 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
@@ -1986,7 +1999,7 @@ read_again:
                                                rdata->rx.hdr.dma_len,
                                                DMA_FROM_DEVICE);

-                        skb = xgbe_create_skb(pdata, rdata, &put_len);
+                        skb = xgbe_create_skb(napi, rdata, &put_len);
                         if (!skb) {
                                 error = 1;
                                 goto skip_data;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index ebf4893..b4f6eaa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -291,7 +291,6 @@ static int xgbe_get_settings(struct net_device *netdev,
                 return -ENODEV;

         ret = phy_ethtool_gset(pdata->phydev, cmd);
-        cmd->transceiver = XCVR_EXTERNAL;

         DBGPR("<--xgbe_get_settings\n");

@@ -378,18 +377,14 @@ static int xgbe_get_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *ec)
 {
         struct xgbe_prv_data *pdata = netdev_priv(netdev);
-        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-        unsigned int riwt;

         DBGPR("-->xgbe_get_coalesce\n");

         memset(ec, 0, sizeof(struct ethtool_coalesce));

-        riwt = pdata->rx_riwt;
-        ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+        ec->rx_coalesce_usecs = pdata->rx_usecs;
         ec->rx_max_coalesced_frames = pdata->rx_frames;

-        ec->tx_coalesce_usecs = pdata->tx_usecs;
         ec->tx_max_coalesced_frames = pdata->tx_frames;

         DBGPR("<--xgbe_get_coalesce\n");
@@ -403,13 +398,14 @@ static int xgbe_set_coalesce(struct net_device *netdev,
         struct xgbe_prv_data *pdata = netdev_priv(netdev);
         struct xgbe_hw_if *hw_if = &pdata->hw_if;
         unsigned int rx_frames, rx_riwt, rx_usecs;
-        unsigned int tx_frames, tx_usecs;
+        unsigned int tx_frames;

         DBGPR("-->xgbe_set_coalesce\n");

         /* Check for not supported parameters */
         if ((ec->rx_coalesce_usecs_irq) ||
             (ec->rx_max_coalesced_frames_irq) ||
+            (ec->tx_coalesce_usecs) ||
             (ec->tx_coalesce_usecs_irq) ||
             (ec->tx_max_coalesced_frames_irq) ||
             (ec->stats_block_coalesce_usecs) ||
@@ -439,17 +435,17 @@ static int xgbe_set_coalesce(struct net_device *netdev,
         }

         rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+        rx_usecs = ec->rx_coalesce_usecs;
         rx_frames = ec->rx_max_coalesced_frames;

         /* Use smallest possible value if conversion resulted in zero */
-        if (ec->rx_coalesce_usecs && !rx_riwt)
+        if (rx_usecs && !rx_riwt)
                 rx_riwt = 1;

         /* Check the bounds of values for Rx */
         if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
-                rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
                 netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
-                             rx_usecs);
+                             hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
                 return -EINVAL;
         }
         if (rx_frames > pdata->rx_desc_count) {
@@ -458,7 +454,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
                 return -EINVAL;
         }

-        tx_usecs = ec->tx_coalesce_usecs;
         tx_frames = ec->tx_max_coalesced_frames;

         /* Check the bounds of values for Tx */
@@ -469,10 +464,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
         }

         pdata->rx_riwt = rx_riwt;
+        pdata->rx_usecs = rx_usecs;
         pdata->rx_frames = rx_frames;
         hw_if->config_rx_coalesce(pdata);

-        pdata->tx_usecs = tx_usecs;
         pdata->tx_frames = tx_frames;
         hw_if->config_tx_coalesce(pdata);

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 32dd651..2e4c22d9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -374,15 +374,6 @@ static int xgbe_probe(struct platform_device *pdev)
                 pdata->awcache = XGBE_DMA_SYS_AWCACHE;
         }

-        /* Set the DMA mask */
-        if (!dev->dma_mask)
-                dev->dma_mask = &dev->coherent_dma_mask;
-        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
-        if (ret) {
-                dev_err(dev, "dma_set_mask_and_coherent failed\n");
-                goto err_io;
-        }
-
         /* Get the device interrupt */
         ret = platform_get_irq(pdev, 0);
         if (ret < 0) {
@@ -409,6 +400,16 @@ static int xgbe_probe(struct platform_device *pdev)
         /* Set default configuration data */
         xgbe_default_config(pdata);

+        /* Set the DMA mask */
+        if (!dev->dma_mask)
+                dev->dma_mask = &dev->coherent_dma_mask;
+        ret = dma_set_mask_and_coherent(dev,
+                                        DMA_BIT_MASK(pdata->hw_feat.dma_width));
+        if (ret) {
+                dev_err(dev, "dma_set_mask_and_coherent failed\n");
+                goto err_io;
+        }
+
         /* Calculate the number of Tx and Rx rings to be created
          *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
          *   the number of Tx queues to the number of Tx channels
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 1eea3e5..dd74242 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -222,7 +222,7 @@
          ((_idx) & ((_ring)->rdesc_count - 1)))

 /* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS         50
+#define XGMAC_INIT_DMA_TX_USECS         1000
 #define XGMAC_INIT_DMA_TX_FRAMES        25

 #define XGMAC_MAX_DMA_RIWT              0xff
@@ -410,7 +410,7 @@ struct xgbe_channel {
         unsigned int saved_ier;

         unsigned int tx_timer_active;
-        struct hrtimer tx_timer;
+        struct timer_list tx_timer;

         struct xgbe_ring *tx_ring;
         struct xgbe_ring *rx_ring;
@@ -632,6 +632,7 @@ struct xgbe_hw_features {
         unsigned int rx_fifo_size;      /* MTL Receive FIFO Size */
         unsigned int tx_fifo_size;      /* MTL Transmit FIFO Size */
         unsigned int adv_ts_hi;         /* Advance Timestamping High Word */
+        unsigned int dma_width;         /* DMA width */
         unsigned int dcb;               /* DCB Feature */
         unsigned int sph;               /* Split Header Feature */
         unsigned int tso;               /* TCP Segmentation Offload */
@@ -715,6 +716,7 @@ struct xgbe_prv_data {

         /* Rx coalescing settings */
         unsigned int rx_riwt;
+        unsigned int rx_usecs;
         unsigned int rx_frames;

         /* Current Rx buffer size */
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 32efbd4..fb276f6 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -78,6 +78,7 @@
 #include <linux/bitops.h>
 #include <linux/property.h>
 #include <linux/acpi.h>
+#include <linux/jiffies.h>

 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -100,6 +101,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_SPEED_2500             1
 #define XGBE_PHY_SPEED_10000            2

+#define XGBE_AN_MS_TIMEOUT              500
+
 #define XGBE_AN_INT_CMPLT               0x01
 #define XGBE_AN_INC_LINK                0x02
 #define XGBE_AN_PG_RCV                  0x04
@@ -434,6 +437,7 @@ struct amd_xgbe_phy_priv {
         unsigned int an_supported;
         unsigned int parallel_detect;
         unsigned int fec_ability;
+        unsigned long an_start;

         unsigned int lpm_ctrl;          /* CTRL1 for resume */
 };
@@ -902,8 +906,23 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
 {
         struct amd_xgbe_phy_priv *priv = phydev->priv;
         enum amd_xgbe_phy_rx *state;
+        unsigned long an_timeout;
         int ret;

+        if (!priv->an_start) {
+                priv->an_start = jiffies;
+        } else {
+                an_timeout = priv->an_start +
+                             msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+                if (time_after(jiffies, an_timeout)) {
+                        /* Auto-negotiation timed out, reset state */
+                        priv->kr_state = AMD_XGBE_RX_BPA;
+                        priv->kx_state = AMD_XGBE_RX_BPA;
+
+                        priv->an_start = jiffies;
+                }
+        }
+
         state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
                                                 : &priv->kx_state;

@@ -932,8 +951,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
         if (amd_xgbe_phy_in_kr_mode(phydev)) {
                 priv->kr_state = AMD_XGBE_RX_ERROR;

-                if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
-                    !(phydev->supported & SUPPORTED_2500baseX_Full))
+                if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
+                    !(phydev->advertising & SUPPORTED_2500baseX_Full))
                         return AMD_XGBE_AN_NO_LINK;

                 if (priv->kx_state != AMD_XGBE_RX_BPA)
@@ -941,7 +960,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
         } else {
                 priv->kx_state = AMD_XGBE_RX_ERROR;

-                if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
+                if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
                         return AMD_XGBE_AN_NO_LINK;

                 if (priv->kr_state != AMD_XGBE_RX_BPA)
@@ -1078,6 +1097,7 @@ again:
                 priv->an_state = AMD_XGBE_AN_READY;
                 priv->kr_state = AMD_XGBE_RX_BPA;
                 priv->kx_state = AMD_XGBE_RX_BPA;
+                priv->an_start = 0;
         }

         if (cur_state != priv->an_state)
@@ -1101,7 +1121,7 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
         if (ret < 0)
                 return ret;

-        if (phydev->supported & SUPPORTED_10000baseR_FEC)
+        if (phydev->advertising & SUPPORTED_10000baseR_FEC)
                 ret |= 0xc000;
         else
                 ret &= ~0xc000;
@@ -1113,13 +1133,13 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
         if (ret < 0)
                 return ret;

-        if (phydev->supported & SUPPORTED_10000baseKR_Full)
+        if (phydev->advertising & SUPPORTED_10000baseKR_Full)
                 ret |= 0x80;
         else
                 ret &= ~0x80;

-        if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
-            (phydev->supported & SUPPORTED_2500baseX_Full))
+        if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
+            (phydev->advertising & SUPPORTED_2500baseX_Full))
                 ret |= 0x20;
         else
                 ret &= ~0x20;
@@ -1131,12 +1151,12 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
         if (ret < 0)
                 return ret;

-        if (phydev->supported & SUPPORTED_Pause)
+        if (phydev->advertising & SUPPORTED_Pause)
                 ret |= 0x400;
         else
                 ret &= ~0x400;

-        if (phydev->supported & SUPPORTED_Asym_Pause)
+        if (phydev->advertising & SUPPORTED_Asym_Pause)
                 ret |= 0x800;
         else
                 ret &= ~0x800;
@@ -1212,38 +1232,14 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
                 priv->an_irq_allocated = 1;
         }

-        ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
-        if (ret < 0)
-                return ret;
-        priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
-
-        /* Initialize supported features */
-        phydev->supported = SUPPORTED_Autoneg;
-        phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-        phydev->supported |= SUPPORTED_Backplane;
-        phydev->supported |= SUPPORTED_10000baseKR_Full;
-        switch (priv->speed_set) {
-        case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-                phydev->supported |= SUPPORTED_1000baseKX_Full;
-                break;
-        case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-                phydev->supported |= SUPPORTED_2500baseX_Full;
-                break;
-        }
-
-        if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
-                phydev->supported |= SUPPORTED_10000baseR_FEC;
-
-        phydev->advertising = phydev->supported;
-
         /* Set initial mode - call the mode setting routines
          * directly to insure we are properly configured
          */
-        if (phydev->supported & SUPPORTED_10000baseKR_Full)
+        if (phydev->advertising & SUPPORTED_10000baseKR_Full)
                 ret = amd_xgbe_phy_xgmii_mode(phydev);
-        else if (phydev->supported & SUPPORTED_1000baseKX_Full)
+        else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
                 ret = amd_xgbe_phy_gmii_mode(phydev);
-        else if (phydev->supported & SUPPORTED_2500baseX_Full)
+        else if (phydev->advertising & SUPPORTED_2500baseX_Full)
                 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
         else
                 ret = -EINVAL;
@@ -1315,10 +1311,10 @@ static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
         disable_irq(priv->an_irq);

         /* Start auto-negotiation in a supported mode */
-        if (phydev->supported & SUPPORTED_10000baseKR_Full)
+        if (phydev->advertising & SUPPORTED_10000baseKR_Full)
                 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-        else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
-                 (phydev->supported & SUPPORTED_2500baseX_Full))
+        else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
+                 (phydev->advertising & SUPPORTED_2500baseX_Full))
                 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
         else
                 ret = -EINVAL;
@@ -1746,6 +1742,29 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                                      sizeof(priv->serdes_dfe_tap_ena));
         }

+        /* Initialize supported features */
+        phydev->supported = SUPPORTED_Autoneg;
+        phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+        phydev->supported |= SUPPORTED_Backplane;
+        phydev->supported |= SUPPORTED_10000baseKR_Full;
+        switch (priv->speed_set) {
+        case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+                phydev->supported |= SUPPORTED_1000baseKX_Full;
+                break;
+        case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+                phydev->supported |= SUPPORTED_2500baseX_Full;
+                break;
+        }
+
+        ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
+        if (ret < 0)
+                return ret;
+        priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
+        if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
+                phydev->supported |= SUPPORTED_10000baseR_FEC;
+
+        phydev->advertising = phydev->supported;
+
         phydev->priv = priv;

         if (!priv->adev || acpi_disabled)
@@ -1817,6 +1836,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
         .phy_id_mask            = XGBE_PHY_MASK,
         .name                   = "AMD XGBE PHY",
         .features               = 0,
+        .flags                  = PHY_IS_INTERNAL,
         .probe                  = amd_xgbe_phy_probe,
         .remove                 = amd_xgbe_phy_remove,
         .soft_reset             = amd_xgbe_phy_soft_reset,
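Taken together, the xgbe-drv.c and xgbe-main.c changes size the DMA mask from the ADDR64 field reported in MAC_HWF1R instead of hard-coding 40 bits, which is why the dma_set_mask_and_coherent() call moves after xgbe_get_all_hw_features(). A minimal userspace sketch of that translation (DMA_BIT_MASK() is written out to mirror the kernel macro; addr64_to_width() is an illustrative name, not a driver function):

#include <stdio.h>

/* Written out to mirror the kernel's DMA_BIT_MASK() macro */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Translate the MAC_HWF1R ADDR64 field into an address width, following
 * the switch statement added to xgbe_get_all_hw_features() */
static unsigned int addr64_to_width(unsigned int addr64)
{
        switch (addr64) {
        case 0:
                return 32;
        case 1:
                return 40;
        case 2:
                return 48;
        default:
                return 32;
        }
}

int main(void)
{
        for (unsigned int field = 0; field <= 3; field++) {
                unsigned int width = addr64_to_width(field);

                printf("ADDR64=%u -> %u-bit mask 0x%016llx\n", field, width,
                       (unsigned long long)DMA_BIT_MASK(width));
        }
        return 0;
}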
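The Tx path now arms a jiffies-based timer_list instead of an hrtimer, so the coalescing period is rounded up to whole jiffies; raising XGMAC_INIT_DMA_TX_USECS from 50 to 1000 in xgbe.h is consistent with that coarser resolution. A small standalone sketch of the rounding, approximating what usecs_to_jiffies() does for a few HZ settings (usecs_to_ticks() is a stand-in, not the kernel helper):

#include <stdio.h>

/* Round-up conversion from microseconds to timer ticks; this approximates
 * usecs_to_jiffies() for a given HZ and ignores the kernel's overflow
 * handling. */
static unsigned long usecs_to_ticks(unsigned long usecs, unsigned long hz)
{
        return (usecs * hz + 999999UL) / 1000000UL;
}

int main(void)
{
        unsigned long hz_values[] = { 100, 250, 1000 };

        for (unsigned int i = 0; i < 3; i++) {
                unsigned long hz = hz_values[i];

                /* Both the old 50 usec and the new 1000 usec defaults round
                 * up to a single tick at these HZ values. */
                printf("HZ=%-4lu: 50 usecs -> %lu tick(s), 1000 usecs -> %lu tick(s)\n",
                       hz, usecs_to_ticks(50, hz), usecs_to_ticks(1000, hz));
        }
        return 0;
}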
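In amd-xgbe-phy.c, the first received auto-negotiation page records priv->an_start, and later pages reset the kr/kx rx state machines once more than XGBE_AN_MS_TIMEOUT (500 ms) has elapsed. A userspace analogue of that jiffies/time_after() pattern, using a monotonic clock (an_watchdog() and now_ms() are illustrative names only):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define AN_MS_TIMEOUT 500       /* same value as XGBE_AN_MS_TIMEOUT */

static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Returns true when negotiation has been running past the timeout and the
 * caller should reset its rx state machines, mirroring the check added to
 * amd_xgbe_an_page_received(); a value of 0 means "not started yet". */
static bool an_watchdog(long *an_start)
{
        long now = now_ms();

        if (!*an_start) {
                *an_start = now;
                return false;
        }

        if (now - *an_start > AN_MS_TIMEOUT) {
                *an_start = now;        /* restart the window */
                return true;
        }

        return false;
}

int main(void)
{
        long an_start = 0;

        for (int page = 0; page < 3; page++) {
                if (an_watchdog(&an_start))
                        printf("page %d: timed out, resetting rx state\n", page);
                else
                        printf("page %d: within the %d ms window\n", page,
                               AN_MS_TIMEOUT);
        }
        return 0;
}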