Diffstat (limited to 'drivers/net/stmmac/stmmac_main.c')
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 86
1 file changed, 23 insertions(+), 63 deletions(-)
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index cc973fc..e15c4a0 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -116,9 +116,6 @@ static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define RX_NO_COALESCE 1 /* Always interrupt on completion */
-#define TX_NO_COALESCE -1 /* No moderation by default */
-
/* Pay attention to tune this parameter; take care of both
* hardware capability and network stability/performance impact.
* Many tests showed that ~4ms latency seems to be good enough. */
@@ -139,7 +136,6 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
-static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
/**
* stmmac_verify_args - verify the driver parameters.
@@ -831,6 +827,7 @@ static int stmmac_open(struct net_device *dev)
pr_info("stmmac: Rx Checksum Offload Engine supported\n");
if (priv->plat->tx_coe)
pr_info("\tTX Checksum insertion supported\n");
+ netdev_update_features(dev);
/* Initialise the MMC (if present) to disable all interrupts. */
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
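The netdev_update_features() call added above follows the usual pattern for capabilities that are only known once the hardware has been probed: the driver records what it found and asks the core to re-run ndo_fix_features/ndo_set_features. A minimal sketch of that pattern, using hypothetical foo_* names rather than the stmmac code:

#include <linux/netdevice.h>

/* Hypothetical driver private data; rx_coe mirrors the flag the stmmac
 * driver probes at open time. */
struct foo_priv {
	int rx_coe;
};

static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Pretend the capability was read from a hardware ID register. */
	priv->rx_coe = 1;

	/* Ask the core to re-evaluate dev->features: it calls
	 * ndo_fix_features and, if anything changed, ndo_set_features.
	 * ndo_open runs under rtnl_lock, which netdev_update_features
	 * requires. */
	netdev_update_features(dev);

	return 0;
}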
@@ -934,46 +931,6 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
-/*
- * To perform emulated hardware segmentation on skb.
- */
-static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
-{
- struct sk_buff *segs, *curr_skb;
- int gso_segs = skb_shinfo(skb)->gso_segs;
-
- /* Estimate the number of fragments in the worst case */
- if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
- netif_stop_queue(priv->dev);
- TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
- __func__);
- if (stmmac_tx_avail(priv) < gso_segs)
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(priv->dev);
- }
- TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
- skb, skb->len);
-
- segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
- if (IS_ERR(segs))
- goto sw_tso_end;
-
- do {
- curr_skb = segs;
- segs = segs->next;
- TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
- "*next %p\n", curr_skb->len, curr_skb, segs);
- curr_skb->next = NULL;
- stmmac_xmit(curr_skb, priv->dev);
- } while (segs);
-
-sw_tso_end:
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
struct net_device *dev,
int csum_insertion)
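Dropping stmmac_sw_tso() above is safe because the driver never advertises NETIF_F_TSO in hw_features (see the probe hunk further down), so the networking core segments any GSO skb in software before it reaches ndo_start_xmit. A rough sketch of that fallback, with hypothetical foo_* names and not the core's exact code:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_sw_gso(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs, *curr;

	/* Split the GSO skb into segments the device can take as-is. */
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_GSO_MASK);
	if (IS_ERR(segs)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Hand each segment to the driver's normal transmit path. */
	while (segs) {
		curr = segs;
		segs = segs->next;
		curr->next = NULL;
		dev->netdev_ops->ndo_start_xmit(curr, dev);
	}

	dev_kfree_skb(skb);	/* free the original, now-segmented skb */
	return NETDEV_TX_OK;
}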
@@ -1051,16 +1008,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
!skb_is_gso(skb) ? "isn't" : "is");
#endif
- if (unlikely(skb_is_gso(skb)))
- return stmmac_sw_tso(priv, skb);
-
- if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
- if (unlikely((!priv->plat->tx_coe) ||
- (priv->no_csum_insertion)))
- skb_checksum_help(skb);
- else
- csum_insertion = 1;
- }
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
desc = priv->dma_tx + entry;
first = desc;
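The simplified assignment above works because the checksum decision has moved out of the hot path: stmmac_fix_features() clears the checksum feature bits whenever insertion cannot be used, and the core then falls back to skb_checksum_help() before calling ndo_start_xmit. A conceptual sketch of that core-side behaviour (hypothetical helper, not the actual core code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_resolve_csum(struct sk_buff *skb, struct net_device *dev)
{
	/* If the (fixed-up) features say the device cannot insert the
	 * checksum for this skb, compute it in software now, so the
	 * driver only ever sees CHECKSUM_PARTIAL it can offload. */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !(dev->features & NETIF_F_ALL_CSUM))
		return skb_checksum_help(skb);

	return 0;
}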
@@ -1380,18 +1328,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->rx_coe)
+ features &= ~NETIF_F_RXCSUM;
+ if (!priv->plat->tx_coe)
+ features &= ~NETIF_F_ALL_CSUM;
+
/* Some GMAC devices have buggy Jumbo frame support that
* needs the Tx COE to be disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
* the TX csum insertion in the TDES and do not use SF. */
- if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
- priv->no_csum_insertion = 1;
- else
- priv->no_csum_insertion = 0;
+ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_ALL_CSUM;
- dev->mtu = new_mtu;
-
- return 0;
+ return features;
}
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
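The hunk above is the heart of the conversion: feature policy now lives in one place (ndo_fix_features), and anything that changes its inputs only has to update state and call netdev_update_features(). A minimal sketch of the same pattern with hypothetical foo_* names:

#include <linux/if_ether.h>
#include <linux/netdevice.h>

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	/* Example policy, mirroring the bugged-jumbo case above: no
	 * checksum offload once the MTU exceeds a standard frame. */
	if (dev->mtu > ETH_DATA_LEN)
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	netdev_update_features(dev);	/* re-runs foo_fix_features() */
	return 0;
}

The same callback is consulted when the user toggles offloads with ethtool -K, so a feature the hardware cannot honour at the current MTU is masked off again automatically.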
@@ -1471,6 +1430,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_start_xmit = stmmac_xmit,
.ndo_stop = stmmac_release,
.ndo_change_mtu = stmmac_change_mtu,
+ .ndo_fix_features = stmmac_fix_features,
.ndo_set_multicast_list = stmmac_multicast_list,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
@@ -1501,8 +1461,8 @@ static int stmmac_probe(struct net_device *dev)
dev->netdev_ops = &stmmac_netdev_ops;
stmmac_set_ethtool_ops(dev);
- dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
dev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
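The probe hunk splits the flags into hw_features (what the user may toggle via ethtool -K) and features (what is currently active); NETIF_F_HIGHDMA is added to features only, so it stays on and is not user-changeable. A hedged sketch of that split, using a hypothetical foo_probe_features():

#include <linux/netdevice.h>

static void foo_probe_features(struct net_device *dev)
{
	/* Offloads the user may switch on and off with ethtool -K ... */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* ... all enabled by default, plus HIGHDMA, which is always on
	 * because it is not listed in hw_features. */
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
}

Any bit the user later requests is still filtered through the ndo_fix_features callback before it becomes active.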