 drivers/net/sfc/efx.c        | 31
 drivers/net/sfc/efx.h        |  2
 drivers/net/sfc/ethtool.c    |  6
 drivers/net/sfc/net_driver.h | 64
 drivers/net/sfc/nic.c        | 51
 drivers/net/sfc/regs.h       |  6
 drivers/net/sfc/selftest.c   |  2
 drivers/net/sfc/tx.c         | 90
 net/core/dev.c               |  3
 net/sched/sch_mqprio.c       | 14
 10 files changed, 204 insertions(+), 65 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac7..d4e0425 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -673,7 +673,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
 		efx_fini_eventq(channel);
 	}
@@ -689,7 +689,7 @@ static void efx_remove_channel(struct efx_channel *channel)
 
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 }
@@ -1271,21 +1271,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 
 static void efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
-
 	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-	/* Channel pointers were set in efx_init_struct() but we now
-	 * need to clear them for TX queues in any RX-only channels. */
-	efx_for_each_channel(channel, efx) {
-		if (channel->channel - efx->tx_channel_offset >=
-		    efx->n_tx_channels) {
-			efx_for_each_channel_tx_queue(tx_queue, channel)
-				tx_queue->channel = NULL;
-		}
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1531,9 +1518,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 	efx->irq_rx_adaptive = rx_adaptive;
 	efx->irq_rx_moderation = rx_ticks;
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel))
+		if (efx_channel_has_rx_queue(channel))
 			channel->irq_moderation = rx_ticks;
-		else if (efx_channel_get_tx_queue(channel, 0))
+		else if (efx_channel_has_tx_queues(channel))
 			channel->irq_moderation = tx_ticks;
 	}
 }
@@ -1849,6 +1836,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
 #endif
+	.ndo_setup_tc		= efx_setup_tc,
 };
 
 static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1898,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 	efx_for_each_channel(channel, efx) {
 		struct efx_tx_queue *tx_queue;
 
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			tx_queue->core_txq = netdev_get_tx_queue(
-				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-		}
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
 	/* Always start with carrier off; PHY events will detect the link */
@@ -2401,7 +2387,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
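A side note on the allocation change above: alloc_etherdev_mqs() sizes the TX and RX queue arrays independently, where the older alloc_etherdev_mq() used a single count for both. A minimal sketch of the call; the private struct and queue counts here are hypothetical, chosen only to show asymmetric sizing:

	#include <linux/etherdevice.h>

	struct example_priv {
		int dummy;	/* stand-in for real driver state */
	};

	static struct net_device *example_alloc(void)
	{
		/* e.g. 16 core TX queues (2 TCs x 8 channels), 8 RX queues */
		return alloc_etherdev_mqs(sizeof(struct example_priv), 16, 8);
	}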
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5..0cb198a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -29,6 +29,7 @@ extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb,
 		    struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 713969a..272cfe7 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -631,7 +631,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	/* Find lowest IRQ moderation across all used TX queues */
 	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
 	efx_for_each_channel(channel, efx) {
-		if (!efx_channel_get_tx_queue(channel, 0))
+		if (!efx_channel_has_tx_queues(channel))
 			continue;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
 			if (channel->channel < efx->n_rx_channels)
@@ -676,8 +676,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 
 	/* If the channel is shared only allow RX parameters to be set */
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel) &&
-		    efx_channel_get_tx_queue(channel, 0) &&
+		if (efx_channel_has_rx_queue(channel) &&
+		    efx_channel_has_tx_queues(channel) &&
 		    tx_usecs) {
 			netif_err(efx, drv, efx->net_dev, "Channel is shared. "
 				  "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index c652702..96e22ad 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD	1
-#define EFX_TXQ_TYPES		2
-#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
+#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
+#define EFX_TXQ_TYPES		4
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
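For illustration only, not part of the patch: with EFX_TXQ_TYPES grown to 4, the two low bits of a hardware queue number are the type flags and the remaining bits select the channel, assuming the usual queue = channel * EFX_TXQ_TYPES + type layout that efx_get_tx_queue() below relies on. A standalone sketch that decodes the first eight queue numbers:

	#include <stdio.h>

	#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
	#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
	#define EFX_TXQ_TYPES		4

	int main(void)
	{
		unsigned queue;

		for (queue = 0; queue < 2 * EFX_TXQ_TYPES; queue++)
			printf("hw queue %u: channel %u%s%s\n",
			       queue, queue / EFX_TXQ_TYPES,
			       queue & EFX_TXQ_TYPE_OFFLOAD ?
			       ", csum offload" : "",
			       queue & EFX_TXQ_TYPE_HIGHPRI ?
			       ", high priority" : "");
		return 0;
	}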
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	bool initialised;
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
@@ -377,7 +381,7 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-	struct efx_tx_queue tx_queue[2];
+	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {
@@ -938,18 +942,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-	struct efx_tx_queue *tx_queue = channel->tx_queue;
-	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-	return tx_queue->channel ? tx_queue + type : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+			    type >= EFX_TXQ_TYPES);
+	return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+	return !(tx_queue->efx->net_dev->num_tc < 2 &&
+		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
-	for (_tx_queue = efx_channel_get_tx_queue(channel, 0);		\
-	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+	if (!efx_channel_has_tx_queues(_channel))			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queue;			\
+		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+			     efx_tx_queue_used(_tx_queue);		\
+		     _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
 	     _tx_queue++)
 
 static inline struct efx_rx_queue *
@@ -959,18 +985,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
 	return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-	return channel->channel < channel->efx->n_rx_channels ?
-		&channel->rx_queue : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
-	for (_rx_queue = efx_channel_get_rx_queue(channel);		\
-	     _rx_queue;							\
-	     _rx_queue = NULL)
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
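The rewritten iterators above use an if/else shape instead of a NULL sentinel so the macro still behaves as a single statement. A standalone sketch of the same idiom with a hypothetical vector type; the point is that the trailing else binds to the macro's own if, so an unbraced "if (x) vec_for_each(...) body; else ..." in a caller parses the way it reads:

	struct vec {
		int *items;
		unsigned int len;
	};

	#define vec_for_each(_p, _v)					\
		if ((_v)->len == 0)					\
			;						\
		else							\
			for (_p = (_v)->items;				\
			     _p < (_v)->items + (_v)->len;		\
			     _p++)

	static int vec_sum(struct vec *v)
	{
		int *p, sum = 0;

		vec_for_each(p, v)	/* body runs only for non-empty vectors */
			sum += *p;
		return sum;
	}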
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da38659..1d0b8b6 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 				    !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 			set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1246,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 				++rx_pending;
 			}
 		}
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				++tx_pending;
 		}
 	}
@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);
@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed..8227de6 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2907,6 +2907,12 @@
 #define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99..f936892 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 			goto out;
 		}
 
-		/* Test both types of TX queue */
+		/* Test all enabled types of TX queue */
 		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			state->offload_csum = (tx_queue->queue &
 					       EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da..1a51653 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
 
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+
+	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES +
+				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused. We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues. Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
 
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
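A worked example, not part of the patch, of the mapping that efx_hard_start_xmit() and efx_init_tx_queue_core_txq() above must keep as exact inverses. It assumes tx_channel_offset is zero (the default, with separate_tx_channels unset): core queues 0..n-1 carry normal traffic, core queues n..2n-1 carry high-priority traffic, and hardware queue numbers encode the type flags in their low bits.

	#include <assert.h>

	#define EFX_TXQ_TYPE_OFFLOAD	1
	#define EFX_TXQ_TYPE_HIGHPRI	2
	#define EFX_TXQ_TYPES		4

	/* Mirrors the queue selection in efx_hard_start_xmit() */
	static unsigned hw_queue(unsigned core, int csum, unsigned n_tx_channels)
	{
		unsigned type = csum ? EFX_TXQ_TYPE_OFFLOAD : 0;

		if (core >= n_tx_channels) {		/* high-priority half */
			core -= n_tx_channels;
			type |= EFX_TXQ_TYPE_HIGHPRI;
		}
		return core * EFX_TXQ_TYPES + type;
	}

	/* Mirrors efx_init_tx_queue_core_txq() */
	static unsigned core_queue(unsigned hw, unsigned n_tx_channels)
	{
		return hw / EFX_TXQ_TYPES +
		       (hw & EFX_TXQ_TYPE_HIGHPRI ? n_tx_channels : 0);
	}

	int main(void)
	{
		unsigned core;

		/* Round-tripping every core queue must be the identity */
		for (core = 0; core < 8; core++)
			assert(core_queue(hw_queue(core, 1, 4), 4) == core);
		return 0;
	}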
diff --git a/net/core/dev.c b/net/core/dev.c
index a413276..4580460 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1648,7 +1648,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 	if (txq < 1 || txq > dev->num_tx_queues)
 		return -EINVAL;
 
-	if (dev->reg_state == NETREG_REGISTERED) {
+	if (dev->reg_state == NETREG_REGISTERED ||
+	    dev->reg_state == NETREG_UNREGISTERING) {
 		ASSERT_RTNL();
 
 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index effd4ee..ace37f9 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -29,18 +29,18 @@ static void mqprio_destroy(struct Qdisc *sch)
 	struct mqprio_sched *priv = qdisc_priv(sch);
 	unsigned int ntx;
 
-	if (!priv->qdiscs)
-		return;
-
-	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
-		qdisc_destroy(priv->qdiscs[ntx]);
+	if (priv->qdiscs) {
+		for (ntx = 0;
+		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+		     ntx++)
+			qdisc_destroy(priv->qdiscs[ntx]);
+		kfree(priv->qdiscs);
+	}
 
 	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
 		dev->netdev_ops->ndo_setup_tc(dev, 0);
 	else
 		netdev_set_num_tc(dev, 0);
-
-	kfree(priv->qdiscs);
 }
 
 static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
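To summarise the contract that sch_mqprio relies on above, here is a hypothetical skeleton of an ndo_setup_tc implementation for a generic driver, mirroring the ordering used by efx_setup_tc(): grow the real queue count before advertising new classes, and retire classes before shrinking the queue count, so the tc map never points at queues that do not exist. The EXAMPLE_* limits and the direct tc_to_txq writes follow the patch's own style and are assumptions for illustration, not an API requirement.

	#include <linux/kernel.h>
	#include <linux/netdevice.h>

	#define EXAMPLE_MAX_TC		2	/* hypothetical hardware limit */
	#define EXAMPLE_QUEUES_PER_TC	4	/* hypothetical queues per class */

	static int example_setup_tc(struct net_device *dev, u8 num_tc)
	{
		unsigned int nq = max_t(unsigned int, num_tc, 1) *
				  EXAMPLE_QUEUES_PER_TC;
		unsigned int tc;
		int rc;

		if (num_tc > EXAMPLE_MAX_TC)
			return -EINVAL;
		if (num_tc == dev->num_tc)
			return 0;

		/* Publish the tc -> queue-range map first */
		for (tc = 0; tc < num_tc; tc++) {
			dev->tc_to_txq[tc].offset = tc * EXAMPLE_QUEUES_PER_TC;
			dev->tc_to_txq[tc].count = EXAMPLE_QUEUES_PER_TC;
		}

		if (num_tc > dev->num_tc) {
			/* Growing: make the queues real before the new
			 * classes can steer traffic onto them. */
			rc = netif_set_real_num_tx_queues(dev, nq);
			if (rc)
				return rc;
			dev->num_tc = num_tc;
		} else {
			/* Shrinking: retire the classes first, then queues. */
			dev->num_tc = num_tc;
			rc = netif_set_real_num_tx_queues(dev, nq);
			if (rc)
				return rc;
		}
		return 0;
	}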