author		David S. Miller <davem@davemloft.net>	2008-07-17 00:34:19 -0700
committer	David S. Miller <davem@davemloft.net>	2008-07-17 19:21:00 -0700
commit		e8a0464cc950972824e2e128028ae3db666ec1ed
tree		5022b95396c0f3b313531bc39b19543c03551b9a /net/sched
parent		070825b3840a743e21ebcc44f8279708a4fed977
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue
structures for TX, based upon the queue_count argument.
Furthermore, all accesses to the TX queues are now vectored
through the netdev_get_tx_queue() and netdev_for_each_tx_queue()
interfaces. This makes it easy to grep the tree for all
things that want to get to a TX queue of a net device.
Problem spots which are not really multiqueue aware yet, and
only work with one queue, can easily be spotted by grepping
for all netdev_get_tx_queue() calls that pass in a zero index.
Signed-off-by: David S. Miller <davem@davemloft.net>
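
For reference, the two accessor interfaces named above are added to include/linux/netdevice.h by this commit. A minimal sketch of their shape, inferred from how the patch below uses them (the _tx array field name is an assumption, since the header change is outside net/sched):

/* Sketch only: per-queue accessor, assuming the TX array allocated by
 * alloc_netdev_mq() is stored as dev->_tx[]. */
static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
						       unsigned int index)
{
	return &dev->_tx[index];
}

/* Sketch only: iterator applying f() to every TX queue with an opaque
 * argument -- which is why the per-queue helpers in this patch take a
 * void * and cast it back to the type they need. */
static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}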
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/cls_api.c	|   4
-rw-r--r--	net/sched/sch_api.c	|  32
-rw-r--r--	net/sched/sch_generic.c	| 178
-rw-r--r--	net/sched/sch_teql.c	|  21
4 files changed, 169 insertions(+), 66 deletions(-)
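
The allocation itself happens in alloc_netdev_mq() in net/core/dev.c, outside this diffstat. A hedged sketch of that step (error handling trimmed; the _tx and num_tx_queues field names are assumptions carried over from the sketch above):

	/* Sketch only: size the TX queue array from the caller's
	 * queue_count instead of embedding a single netdev_queue. */
	struct netdev_queue *tx;

	tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
	if (!tx)
		return NULL;	/* the real code also frees the partly built net_device */

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;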
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b483bbe..d0b0a9b 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -166,7 +166,7 @@ replay:
 
 	/* Find qdisc */
 	if (!parent) {
-		struct netdev_queue *dev_queue = &dev->tx_queue;
+		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
 		q = dev_queue->qdisc_sleeping;
 		parent = q->handle;
 	} else {
@@ -410,7 +410,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
 		return skb->len;
 
-	dev_queue = &dev->tx_queue;
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	if (!tcm->tcm_parent)
 		q = dev_queue->qdisc_sleeping;
 	else
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 95873f8..830ccc5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -183,9 +183,8 @@ EXPORT_SYMBOL(unregister_qdisc);
    (root qdisc, all its children, children of children etc.)
  */
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+static struct Qdisc *__qdisc_lookup(struct netdev_queue *dev_queue, u32 handle)
 {
-	struct netdev_queue *dev_queue = &dev->tx_queue;
 	struct Qdisc *q;
 
 	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
@@ -195,6 +194,19 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 	return NULL;
 }
 
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		struct Qdisc *q = __qdisc_lookup(txq, handle);
+		if (q)
+			return q;
+	}
+	return NULL;
+}
+
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
 	unsigned long cl;
@@ -462,7 +474,7 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 		}
 
 	} else {
-		dev_queue = &dev->tx_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		oqdisc = dev_queue->qdisc_sleeping;
 
 		/* Prune old scheduler */
@@ -742,7 +754,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			q = dev->rx_queue.qdisc;
 		}
 	} else {
-		struct netdev_queue *dev_queue = &dev->tx_queue;
+		struct netdev_queue *dev_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		q = dev_queue->qdisc_sleeping;
 	}
 	if (!q)
@@ -817,7 +830,8 @@ replay:
 			q = dev->rx_queue.qdisc;
 		}
 	} else {
-		struct netdev_queue *dev_queue = &dev->tx_queue;
+		struct netdev_queue *dev_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		q = dev_queue->qdisc_sleeping;
 	}
 
@@ -899,7 +913,7 @@ create_n_graft:
 				tcm->tcm_parent, tcm->tcm_parent,
 				tca, &err);
 	else
-		q = qdisc_create(dev, &dev->tx_queue,
+		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
 				 tcm->tcm_parent, tcm->tcm_handle,
 				 tca, &err);
 	if (q == NULL) {
@@ -1025,7 +1039,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 		if (idx > s_idx)
 			s_q_idx = 0;
 		q_idx = 0;
-		dev_queue = &dev->tx_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		list_for_each_entry(q, &dev_queue->qdisc_list, list) {
 			if (q_idx < s_q_idx) {
 				q_idx++;
@@ -1098,7 +1112,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
 	/* Step 1. Determine qdisc handle X:0 */
 
-	dev_queue = &dev->tx_queue;
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	if (pid != TC_H_ROOT) {
 		u32 qid1 = TC_H_MAJ(pid);
 
@@ -1275,7 +1289,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	s_t = cb->args[0];
 	t = 0;
 
-	dev_queue = &dev->tx_queue;
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
 		if (t < s_t || !q->ops->cl_ops ||
 		    (tcm->tcm_parent &&
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 243de93..4e2b865 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -40,20 +40,30 @@
  */
 
 void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->tx_queue.lock)
 	__acquires(dev->rx_queue.lock)
 {
-	spin_lock_bh(&dev->tx_queue.lock);
+	unsigned int i;
+
+	local_bh_disable();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		spin_lock(&txq->lock);
+	}
 	spin_lock(&dev->rx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
 	__releases(dev->rx_queue.lock)
-	__releases(dev->tx_queue.lock)
 {
+	unsigned int i;
+
 	spin_unlock(&dev->rx_queue.lock);
-	spin_unlock_bh(&dev->tx_queue.lock);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		spin_unlock(&txq->lock);
+	}
+	local_bh_enable();
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);
 
@@ -212,22 +222,37 @@ void __qdisc_run(struct netdev_queue *txq)
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
-	struct netdev_queue *txq = &dev->tx_queue;
 
 	netif_tx_lock(dev);
-	if (txq->qdisc != &noop_qdisc) {
+	if (!qdisc_tx_is_noop(dev)) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
-			if (netif_queue_stopped(dev) &&
-			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+			int some_queue_stopped = 0;
+			unsigned int i;
+
+			for (i = 0; i < dev->num_tx_queues; i++) {
+				struct netdev_queue *txq;
+
+				txq = netdev_get_tx_queue(dev, i);
+				if (netif_tx_queue_stopped(txq)) {
+					some_queue_stopped = 1;
+					break;
+				}
+			}
 
-				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+			if (some_queue_stopped &&
+			    time_after(jiffies, (dev->trans_start +
+						 dev->watchdog_timeo))) {
+				printk(KERN_INFO "NETDEV WATCHDOG: %s: "
+				       "transmit timed out\n",
 				       dev->name);
 				dev->tx_timeout(dev);
 				WARN_ON_ONCE(1);
 			}
-			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
+			if (!mod_timer(&dev->watchdog_timer,
+				       round_jiffies(jiffies +
+						     dev->watchdog_timeo)))
 				dev_hold(dev);
 		}
 	}
@@ -542,9 +567,55 @@ void qdisc_destroy(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
+static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		if (txq->qdisc_sleeping != &noop_qdisc)
+			return false;
+	}
+	return true;
+}
+
+static void attach_one_default_qdisc(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_unused)
+{
+	struct Qdisc *qdisc;
+
+	if (dev->tx_queue_len) {
+		qdisc = qdisc_create_dflt(dev, dev_queue,
+					  &pfifo_fast_ops, TC_H_ROOT);
+		if (!qdisc) {
+			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			return;
+		}
+		list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
+	} else {
+		qdisc = &noqueue_qdisc;
+	}
+	dev_queue->qdisc_sleeping = qdisc;
+}
+
+static void transition_one_qdisc(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_need_watchdog)
+{
+	int *need_watchdog_p = _need_watchdog;
+
+	spin_lock_bh(&dev_queue->lock);
+	rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
+	if (dev_queue->qdisc != &noqueue_qdisc)
+		*need_watchdog_p = 1;
+	spin_unlock_bh(&dev_queue->lock);
+}
+
 void dev_activate(struct net_device *dev)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
+	int need_watchdog;
 
 	/* No queueing discipline is attached to device;
 	   create default one i.e. pfifo_fast for devices,
@@ -552,39 +623,27 @@ void dev_activate(struct net_device *dev)
 	   virtual interfaces
 	 */
 
-	if (txq->qdisc_sleeping == &noop_qdisc) {
-		struct Qdisc *qdisc;
-		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, txq,
-						  &pfifo_fast_ops,
-						  TC_H_ROOT);
-			if (qdisc == NULL) {
-				printk(KERN_INFO "%s: activation failed\n", dev->name);
-				return;
-			}
-			list_add_tail(&qdisc->list, &txq->qdisc_list);
-		} else {
-			qdisc = &noqueue_qdisc;
-		}
-		txq->qdisc_sleeping = qdisc;
-	}
+	if (dev_all_qdisc_sleeping_noop(dev))
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&txq->lock);
-	rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping);
-	if (txq->qdisc != &noqueue_qdisc) {
+	need_watchdog = 0;
+	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+
+	if (need_watchdog) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&txq->lock);
 }
 
-static void dev_deactivate_queue(struct netdev_queue *dev_queue,
-				 struct Qdisc *qdisc_default)
+static void dev_deactivate_queue(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_qdisc_default)
 {
+	struct Qdisc *qdisc_default = _qdisc_default;
 	struct Qdisc *qdisc;
 	struct sk_buff *skb;
 
@@ -603,12 +662,35 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue,
 	kfree_skb(skb);
 }
 
+static bool some_qdisc_is_running(struct net_device *dev, int lock)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *dev_queue;
+		int val;
+
+		dev_queue = netdev_get_tx_queue(dev, i);
+
+		if (lock)
+			spin_lock_bh(&dev_queue->lock);
+
+		val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state);
+
+		if (lock)
+			spin_unlock_bh(&dev_queue->lock);
+
+		if (val)
+			return true;
+	}
+	return false;
+}
+
 void dev_deactivate(struct net_device *dev)
 {
-	struct netdev_queue *dev_queue = &dev->tx_queue;
-	int running;
+	bool running;
 
-	dev_deactivate_queue(dev_queue, &noop_qdisc);
+	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -617,17 +699,14 @@ void dev_deactivate(struct net_device *dev)
 
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
+		while (some_qdisc_is_running(dev, 0))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev_queue->lock);
-		running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
-				   &dev_queue->state);
-		spin_unlock_bh(&dev_queue->lock);
+		running = some_qdisc_is_running(dev, 1);
 
 		/*
 		 * The running flag should never be set at this point because
@@ -642,8 +721,10 @@ void dev_deactivate(struct net_device *dev)
 
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
-				     struct Qdisc *qdisc)
+				     void *_qdisc)
 {
+	struct Qdisc *qdisc = _qdisc;
+
 	dev_queue->qdisc = qdisc;
 	dev_queue->qdisc_sleeping = qdisc;
 	INIT_LIST_HEAD(&dev_queue->qdisc_list);
@@ -652,18 +733,19 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 void dev_init_scheduler(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
 	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
 	qdisc_unlock_tree(dev);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-static void dev_shutdown_scheduler_queue(struct net_device *dev,
-					 struct netdev_queue *dev_queue,
-					 struct Qdisc *qdisc_default)
+static void shutdown_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc_default)
 {
 	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
 		dev_queue->qdisc = qdisc_default;
@@ -676,8 +758,8 @@ static void dev_shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
-	dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
+	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
 	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
 	qdisc_unlock_tree(dev);
 }
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 8ac0598..44a2c34 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -111,7 +111,7 @@ teql_dequeue(struct Qdisc* sch)
 	struct sk_buff *skb;
 
 	skb = __skb_dequeue(&dat->q);
-	dat_queue = &dat->m->dev->tx_queue;
+	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
 	if (skb == NULL) {
 		struct net_device *m = qdisc_dev(dat_queue->qdisc);
 		if (m) {
@@ -155,10 +155,13 @@ teql_destroy(struct Qdisc* sch)
 			if (q == master->slaves) {
 				master->slaves = NEXT_SLAVE(q);
 				if (q == master->slaves) {
+					struct netdev_queue *txq;
+
+					txq = netdev_get_tx_queue(master->dev, 0);
 					master->slaves = NULL;
-					spin_lock_bh(&master->dev->tx_queue.lock);
-					qdisc_reset(master->dev->tx_queue.qdisc);
-					spin_unlock_bh(&master->dev->tx_queue.lock);
+					spin_lock_bh(&txq->lock);
+					qdisc_reset(txq->qdisc);
+					spin_unlock_bh(&txq->lock);
 				}
 			}
 			skb_queue_purge(&dat->q);
@@ -218,7 +221,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 
 static int
 __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
 {
-	struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc);
+	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
+	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
 	struct neighbour *mn = skb->dst->neighbour;
 	struct neighbour *n = q->ncache;
@@ -254,7 +258,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 static inline int teql_resolve(struct sk_buff *skb,
 			       struct sk_buff *skb_res, struct net_device *dev)
 {
-	if (dev->tx_queue.qdisc == &noop_qdisc)
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	if (txq->qdisc == &noop_qdisc)
 		return -ENODEV;
 
 	if (dev->header_ops == NULL ||
@@ -285,8 +290,10 @@ restart:
 
 	do {
 		struct net_device *slave = qdisc_dev(q);
+		struct netdev_queue *slave_txq;
 
-		if (slave->tx_queue.qdisc_sleeping != q)
+		slave_txq = netdev_get_tx_queue(slave, 0);
+		if (slave_txq->qdisc_sleeping != q)
 			continue;
 		if (netif_queue_stopped(slave) ||
 		    __netif_subqueue_stopped(slave, subq) ||
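
The same conversion pattern recurs throughout sch_generic.c above: per-queue work moves into a small callback taking a void * argument, and netdev_for_each_tx_queue() walks the whole array. A standalone illustration of the idiom (hypothetical helper, not part of the patch; netif_tx_queue_stopped() is used the same way in dev_watchdog() above):

/* Hypothetical example of the callback idiom this patch introduces:
 * count how many TX queues are currently stopped. */
static void count_stopped_queue(struct net_device *dev,
				struct netdev_queue *txq,
				void *_count)
{
	unsigned int *count = _count;	/* opaque argument cast back, as in the patch */

	if (netif_tx_queue_stopped(txq))
		(*count)++;
}

static unsigned int dev_count_stopped_queues(struct net_device *dev)
{
	unsigned int count = 0;

	netdev_for_each_tx_queue(dev, count_stopped_queue, &count);
	return count;
}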