25 files changed, 131 insertions, 66 deletions
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig index a233ed5..86cd75a 100644 --- a/drivers/isdn/Kconfig +++ b/drivers/isdn/Kconfig @@ -4,7 +4,7 @@ menuconfig ISDN bool "ISDN support" - depends on NET + depends on NET && NETDEVICES depends on !S390 && !UML ---help--- ISDN ("Integrated Services Digital Network", called RNIS in France) diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig index 2302fbe..9c6650e 100644 --- a/drivers/isdn/i4l/Kconfig +++ b/drivers/isdn/i4l/Kconfig @@ -6,7 +6,7 @@ if ISDN_I4L config ISDN_PPP bool "Support synchronous PPP" - depends on INET && NETDEVICES + depends on INET select SLHC help Over digital connections such as ISDN, there is no need to diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 8c610fa..e2a945e 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1312,7 +1312,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) } else return -EINVAL; break; -#ifdef CONFIG_NETDEVICES case IIOCNETGPN: /* Get peer phone number of a connected * isdn network interface */ @@ -1322,7 +1321,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) return isdn_net_getpeer(&phone, argp); } else return -EINVAL; -#endif default: return -EINVAL; } @@ -1352,7 +1350,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; -#ifdef CONFIG_NETDEVICES case IIOCNETAIF: /* Add a network-interface */ if (arg) { @@ -1491,7 +1488,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) return -EFAULT; return isdn_net_force_hangup(name); break; -#endif /* CONFIG_NETDEVICES */ case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index c65295d..6e5bdd1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) SHMEM_EEE_ADV_STATUS_SHIFT); if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) { DP(BNX2X_MSG_ETHTOOL, - "Direct manipulation of EEE advertisment is not supported\n"); + "Direct manipulation of EEE advertisement is not supported\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 6dd0dd0..f6cfdc6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -9941,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, else rc = bnx2x_8483x_disable_eee(phy, params, vars); if (rc) { - DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n"); + DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); return rc; } } else { @@ -12987,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); break; default: - DP(NETIF_MSG_LINK, "Analyze UNKOWN\n"); + DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n"); } DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, old_status, status); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 32eec15..730ae2c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter 
*adap, unsigned int mbox) { struct fw_bye_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, BYE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox) { struct fw_initialize_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, INITIALIZE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) { struct fw_reset_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = htonl(reset); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); @@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, HOSTPAGESIZEPF7(sge_hps)); t4_set_reg_field(adap, SGE_CONTROL, - INGPADBOUNDARY(INGPADBOUNDARY_MASK) | + INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE_MASK, INGPADBOUNDARY(fl_align_log - 5) | EGRSTATUSPAGESIZE(stat_len != 64)); @@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, { struct fw_vi_enable_cmd c; + memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 1d03dcd..19ac096 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; - if (!netif_running(ndev)) + if (!netif_running(ndev)) { + netif_device_attach(ndev); + return 0; + } gfar_init_bds(ndev); init_registers(ndev); diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f8064df..92317e9 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1948,10 +1948,10 @@ jme_close(struct net_device *netdev) JME_NAPI_DISABLE(jme); - tasklet_disable(&jme->linkch_task); - tasklet_disable(&jme->txclean_task); - tasklet_disable(&jme->rxclean_task); - tasklet_disable(&jme->rxempty_task); + tasklet_kill(&jme->linkch_task); + tasklet_kill(&jme->txclean_task); + tasklet_kill(&jme->rxclean_task); + tasklet_kill(&jme->rxempty_task); jme_disable_rx_engine(jme); jme_disable_tx_engine(jme); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9b9c2ac..d19a143 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev) dev0 = hw->dev[0]; unregister_netdev(dev0); - tasklet_disable(&hw->phy_task); + tasklet_kill(&hw->phy_task); spin_lock_irq(&hw->hw_lock); hw->intr_mask = 0; diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 318fee9..e558edd 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev) /* Delay for receive task to stop scheduling itself. 
*/ msleep(2000 / HZ); - tasklet_disable(&hw_priv->rx_tasklet); - tasklet_disable(&hw_priv->tx_tasklet); + tasklet_kill(&hw_priv->rx_tasklet); + tasklet_kill(&hw_priv->tx_tasklet); free_irq(dev->irq, hw_priv->dev); transmit_cleanup(hw_priv, 0); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e7ff886..927aa33 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_29: case RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_32: @@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev) mc_filter[1] = swab32(data); } + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + RTL_W32(MAR0 + 4, mc_filter[1]); RTL_W32(MAR0 + 0, mc_filter[0]); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 0793299..1d04754 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -990,7 +990,7 @@ static int axienet_stop(struct net_device *ndev) axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); - tasklet_disable(&lp->dma_err_tasklet); + tasklet_kill(&lp->dma_err_tasklet); free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index c81e278..08d55b6 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -31,6 +31,7 @@ #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #include <linux/gfp.h> +#include <linux/if_vlan.h> /* @@ -92,7 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf) /* no jumbogram (16K) support for now */ - dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; + dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 7479a57..3286166 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1344,6 +1344,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, } else { u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); skb_push(skb, 4); + cpu_to_le32s(&csum_preamble); memcpy(skb->data, &csum_preamble, 4); } } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index cb04f90..edb81ed 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -359,10 +359,12 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, void usbnet_defer_kevent (struct usbnet *dev, int work) { set_bit (work, &dev->flags); - if (!schedule_work (&dev->kevent)) - netdev_err(dev->net, "kevent %d may have been dropped\n", work); - else + if (!schedule_work (&dev->kevent)) { + if (net_ratelimit()) + netdev_err(dev->net, "kevent %d may have been dropped\n", work); + } else { netdev_dbg(dev->net, "kevent %d scheduled\n", work); + } } EXPORT_SYMBOL_GPL(usbnet_defer_kevent); diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c index 192251a..282eede 100644 --- a/drivers/net/wireless/b43legacy/pio.c +++ b/drivers/net/wireless/b43legacy/pio.c @@ -382,7 +382,7 @@ static void cancel_transfers(struct b43legacy_pioqueue *queue) { struct b43legacy_pio_txpacket 
*packet, *tmp_packet; - tasklet_disable(&queue->txtask); + tasklet_kill(&queue->txtask); list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) free_txpacket(packet, 0); diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 6458764..4ec3c0d 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -20,6 +20,7 @@ #include <linux/ctype.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> +#include <linux/if_vlan.h> #include "u_ether.h" @@ -295,7 +296,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req) while (skb2) { if (status < 0 || ETH_HLEN > skb2->len - || skb2->len > ETH_FRAME_LEN) { + || skb2->len > VLAN_ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb2->len); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index f2dc6d8..38a9935 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -54,7 +54,8 @@ struct ptp_clock_request { * clock operations * * @adjfreq: Adjusts the frequency of the hardware clock. - * parameter delta: Desired period change in parts per billion. + * parameter delta: Desired frequency offset from nominal frequency + * in parts per billion * * @adjtime: Shifts the time of the hardware clock. * parameter delta: Desired change in nanoseconds. diff --git a/net/core/dev.c b/net/core/dev.c index 09cb3f6..bda6d00 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1666,7 +1666,7 @@ static inline int deliver_skb(struct sk_buff *skb, static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) { - if (ptype->af_packet_priv == NULL) + if (!ptype->af_packet_priv || !skb->sk) return false; if (ptype->id_match) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 76d4c2c..fad649a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2192,7 +2192,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb, goto skip; err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, - portid, seq, 0, NTF_SELF); + portid, seq, + RTM_NEWNEIGH, NTF_SELF); if (err < 0) return err; skip: diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 535584c..0c34bfa 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -892,13 +892,16 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc) { const struct inet_diag_handler *handler; + int err = 0; handler = inet_diag_lock_handler(r->sdiag_protocol); if (!IS_ERR(handler)) handler->dump(skb, cb, r, bc); + else + err = PTR_ERR(handler); inet_diag_unlock_handler(handler); - return skb->len; + return err ? 
: skb->len; } static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0185679..d5cb3c4 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1633,9 +1633,9 @@ static size_t ip6gre_get_size(const struct net_device *dev) /* IFLA_GRE_OKEY */ nla_total_size(4) + /* IFLA_GRE_LOCAL */ - nla_total_size(4) + + nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GRE_REMOTE */ - nla_total_size(4) + + nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GRE_TTL */ nla_total_size(1) + /* IFLA_GRE_TOS */ @@ -1659,8 +1659,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || - nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) || - nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) || + nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) || + nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) || nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) || /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/ nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index ff36194..2edce30 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -535,7 +535,7 @@ static void ndisc_send_unsol_na(struct net_device *dev) { struct inet6_dev *idev; struct inet6_ifaddr *ifa; - struct in6_addr mcaddr; + struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT; idev = in6_dev_get(dev); if (!idev) @@ -543,7 +543,6 @@ static void ndisc_send_unsol_na(struct net_device *dev) read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { - addrconf_addr_solict_mult(&ifa->addr, &mcaddr); ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr, /*router=*/ !!idev->cnf.forwarding, /*solicited=*/ false, /*override=*/ true, diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index f0dd83c..9687fa1 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -84,18 +84,19 @@ * grp->index is the index of the group; and grp->slot_shift * is the shift for the corresponding (scaled) sigma_i. */ -#define QFQ_MAX_INDEX 19 -#define QFQ_MAX_WSHIFT 16 +#define QFQ_MAX_INDEX 24 +#define QFQ_MAX_WSHIFT 12 #define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT) -#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT) +#define QFQ_MAX_WSUM (16*QFQ_MAX_WEIGHT) #define FRAC_BITS 30 /* fixed point arithmetic */ #define ONE_FP (1UL << FRAC_BITS) #define IWSUM (ONE_FP/QFQ_MAX_WSUM) -#define QFQ_MTU_SHIFT 11 +#define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */ #define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX) +#define QFQ_MIN_LMAX 256 /* min possible lmax for a class */ /* * Possible group states. 
These values are used as indexes for the bitmaps @@ -231,6 +232,32 @@ static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl, q->wsum += delta_w; } +static void qfq_update_reactivate_class(struct qfq_sched *q, + struct qfq_class *cl, + u32 inv_w, u32 lmax, int delta_w) +{ + bool need_reactivation = false; + int i = qfq_calc_index(inv_w, lmax); + + if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { + /* + * shift cl->F back, to not charge the + * class for the not-yet-served head + * packet + */ + cl->F = cl->S; + /* remove class from its slot in the old group */ + qfq_deactivate_class(q, cl); + need_reactivation = true; + } + + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); + + if (need_reactivation) /* activate in new group */ + qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); +} + + static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { @@ -238,7 +265,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct qfq_class *cl = (struct qfq_class *)*arg; struct nlattr *tb[TCA_QFQ_MAX + 1]; u32 weight, lmax, inv_w; - int i, err; + int err; int delta_w; if (tca[TCA_OPTIONS] == NULL) { @@ -270,16 +297,14 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (tb[TCA_QFQ_LMAX]) { lmax = nla_get_u32(tb[TCA_QFQ_LMAX]); - if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) { + if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) { pr_notice("qfq: invalid max length %u\n", lmax); return -EINVAL; } } else - lmax = 1UL << QFQ_MTU_SHIFT; + lmax = psched_mtu(qdisc_dev(sch)); if (cl != NULL) { - bool need_reactivation = false; - if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), @@ -291,24 +316,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (lmax == cl->lmax && inv_w == cl->inv_w) return 0; /* nothing to update */ - i = qfq_calc_index(inv_w, lmax); sch_tree_lock(sch); - if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { - /* - * shift cl->F back, to not charge the - * class for the not-yet-served head - * packet - */ - cl->F = cl->S; - /* remove class from its slot in the old group */ - qfq_deactivate_class(q, cl); - need_reactivation = true; - } - - qfq_update_class_params(q, cl, lmax, inv_w, delta_w); - - if (need_reactivation) /* activate in new group */ - qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); + qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w); sch_tree_unlock(sch); return 0; @@ -663,15 +672,48 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V) /* - * XXX we should make sure that slot becomes less than 32. - * This is guaranteed by the input values. - * roundedS is always cl->S rounded on grp->slot_shift bits. + * If the weight and lmax (max_pkt_size) of the classes do not change, + * then QFQ guarantees that the slot index is never higher than + * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM). + * + * With the current values of the above constants, the index is + * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18. + * + * When the weight of a class is increased or the lmax of the class is + * decreased, a new class with smaller slot size may happen to be + * activated. The activation of this class should be properly delayed + * to when the service of the class has finished in the ideal system + * tracked by QFQ. 
If the activation of the class is not delayed to + * this reference time instant, then this class may be unjustly served + * before other classes waiting for service. This may cause + * (unfrequently) the above bound to the slot index to be violated for + * some of these unlucky classes. + * + * Instead of delaying the activation of the new class, which is quite + * complex, the following inaccurate but simple solution is used: if + * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps + * of the class are shifted backward so as to let the slot index + * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if + * the slot index is above it, then the data structure implementing + * the bucket list either gets immediately corrupted or may get + * corrupted on a possible next packet arrival that causes the start + * time of the group to be shifted backward. */ static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, u64 roundedS) { u64 slot = (roundedS - grp->S) >> grp->slot_shift; - unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS; + unsigned int i; /* slot index in the bucket list */ + + if (unlikely(slot > QFQ_MAX_SLOTS - 2)) { + u64 deltaS = roundedS - grp->S - + ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift); + cl->S -= deltaS; + cl->F -= deltaS; + slot = QFQ_MAX_SLOTS - 2; + } + + i = (grp->front + slot) % QFQ_MAX_SLOTS; hlist_add_head(&cl->next, &grp->slots[i]); __set_bit(slot, &grp->full_slots); @@ -892,6 +934,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); + if (unlikely(cl->lmax < qdisc_pkt_len(skb))) { + pr_debug("qfq: increasing maxpkt from %u to %u for class %u", + cl->lmax, qdisc_pkt_len(skb), cl->common.classid); + qfq_update_reactivate_class(q, cl, cl->inv_w, + qdisc_pkt_len(skb), 0); + } + err = qdisc_enqueue(skb, cl->qdisc); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); diff --git a/net/tipc/handler.c b/net/tipc/handler.c index 111ff83..b36f0fc 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c @@ -116,7 +116,6 @@ void tipc_handler_stop(void) return; handler_enabled = 0; - tasklet_disable(&tipc_tasklet); tasklet_kill(&tipc_tasklet); spin_lock_bh(&qitem_lock); |
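
The cxgb4 hunks above add memset(&c, 0, sizeof(c)) before INIT_CMD() because the macro only fills the opcode and length fields, leaving the rest of the on-stack command indeterminate when it is copied to the firmware mailbox. A minimal userspace sketch of the same pitfall, using a made-up command layout (fake_fw_cmd is hypothetical, not the real fw_bye_cmd):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a firmware mailbox command. */
struct fake_fw_cmd {
	uint32_t op_to_write;
	uint32_t retval_len16;
	uint64_t reserved[2];	/* never touched by the "INIT" step */
};

int main(void)
{
	struct fake_fw_cmd c;

	/* Without this memset, reserved[] holds whatever happened to be
	 * on the stack and would be sent to the device as-is. */
	memset(&c, 0, sizeof(c));

	c.op_to_write  = 0x1234;		/* opcode + request/write flags */
	c.retval_len16 = sizeof(c) / 16;	/* length in 16-byte units */

	printf("cmd: op=0x%x len16=%u reserved=%llu/%llu\n",
	       (unsigned)c.op_to_write, (unsigned)c.retval_len16,
	       (unsigned long long)c.reserved[0],
	       (unsigned long long)c.reserved[1]);
	return 0;
}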
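Several shutdown paths above (jme, skge, ksz884x, axienet, b43legacy, tipc) replace tasklet_disable() with tasklet_kill(). tasklet_disable() only increments the disable count, so an already-scheduled tasklet stays pending across teardown, while tasklet_kill() waits for a running instance to finish and clears the pending state. A minimal module-style sketch of the intended pattern, written against the tasklet API of this kernel generation (the demo_* names are hypothetical, and it only builds inside a kernel tree):

#include <linux/module.h>
#include <linux/interrupt.h>

static struct tasklet_struct demo_tasklet;

static void demo_tasklet_fn(unsigned long data)
{
	pr_info("demo tasklet ran, data=%lu\n", data);
}

static int __init demo_init(void)
{
	tasklet_init(&demo_tasklet, demo_tasklet_fn, 0);
	tasklet_schedule(&demo_tasklet);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * tasklet_kill() waits for a running instance to complete and
	 * clears a pending schedule; tasklet_disable() here would only
	 * bump the disable count and could leave the tasklet pending.
	 */
	tasklet_kill(&demo_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");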
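cdc_eem now reserves VLAN_HLEN in hard_header_len and u_ether accepts frames up to VLAN_ETH_FRAME_LEN, since an 802.1Q tag adds four bytes to an Ethernet frame. The size arithmetic, with the constants copied out of the kernel headers so the sketch builds standalone:

#include <stdio.h>

/* Values mirrored from linux/if_ether.h, linux/if_vlan.h and cdc_eem.c. */
#define ETH_HLEN		14	/* dst + src + ethertype */
#define ETH_FCS_LEN		 4
#define VLAN_HLEN		 4	/* 802.1Q tag */
#define ETH_FRAME_LEN		1514
#define VLAN_ETH_FRAME_LEN	(ETH_FRAME_LEN + VLAN_HLEN)	/* 1518 */
#define EEM_HEAD		 2	/* CDC EEM per-packet header */

int main(void)
{
	/* cdc_eem: extra headroom reserved per packet after the patch. */
	printf("EEM overhead: %d bytes\n", EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN);
	/* u_ether: largest frame rx_complete now accepts. */
	printf("max accepted frame: %d bytes\n", VLAN_ETH_FRAME_LEN);
	return 0;
}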
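The smsc95xx tx_fixup hunk converts the checksum preamble with cpu_to_le32s() before memcpy()ing it into the frame, so big-endian hosts put it on the wire in the byte order the device expects. A userspace sketch of the same idea using htole32() (cpu_to_le32s() is the in-kernel, in-place variant):

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t csum_preamble = 0x11223344;	/* arbitrary example value */
	uint8_t frame[4];

	/* In-kernel: cpu_to_le32s(&csum_preamble); then memcpy into skb->data.
	 * Userspace equivalent: convert to little-endian explicitly. */
	uint32_t le = htole32(csum_preamble);
	memcpy(frame, &le, sizeof(le));

	printf("wire bytes: %02x %02x %02x %02x\n",
	       frame[0], frame[1], frame[2], frame[3]);
	/* Prints 44 33 22 11 on both little- and big-endian hosts. */
	return 0;
}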
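The ptp_clock_kernel.h comment now describes adjfreq's delta as a frequency offset from the nominal frequency in parts per billion, rather than a "period change". A worked example of what that means for a hypothetical 125 MHz hardware clock:

#include <stdio.h>

int main(void)
{
	double nominal_hz = 125000000.0;	/* e.g. a 125 MHz PHC oscillator */
	long   delta_ppb  = 500;		/* value passed to .adjfreq */

	/* Offset in ppb => scale the nominal frequency by (1 + delta/1e9). */
	double adjusted_hz = nominal_hz * (1.0 + delta_ppb / 1e9);

	printf("nominal  %.0f Hz\n", nominal_hz);
	printf("adjusted %.3f Hz (+%ld ppb = +%.3f Hz)\n",
	       adjusted_hz, delta_ppb, adjusted_hz - nominal_hz);
	return 0;
}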
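In the inet_diag hunk, "return err ? : skb->len;" uses the GNU conditional-with-omitted-operand extension: it returns err when err is non-zero and skb->len otherwise. A tiny sketch of the construct (builds with gcc or clang; not strict ISO C):

#include <stdio.h>

static int pick(int err, int len)
{
	return err ? : len;	/* same as: err ? err : len */
}

int main(void)
{
	printf("%d %d\n", pick(0, 42), pick(-22, 42));	/* 42 -22 */
	return 0;
}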
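The new sch_qfq comment bounds the slot index by 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM), and qfq_slot_insert() now clamps the index at QFQ_MAX_SLOTS - 2 when a weight or lmax change would momentarily exceed that bound. A standalone check of the arithmetic with the constants from this patch (QFQ_MAX_SLOTS is assumed to be 32, its value in sch_qfq.c):

#include <stdint.h>
#include <stdio.h>

/* Constants as set by this patch (see the sch_qfq.c hunks above). */
#define QFQ_MTU_SHIFT	16
#define QFQ_MIN_LMAX	256
#define QFQ_MAX_WSHIFT	12
#define QFQ_MAX_WEIGHT	(1 << QFQ_MAX_WSHIFT)
#define QFQ_MAX_WSUM	(16 * QFQ_MAX_WEIGHT)
#define QFQ_MAX_SLOTS	32	/* assumed value from sch_qfq.c */

int main(void)
{
	/* Worst-case slot index claimed by the new comment:
	 * 2 + (2^MTU_SHIFT / MIN_LMAX) * (MAX_WEIGHT / MAX_WSUM).
	 * Keep the product in one expression so integer division
	 * doesn't truncate MAX_WEIGHT / MAX_WSUM to zero. */
	unsigned bound = 2 + ((1u << QFQ_MTU_SHIFT) / QFQ_MIN_LMAX)
			   * QFQ_MAX_WEIGHT / QFQ_MAX_WSUM;
	printf("worst-case slot index: %u\n", bound);	/* 18 */

	/* The clamp added to qfq_slot_insert(), on a made-up slot value. */
	uint64_t slot = 45;
	if (slot > QFQ_MAX_SLOTS - 2)
		slot = QFQ_MAX_SLOTS - 2;		/* 30 */
	printf("clamped slot: %llu\n", (unsigned long long)slot);
	return 0;
}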