author    | David S. Miller <davem@davemloft.net> | 2013-10-08 23:07:53 -0400
committer | David S. Miller <davem@davemloft.net> | 2013-10-08 23:07:53 -0400
commit    | 53af53ae83fe960ceb9ef74cac7915e9088f4266 (patch)
tree      | 0cd5309f2a931d3f210aa3d2025aae64f2276e8c
parent    | b343ca84b4e3ba65508503333c923a797801a588 (diff)
parent    | 9684d7b0dab3cf3a897edd85dca501d413888d56 (diff)
download  | op-kernel-dev-53af53ae83fe960ceb9ef74cac7915e9088f4266.zip, op-kernel-dev-53af53ae83fe960ceb9ef74cac7915e9088f4266.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	include/linux/netdevice.h
	net/core/sock.c
Trivial merge issues:
removal of "extern" from function declarations in netdevice.h at the
same time that "const" was added to an argument, and two parallel line
additions in net/core/sock.c.
Signed-off-by: David S. Miller <davem@davemloft.net>
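As context for the netdevice.h conflict noted above, here is a minimal sketch (not part of the patch itself) of the resolved declaration: the net-next side dropped the "extern" keyword while the net side constified the cpumask argument, so the merged prototype carries both changes, matching the include/linux/netdevice.h hunk further down in the diff.

```c
/* Illustrative excerpt of include/linux/netdevice.h after the merge
 * (not the full header): no "extern" on the declaration, and the
 * cpumask argument is const.
 */
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif
```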
62 files changed, 475 insertions(+), 207 deletions(-)
diff --git a/Documentation/connector/ucon.c b/Documentation/connector/ucon.c index 4848db8..8a4da64 100644 --- a/Documentation/connector/ucon.c +++ b/Documentation/connector/ucon.c @@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg) nlh->nlmsg_seq = seq++; nlh->nlmsg_pid = getpid(); nlh->nlmsg_type = NLMSG_DONE; - nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh)); + nlh->nlmsg_len = size; nlh->nlmsg_flags = 0; m = NLMSG_DATA(nlh); diff --git a/MAINTAINERS b/MAINTAINERS index 744a239..43dfc82 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1785,6 +1785,7 @@ F: include/net/bluetooth/ BONDING DRIVER M: Jay Vosburgh <fubar@us.ibm.com> +M: Veaceslav Falico <vfalico@redhat.com> M: Andy Gospodarek <andy@greyhouse.net> L: netdev@vger.kernel.org W: http://sourceforge.net/projects/bonding/ diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index f50d223..99b44e0 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp) { if (fp->bpf_func != sk_run_filter) module_free(NULL, fp->bpf_func); + kfree(fp); } diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index bf56e33..2345bdb 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp) { if (fp->bpf_func != sk_run_filter) module_free(NULL, fp->bpf_func); + kfree(fp); } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 7092392..a5df511 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp) struct bpf_binary_header *header = (void *)addr; if (fp->bpf_func == sk_run_filter) - return; + goto free_filter; set_memory_rw(addr, header->pages); module_free(NULL, header); +free_filter: + kfree(fp); } diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 9c7be59e..218b6b2 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp) { if (fp->bpf_func != sk_run_filter) module_free(NULL, fp->bpf_func); + kfree(fp); } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 79c216a..516593e1 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -772,13 +772,21 @@ out: return; } +static void bpf_jit_free_deferred(struct work_struct *work) +{ + struct sk_filter *fp = container_of(work, struct sk_filter, work); + unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; + struct bpf_binary_header *header = (void *)addr; + + set_memory_rw(addr, header->pages); + module_free(NULL, header); + kfree(fp); +} + void bpf_jit_free(struct sk_filter *fp) { if (fp->bpf_func != sk_run_filter) { - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - set_memory_rw(addr, header->pages); - module_free(NULL, header); + INIT_WORK(&fp->work, bpf_jit_free_deferred); + schedule_work(&fp->work); } } diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 08ae128..c73fc2b 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ 
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ /* If cn_netlink_send() failed, the data is not sent */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); ev->what = which_id; ev->event_data.id.process_pid = task->pid; ev->event_data.id.process_tgid = task->tgid; @@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task) msg = (struct cn_msg *)buffer; ev = (struct proc_event 
*)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } @@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) msg = (struct cn_msg *)buffer; ev = (struct proc_event *)msg->data; + memset(&ev->event_data, 0, sizeof(ev->event_data)); msg->seq = rcvd_seq; ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); @@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = rcvd_ack + 1; msg->len = sizeof(*ev); + msg->flags = 0; /* not used */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index 6ecfa75..a36749f 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) data = nlmsg_data(nlh); - memcpy(data, msg, sizeof(*data) + msg->len); + memcpy(data, msg, size); NETLINK_CB(skb).dst_group = group; @@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb) static void cn_rx_skb(struct sk_buff *__skb) { struct nlmsghdr *nlh; - int err; struct sk_buff *skb; + int len, err; skb = skb_get(__skb); if (skb->len >= NLMSG_HDRLEN) { nlh = nlmsg_hdr(skb); + len = nlmsg_len(nlh); - if (nlh->nlmsg_len < sizeof(struct cn_msg) || + if (len < (int)sizeof(struct cn_msg) || skb->len < nlh->nlmsg_len || - nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) { + len > CONNECTOR_MAX_MSG_SIZE) { kfree_skb(skb); return; } diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index f9cba41..1870c47 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev) size_t size; size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ - size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ + size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ - size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ - size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ + size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */ + size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ - size += sizeof(struct can_berr_counter); + size += nla_total_size(sizeof(struct can_berr_counter)); if 
(priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ - size += sizeof(struct can_bittiming_const); + size += nla_total_size(sizeof(struct can_bittiming_const)); return size; } diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 78d6d6b..48f5288 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -106,7 +106,6 @@ #define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ #define XGMAC_ADDR_AE 0x80000000 -#define XGMAC_MAX_FILTER_ADDR 31 /* PMT Control and Status */ #define XGMAC_PMT_POINTER_RESET 0x80000000 @@ -384,6 +383,7 @@ struct xgmac_priv { struct device *device; struct napi_struct napi; + int max_macs; struct xgmac_extra_stats xstats; spinlock_t stats_lock; @@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev) netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", netdev_mc_count(dev), netdev_uc_count(dev)); - if (dev->flags & IFF_PROMISC) { - writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); - return; - } + if (dev->flags & IFF_PROMISC) + value |= XGMAC_FRAME_FILTER_PR; memset(hash_filter, 0, sizeof(hash_filter)); - if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { + if (netdev_uc_count(dev) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; } @@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev) goto out; } - if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { + if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; } else { @@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) } out: - for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) - xgmac_set_mac_addr(ioaddr, NULL, reg); + for (i = reg; i <= priv->max_macs; i++) + xgmac_set_mac_addr(ioaddr, NULL, i); for (i = 0; i < XGMAC_NUM_HASH; i++) writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); @@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev) uid = readl(priv->base + XGMAC_VERSION); netdev_info(ndev, "h/w version is 0x%x\n", uid); + /* Figure out how many valid mac address filter registers we have */ + writel(1, priv->base + XGMAC_ADDR_HIGH(31)); + if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1) + priv->max_macs = 31; + else + priv->max_macs = 7; + writel(0, priv->base + XGMAC_DMA_INTR_ENA); ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq == -ENXIO) { diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index ebdac02..0ae3177 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2657,6 +2657,8 @@ static int igb_set_eee(struct net_device *netdev, (hw->phy.media_type != e1000_media_type_copper)) return -EOPNOTSUPP; + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + ret_val = igb_get_eee(netdev, &eee_curr); if (ret_val) return ret_val; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 99f16cb..4cfae6c 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); spin_unlock_bh(&mp->mib_counters_lock); - - mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } static void mib_counters_timer_wrapper(unsigned 
long _mp) { struct mv643xx_eth_private *mp = (void *)_mp; - mib_counters_update(mp); + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } @@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev) mp->int_mask |= INT_TX_END_0 << i; } + add_timer(&mp->mib_counters_timer); port_start(mp); wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); @@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, if (!ppdev) return -ENOMEM; ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + ppdev->dev.of_node = pnp; ret = platform_device_add_resources(ppdev, &res, 1); if (ret) @@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mp->mib_counters_timer.data = (unsigned long)mp; mp->mib_counters_timer.function = mib_counters_timer_wrapper; mp->mib_counters_timer.expires = jiffies + 30 * HZ; - add_timer(&mp->mib_counters_timer); spin_lock_init(&mp->mib_counters_lock); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index dec455c..afe2efa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, put_page(page); return -ENOMEM; } - page_alloc->size = PAGE_SIZE << order; + page_alloc->page_size = PAGE_SIZE << order; page_alloc->page = page; page_alloc->dma = dma; - page_alloc->offset = frag_info->frag_align; + page_alloc->page_offset = frag_info->frag_align; /* Not doing get_page() for each frag is a big win * on asymetric workloads. */ - atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); + atomic_set(&page->_count, + page_alloc->page_size / frag_info->frag_stride); return 0; } @@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, for (i = 0; i < priv->num_frags; i++) { frag_info = &priv->frag_info[i]; page_alloc[i] = ring_alloc[i]; - page_alloc[i].offset += frag_info->frag_stride; - if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) + page_alloc[i].page_offset += frag_info->frag_stride; + + if (page_alloc[i].page_offset + frag_info->frag_stride <= + ring_alloc[i].page_size) continue; + if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) goto out; } for (i = 0; i < priv->num_frags; i++) { frags[i] = ring_alloc[i]; - dma = ring_alloc[i].dma + ring_alloc[i].offset; + dma = ring_alloc[i].dma + ring_alloc[i].page_offset; ring_alloc[i] = page_alloc[i]; rx_desc->data[i].addr = cpu_to_be64(dma); } @@ -117,7 +121,7 @@ out: frag_info = &priv->frag_info[i]; if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, - page_alloc[i].size, PCI_DMA_FROMDEVICE); + page_alloc[i].page_size, PCI_DMA_FROMDEVICE); page = page_alloc[i].page; atomic_set(&page->_count, 1); put_page(page); @@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, int i) { const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; + - if (frags[i].offset + frag_info->frag_stride > frags[i].size) - dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, - PCI_DMA_FROMDEVICE); + if (next_frag_end > frags[i].page_size) + dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, + PCI_DMA_FROMDEVICE); if (frags[i].page) put_page(frags[i].page); @@ -161,7 +167,7 @@ out: page_alloc = &ring->page_alloc[i]; dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->size, PCI_DMA_FROMDEVICE); + 
page_alloc->page_size, PCI_DMA_FROMDEVICE); page = page_alloc->page; atomic_set(&page->_count, 1); put_page(page); @@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, i, page_count(page_alloc->page)); dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->size, PCI_DMA_FROMDEVICE); - while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { + page_alloc->page_size, PCI_DMA_FROMDEVICE); + while (page_alloc->page_offset + frag_info->frag_stride < + page_alloc->page_size) { put_page(page_alloc->page); - page_alloc->offset += frag_info->frag_stride; + page_alloc->page_offset += frag_info->frag_stride; } page_alloc->page = NULL; } @@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, /* Save page reference in skb */ __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); - skb_frags_rx[nr].page_offset = frags[nr].offset; + skb_frags_rx[nr].page_offset = frags[nr].page_offset; skb->truesize += frag_info->frag_stride; frags[nr].page = NULL; } @@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, /* Get pointer to first fragment so we could copy the headers into the * (linear part of the) skb */ - va = page_address(frags[0].page) + frags[0].offset; + va = page_address(frags[0].page) + frags[0].page_offset; if (length <= SMALL_PACKET_SIZE) { /* We are copying all relevant data to the skb - temporarily @@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), DMA_FROM_DEVICE); ethh = (struct ethhdr *)(page_address(frags[0].page) + - frags[0].offset); + frags[0].page_offset); if (is_multicast_ether_addr(ethh->h_dest)) { struct mlx4_mac_entry *entry; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5e0aa56..bf06e36 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -237,8 +237,8 @@ struct mlx4_en_tx_desc { struct mlx4_en_rx_alloc { struct page *page; dma_addr_t dma; - u32 offset; - u32 size; + u32 page_offset; + u32 page_size; }; struct mlx4_en_tx_ring { diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index bd1a2d2..ea54d95 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev) irq = irq_of_parse_and_map(node, 0); if (irq <= 0) { netdev_err(ndev, "irq_of_parse_and_map failed\n"); - return -EINVAL; + ret = -EINVAL; + goto irq_map_fail; } priv = netdev_priv(ndev); @@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); - if (priv->tx_desc_base == NULL) + if (priv->tx_desc_base == NULL) { + ret = -ENOMEM; goto init_fail; + } priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); - if (priv->rx_desc_base == NULL) + if (priv->rx_desc_base == NULL) { + ret = -ENOMEM; goto init_fail; + } priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, GFP_ATOMIC); - if (!priv->tx_buf_base) + if (!priv->tx_buf_base) { + ret = -ENOMEM; goto init_fail; + } priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, GFP_ATOMIC); - if (!priv->rx_buf_base) + if 
(!priv->rx_buf_base) { + ret = -ENOMEM; goto init_fail; + } platform_set_drvdata(pdev, ndev); @@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev) init_fail: netdev_err(ndev, "init failed\n"); moxart_mac_free_memory(ndev); - +irq_map_fail: + free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 21d00a0..f07f2b0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2257,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = qlcnic_alloc_adapter_resources(adapter); if (err) - goto err_out_free_netdev; + goto err_out_free_wq; adapter->dev_rst_time = jiffies; adapter->ahw->revision_id = pdev->revision; @@ -2396,6 +2396,9 @@ err_out_disable_msi: err_out_free_hw: qlcnic_free_adapter_resources(adapter); +err_out_free_wq: + destroy_workqueue(adapter->qlcnic_wq); + err_out_free_netdev: free_netdev(netdev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c8df52b..ee8df99 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -620,12 +620,16 @@ static struct sh_eth_cpu_data sh7734_data = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + .rmcr_value = 0x00000001, .apr = 1, .mpr = 1, .tpauser = 1, .bculr = 1, .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .tsu = 1, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 844ee75..676c3c0 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -754,6 +754,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), + EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), + EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), + EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), + EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), + EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), + EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), + EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), + EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), + EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), + EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), + EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), + EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), }; #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ @@ -808,44 +820,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ (1ULL << EF10_STAT_rx_length_error)) -#if BITS_PER_LONG == 64 -#define STAT_MASK_BITMAP(bits) (bits) -#else -#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 -#endif - -static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) -{ - static const unsigned long hunt_40g_stat_mask[] = { - STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | - HUNT_40G_EXTRA_STAT_MASK) - }; - static const unsigned long hunt_10g_only_stat_mask[] = { - STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | - 
HUNT_10G_ONLY_STAT_MASK) - }; +/* These statistics are only provided if the firmware supports the + * capability PM_AND_RXDP_COUNTERS. + */ +#define HUNT_PM_AND_RXDP_STAT_MASK ( \ + (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ + (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ + (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ + (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ + (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ + (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ + (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ + (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ + (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ + (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ + (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \ + (1ULL << EF10_STAT_rx_dp_emerg_wait)) + +static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) +{ + u64 raw_mask = HUNT_COMMON_STAT_MASK; u32 port_caps = efx_mcdi_phy_get_caps(efx); + struct efx_ef10_nic_data *nic_data = efx->nic_data; if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) - return hunt_40g_stat_mask; + raw_mask |= HUNT_40G_EXTRA_STAT_MASK; else - return hunt_10g_only_stat_mask; + raw_mask |= HUNT_10G_ONLY_STAT_MASK; + + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) + raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; + + return raw_mask; +} + +static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) +{ + u64 raw_mask = efx_ef10_raw_stat_mask(efx); + +#if BITS_PER_LONG == 64 + mask[0] = raw_mask; +#else + mask[0] = raw_mask & 0xffffffff; + mask[1] = raw_mask >> 32; +#endif } static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) { + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + + efx_ef10_get_stat_mask(efx, mask); return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, - efx_ef10_stat_mask(efx), names); + mask, names); } static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - const unsigned long *stats_mask = efx_ef10_stat_mask(efx); + DECLARE_BITMAP(mask, EF10_STAT_COUNT); __le64 generation_start, generation_end; u64 *stats = nic_data->stats; __le64 *dma_stats; + efx_ef10_get_stat_mask(efx, mask); + dma_stats = efx->stats_buffer.addr; nic_data = efx->nic_data; @@ -853,8 +893,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) if (generation_end == EFX_MC_STATS_GENERATION_INVALID) return 0; rmb(); - efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, stats, efx->stats_buffer.addr, false); + rmb(); generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; if (generation_end != generation_start) return -EAGAIN; @@ -873,12 +914,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats) { - const unsigned long *mask = efx_ef10_stat_mask(efx); + DECLARE_BITMAP(mask, EF10_STAT_COUNT); struct efx_ef10_nic_data *nic_data = efx->nic_data; u64 *stats = nic_data->stats; size_t stats_count = 0, index; int retry; + efx_ef10_get_stat_mask(efx, mask); + /* If we're unlucky enough to read statistics during the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index c082562..366c8e3 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool 
driver_operating, bool *was_attached) { MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); size_t outlen; int rc; @@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, goto fail; } + /* We currently assume we have control of the external link + * and are completely trusted by firmware. Abort probing + * if that's not true for this function. + */ + if (driver_operating && + outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN && + (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) != + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) { + netif_err(efx, probe, efx->net_dev, + "This driver version only supports one function per port\n"); + return -ENODEV; + } + if (was_attached != NULL) *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); return 0; diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index b5cf624..e0a63dd 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2574,8 +2574,58 @@ #define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ #define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ #define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ -#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */ -#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */ +/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +/* enum: PM discard_bb_overflow counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +/* enum: PM discard_vfifo_full counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +/* enum: RXDP counter: Number of packets dropped due to the queue being + * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 + * with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +/* enum: RXDP counter: Number of times an emergency descriptor fetch was + * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47 +/* enum: RXDP counter: Number of times the DPCPU waited for an existing + * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 
+ */ +#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48 +/* enum: Start of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_START 0x40 +/* enum: End of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_END 0x5f #define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ #define MC_CMD_MAC_NSTATS 0x61 /* enum */ @@ -5065,6 +5115,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 /* RxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index c75009b..9c90bf5 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -480,8 +480,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, * @count: Length of the @desc array * @mask: Bitmask of which elements of @desc are enabled * @stats: Buffer to update with the converted statistics. The length - * of this array must be at least the number of set bits in the - * first @count bits of @mask. + * of this array must be at least @count. * @dma_buf: DMA buffer containing hardware statistics * @accumulate: If set, the converted values will be added rather than * directly stored to the corresponding elements of @stats @@ -514,11 +513,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, } if (accumulate) - *stats += val; + stats[index] += val; else - *stats = val; + stats[index] = val; } - - ++stats; } } diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 08883c8..11b6112 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -402,6 +402,18 @@ enum { EF10_STAT_rx_align_error, EF10_STAT_rx_length_error, EF10_STAT_rx_nodesc_drops, + EF10_STAT_rx_pm_trunc_bb_overflow, + EF10_STAT_rx_pm_discard_bb_overflow, + EF10_STAT_rx_pm_trunc_vfifo_full, + EF10_STAT_rx_pm_discard_vfifo_full, + EF10_STAT_rx_pm_trunc_qbb, + EF10_STAT_rx_pm_discard_qbb, + EF10_STAT_rx_pm_discard_mapping, + EF10_STAT_rx_dp_q_disabled_packets, + EF10_STAT_rx_dp_di_dropped_packets, + EF10_STAT_rx_dp_streaming_packets, + EF10_STAT_rx_dp_emerg_fetch, + EF10_STAT_rx_dp_emerg_wait, EF10_STAT_COUNT }; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7290f11..1fd8125 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1766,8 +1766,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->mac_control = prop; - if (!of_property_read_u32(node, "dual_emac", &prop)) - data->dual_emac = prop; + if (of_property_read_bool(node, "dual_emac")) + data->dual_emac = 1; /* * Populate all the child nodes here... 
@@ -1777,7 +1777,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (ret) pr_warn("Doesn't have any child node\n"); - for_each_node_by_name(slave_node, "slave") { + for_each_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; u32 phyid; @@ -1786,6 +1786,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, struct device_node *mdio_node; struct platform_device *mdio; + /* This is no slave child node, continue */ + if (strcmp(slave_node->name, "slave")) + continue; + parp = of_get_property(slave_node, "phy_id", &lenp); if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { pr_err("Missing slave[%d] phy_id property\n", i); diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 42e6dee..0632d34 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -82,7 +82,6 @@ struct mrf24j40 { struct mutex buffer_mutex; /* only used to protect buf */ struct completion tx_complete; - struct work_struct irqwork; u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */ }; @@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb) if (ret) goto err; + INIT_COMPLETION(devrec->tx_complete); + /* Set TXNTRIG bit of TXNCON to send packet */ ret = read_short_reg(devrec, REG_TXNCON, &val); if (ret) @@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb) val |= 0x4; write_short_reg(devrec, REG_TXNCON, val); - INIT_COMPLETION(devrec->tx_complete); - /* Wait for the device to send the TX complete interrupt. */ ret = wait_for_completion_interruptible_timeout( &devrec->tx_complete, @@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = { static irqreturn_t mrf24j40_isr(int irq, void *data) { struct mrf24j40 *devrec = data; - - disable_irq_nosync(irq); - - schedule_work(&devrec->irqwork); - - return IRQ_HANDLED; -} - -static void mrf24j40_isrwork(struct work_struct *work) -{ - struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork); u8 intstat; int ret; @@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work) mrf24j40_handle_rx(devrec); out: - enable_irq(devrec->spi->irq); + return IRQ_HANDLED; } static int mrf24j40_probe(struct spi_device *spi) @@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi) mutex_init(&devrec->buffer_mutex); init_completion(&devrec->tx_complete); - INIT_WORK(&devrec->irqwork, mrf24j40_isrwork); devrec->spi = spi; spi_set_drvdata(spi, devrec); @@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi) val &= ~0x3; /* Clear RX mode (normal) */ write_short_reg(devrec, REG_RXMCR, val); - ret = request_irq(spi->irq, - mrf24j40_isr, - IRQF_TRIGGER_FALLING, - dev_name(&spi->dev), - devrec); + ret = request_threaded_irq(spi->irq, + NULL, + mrf24j40_isr, + IRQF_TRIGGER_LOW|IRQF_ONESHOT, + dev_name(&spi->dev), + devrec); if (ret) { dev_err(printdev(devrec), "Unable to get IRQ"); @@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi) dev_dbg(printdev(devrec), "remove\n"); free_irq(spi->irq, devrec); - flush_work(&devrec->irqwork); /* TODO: Is this the right call? 
*/ ieee802154_unregister_device(devrec->dev); ieee802154_free_device(devrec->dev); /* TODO: Will ieee802154_free_device() wait until ->xmit() is diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 807815f..7cb105c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, if (unlikely(!noblock)) add_wait_queue(&tfile->wq.wait, &wait); while (len) { - current->state = TASK_INTERRUPTIBLE; + if (unlikely(!noblock)) + current->state = TASK_INTERRUPTIBLE; /* Read frames from the queue */ if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { @@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, break; } - current->state = TASK_RUNNING; - if (unlikely(!noblock)) + if (unlikely(!noblock)) { + current->state = TASK_RUNNING; remove_wait_queue(&tfile->wq.wait, &wait); + } return ret; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 13f5434..1ce8af2 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1969,15 +1969,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, struct sk_buff *skb) { + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_frame_info *fi = get_frame_info(skb); struct list_head bf_head; - struct ath_buf *bf; - - bf = fi->bf; + struct ath_buf *bf = fi->bf; INIT_LIST_HEAD(&bf_head); list_add_tail(&bf->list, &bf_head); bf->bf_state.bf_type = 0; + if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { + bf->bf_state.bf_type = BUF_AMPDU; + ath_tx_addto_baw(sc, tid, bf); + } bf->bf_next = NULL; bf->bf_lastbf = bf; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index fd77833..c2b91f5 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -358,10 +358,12 @@ process_start: } } while (true); - if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) + spin_lock_irqsave(&adapter->main_proc_lock, flags); + if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) { + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); goto process_start; + } - spin_lock_irqsave(&adapter->main_proc_lock, flags); adapter->mwifiex_processing = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 76d95de..dc49e52 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops) goto exit_release_regions; } - pci_enable_msi(pci_dev); - hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; - goto exit_disable_msi; + goto exit_release_regions; } pci_set_drvdata(pci_dev, hw); @@ -152,9 +150,6 @@ exit_free_reg: exit_free_device: ieee80211_free_hw(hw); -exit_disable_msi: - pci_disable_msi(pci_dev); - exit_release_regions: pci_release_regions(pci_dev); @@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev) rt2x00pci_free_reg(rt2x00dev); ieee80211_free_hw(hw); - pci_disable_msi(pci_dev); - /* * Free the PCI device data. 
*/ diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index b45bce2..1b08d87 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *); static void connect(struct backend_info *); static void backend_create_xenvif(struct backend_info *be); static void unregister_hotplug_status_watch(struct backend_info *be); +static void set_backend_state(struct backend_info *be, + enum xenbus_state state); static int netback_remove(struct xenbus_device *dev) { struct backend_info *be = dev_get_drvdata(&dev->dev); + set_backend_state(be, XenbusStateClosed); + unregister_hotplug_status_watch(be); if (be->vif) { kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); diff --git a/include/linux/filter.h b/include/linux/filter.h index a6ac848..ff4e40c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -6,6 +6,7 @@ #include <linux/atomic.h> #include <linux/compat.h> +#include <linux/workqueue.h> #include <uapi/linux/filter.h> #ifdef CONFIG_COMPAT @@ -25,15 +26,19 @@ struct sk_filter { atomic_t refcnt; unsigned int len; /* Number of filter blocks */ + struct rcu_head rcu; unsigned int (*bpf_func)(const struct sk_buff *skb, const struct sock_filter *filter); - struct rcu_head rcu; - struct sock_filter insns[0]; + union { + struct sock_filter insns[0]; + struct work_struct work; + }; }; -static inline unsigned int sk_filter_len(const struct sk_filter *fp) +static inline unsigned int sk_filter_size(unsigned int proglen) { - return fp->len * sizeof(struct sock_filter) + sizeof(*fp); + return max(sizeof(struct sk_filter), + offsetof(struct sk_filter, insns[proglen])); } extern int sk_filter(struct sock *sk, struct sk_buff *skb); @@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, } #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) #else +#include <linux/slab.h> static inline void bpf_jit_compile(struct sk_filter *fp) { } static inline void bpf_jit_free(struct sk_filter *fp) { + kfree(fp); } #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns) #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6d77e0f..2e53b44 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2273,11 +2273,11 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) } #ifdef CONFIG_XPS -int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, +int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index); #else static inline int netif_set_xps_queue(struct net_device *dev, - struct cpumask *mask, + const struct cpumask *mask, u16 index) { return 0; diff --git a/include/net/sock.h b/include/net/sock.h index 7953254..7cf8d23 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1612,16 +1612,14 @@ static inline void sk_filter_release(struct sk_filter *fp) static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) { - unsigned int size = sk_filter_len(fp); - - atomic_sub(size, &sk->sk_omem_alloc); + atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc); sk_filter_release(fp); } static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) { atomic_inc(&fp->refcnt); - atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc); + atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc); } /* diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index 0623ec4..56f1216 100644 
--- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -1,5 +1,6 @@ # UAPI Header export list header-y += tc_csum.h +header-y += tc_defact.h header-y += tc_gact.h header-y += tc_ipt.h header-y += tc_mirred.h diff --git a/include/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h index 6f65d07..17dddb4 100644 --- a/include/linux/tc_act/tc_defact.h +++ b/include/uapi/linux/tc_act/tc_defact.h @@ -6,7 +6,7 @@ struct tc_defact { tc_gen; }; - + enum { TCA_DEF_UNSPEC, TCA_DEF_TM, diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 3091297..c7e634a 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev) return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ nla_total_size(2) + /* IFLA_VLAN_ID */ - sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ + nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */ vlan_qos_map_size(vlan->nr_ingress_mappings) + vlan_qos_map_size(vlan->nr_egress_mappings); } diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index c72d1bc..1356af6 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -65,6 +65,7 @@ static int __init batadv_init(void) batadv_recv_handler_init(); batadv_iv_init(); + batadv_nc_init(); batadv_event_workqueue = create_singlethread_workqueue("bat_events"); @@ -142,7 +143,7 @@ int batadv_mesh_init(struct net_device *soft_iface) if (ret < 0) goto err; - ret = batadv_nc_init(bat_priv); + ret = batadv_nc_mesh_init(bat_priv); if (ret < 0) goto err; @@ -167,7 +168,7 @@ void batadv_mesh_free(struct net_device *soft_iface) batadv_vis_quit(bat_priv); batadv_gw_node_purge(bat_priv); - batadv_nc_free(bat_priv); + batadv_nc_mesh_free(bat_priv); batadv_dat_free(bat_priv); batadv_bla_free(bat_priv); diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index a487d46..4ecc0b6 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -35,6 +35,20 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if); /** + * batadv_nc_init - one-time initialization for network coding + */ +int __init batadv_nc_init(void) +{ + int ret; + + /* Register our packet type */ + ret = batadv_recv_handler_register(BATADV_CODED, + batadv_nc_recv_coded_packet); + + return ret; +} + +/** * batadv_nc_start_timer - initialise the nc periodic worker * @bat_priv: the bat priv with all the soft interface information */ @@ -45,10 +59,10 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv) } /** - * batadv_nc_init - initialise coding hash table and start house keeping + * batadv_nc_mesh_init - initialise coding hash table and start house keeping * @bat_priv: the bat priv with all the soft interface information */ -int batadv_nc_init(struct batadv_priv *bat_priv) +int batadv_nc_mesh_init(struct batadv_priv *bat_priv) { bat_priv->nc.timestamp_fwd_flush = jiffies; bat_priv->nc.timestamp_sniffed_purge = jiffies; @@ -70,11 +84,6 @@ int batadv_nc_init(struct batadv_priv *bat_priv) batadv_hash_set_lock_class(bat_priv->nc.coding_hash, &batadv_nc_decoding_hash_lock_class_key); - /* Register our packet type */ - if (batadv_recv_handler_register(BATADV_CODED, - batadv_nc_recv_coded_packet) < 0) - goto err; - INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); batadv_nc_start_timer(bat_priv); @@ -1721,12 +1730,11 @@ free_nc_packet: } /** - * batadv_nc_free - clean up network coding memory + * batadv_nc_mesh_free - 
clean up network coding memory * @bat_priv: the bat priv with all the soft interface information */ -void batadv_nc_free(struct batadv_priv *bat_priv) +void batadv_nc_mesh_free(struct batadv_priv *bat_priv) { - batadv_recv_handler_unregister(BATADV_CODED); cancel_delayed_work_sync(&bat_priv->nc.work); batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL); diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h index 85a4ec8..ddfa618 100644 --- a/net/batman-adv/network-coding.h +++ b/net/batman-adv/network-coding.h @@ -22,8 +22,9 @@ #ifdef CONFIG_BATMAN_ADV_NC -int batadv_nc_init(struct batadv_priv *bat_priv); -void batadv_nc_free(struct batadv_priv *bat_priv); +int batadv_nc_init(void); +int batadv_nc_mesh_init(struct batadv_priv *bat_priv); +void batadv_nc_mesh_free(struct batadv_priv *bat_priv); void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_neigh_node, @@ -46,12 +47,17 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv); #else /* ifdef CONFIG_BATMAN_ADV_NC */ -static inline int batadv_nc_init(struct batadv_priv *bat_priv) +static inline int batadv_nc_init(void) { return 0; } -static inline void batadv_nc_free(struct batadv_priv *bat_priv) +static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv) +{ + return 0; +} + +static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv) { return; } diff --git a/net/compat.c b/net/compat.c index f0a1ba6..8903258 100644 --- a/net/compat.c +++ b/net/compat.c @@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || __get_user(kmsg->msg_flags, &umsg->msg_flags)) return -EFAULT; + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) + return -EINVAL; kmsg->msg_name = compat_ptr(tmp1); kmsg->msg_iov = compat_ptr(tmp2); kmsg->msg_control = compat_ptr(tmp3); diff --git a/net/core/dev.c b/net/core/dev.c index fa0b2b0..1b6eadf 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1916,7 +1916,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map, return new_map; } -int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index) +int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) { struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; struct xps_map *map, *new_map; diff --git a/net/core/filter.c b/net/core/filter.c index 6438f29..01b7808 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu) struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); bpf_jit_free(fp); - kfree(fp); } EXPORT_SYMBOL(sk_filter_release_rcu); @@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp, if (fprog->filter == NULL) return -EINVAL; - fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); + fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL); if (!fp) return -ENOMEM; memcpy(fp->insns, fprog->filter, fsize); @@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) { struct sk_filter *fp, *old_fp; unsigned int fsize = sizeof(struct sock_filter) * fprog->len; + unsigned int sk_fsize = sk_filter_size(fprog->len); int err; if (sock_flag(sk, SOCK_FILTER_LOCKED)) @@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (fprog->filter == NULL) return -EINVAL; - fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); + fp = sock_kmalloc(sk, 
sk_fsize, GFP_KERNEL); if (!fp) return -ENOMEM; if (copy_from_user(fp->insns, fprog->filter, fsize)) { - sock_kfree_s(sk, fp, fsize+sizeof(*fp)); + sock_kfree_s(sk, fp, sk_fsize); return -EFAULT; } diff --git a/net/core/sock.c b/net/core/sock.c index 2bd9b3f..fd6afa2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2331,6 +2331,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) #endif sk->sk_max_pacing_rate = ~0U; + sk->sk_pacing_rate = ~0U; /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index c85e71e..ff41b4d 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; + if (real_dev->type != ARPHRD_IEEE802154) + return -EINVAL; lowpan_dev_info(dev)->real_dev = real_dev; lowpan_dev_info(dev)->fragment_tag = 0; @@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, entry->ldev = dev; + /* Set the lowpan harware address to the wpan hardware address. */ + memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN); + mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); INIT_LIST_HEAD(&entry->list); list_add_tail(&entry->list, &lowpan_devices); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2779037..ae19959 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -287,7 +287,7 @@ begintw: if (unlikely(!INET_TW_MATCH(sk, net, acookie, saddr, daddr, ports, dif))) { - sock_put(sk); + inet_twsk_put(inet_twsk(sk)); goto begintw; } goto out; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 727f436..6011615 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) RT_SCOPE_LINK); goto make_route; } - if (fl4->saddr) { + if (!fl4->saddr) { if (ipv4_is_multicast(fl4->daddr)) fl4->saddr = inet_select_addr(dev_out, 0, fl4->flowi4_scope); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fa6cf1f..47b8ab7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1326,7 +1326,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, tp->lost_cnt_hint -= tcp_skb_pcount(prev); } - TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; + TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + TCP_SKB_CB(prev)->end_seq++; + if (skb == tcp_highest_sack(sk)) tcp_advance_highest_sack(sk, skb); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e6bb825..c6f01f2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb unsigned int size = 0; unsigned int eff_sacks; + opts->options = 0; + #ifdef CONFIG_TCP_MD5SIG *md5 = tp->af_specific->md5_lookup(sk, sk); if (unlikely(*md5)) { diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 32b4a16..066640e 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -116,7 +116,7 @@ begintw: } if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, ports, dif))) { - sock_put(sk); + inet_twsk_put(inet_twsk(sk)); goto begintw; } goto out; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 
7bb5446..1ef1fa2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1173,9 +1173,8 @@ done: static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) { - struct ip6_tnl *tunnel = netdev_priv(dev); if (new_mtu < 68 || - new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) + new_mtu > 0xFFF8 - dev->hard_header_len) return -EINVAL; dev->mtu = new_mtu; return 0; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a791552..583b77e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) { - if (new_mtu < IPV6_MIN_MTU) { - return -EINVAL; + struct ip6_tnl *tnl = netdev_priv(dev); + + if (tnl->parms.proto == IPPROTO_IPIP) { + if (new_mtu < 68) + return -EINVAL; + } else { + if (new_mtu < IPV6_MIN_MTU) + return -EINVAL; } + if (new_mtu > 0xFFF8 - dev->hard_header_len) + return -EINVAL; dev->mtu = new_mtu; return 0; } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index feae495a..b076e83 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -115,6 +115,11 @@ struct l2tp_net { static void l2tp_session_set_header_len(struct l2tp_session *session, int version); static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); +static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) +{ + return sk->sk_user_data; +} + static inline struct l2tp_net *l2tp_pernet(struct net *net) { BUG_ON(!net); @@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk, return 0; #if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == PF_INET6) { + if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) { if (!uh->check) { LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); return 1; @@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Queue the packet to IP for output */ skb->local_df = 1; #if IS_ENABLED(CONFIG_IPV6) - if (skb->sk->sk_family == PF_INET6) + if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped) error = inet6_csk_xmit(skb, NULL); else #endif @@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len /* Calculate UDP checksum if configured to do so */ #if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == PF_INET6) + if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) l2tp_xmit_ipv6_csum(sk, skb, udp_len); else #endif @@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb); */ static void l2tp_tunnel_destruct(struct sock *sk) { - struct l2tp_tunnel *tunnel; + struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); struct l2tp_net *pn; - tunnel = sk->sk_user_data; if (tunnel == NULL) goto end; @@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 } /* Check if this socket has already been prepped */ - tunnel = (struct l2tp_tunnel *)sk->sk_user_data; + tunnel = l2tp_tunnel(sk); if (tunnel != NULL) { /* This socket has already been prepped */ err = -EBUSY; @@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 if (cfg != NULL) tunnel->debug = cfg->debug; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) { + struct ipv6_pinfo *np = inet6_sk(sk); + + if (ipv6_addr_v4mapped(&np->saddr) && + ipv6_addr_v4mapped(&np->daddr)) { + struct inet_sock *inet = inet_sk(sk); + + tunnel->v4mapped = true; + inet->inet_saddr = np->saddr.s6_addr32[3]; + inet->inet_rcv_saddr = 
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 66a559b..6f251cb 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
 	struct sock		*sock;		/* Parent socket */
 	int			fd;		/* Parent fd, if tunnel socket
 						 * was created by userspace */
+#if IS_ENABLED(CONFIG_IPV6)
+	bool			v4mapped;
+#endif
 	struct work_struct	del_work;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 54395d7..674eac1 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3056,6 +3056,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
 	case NL80211_IFTYPE_ADHOC:
 		if (!bssid)
 			return 0;
+		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+			return 0;
 		if (ieee80211_is_beacon(hdr->frame_control)) {
 			return 1;
 		} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e1b34a1..9c3200b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2103,7 +2103,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
-	int rate, skip, shift;
+	int rate, shift;
 	u8 i, exrates, *pos;
 	u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 	u32 rate_flags;
@@ -2131,14 +2131,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 		pos = skb_put(skb, exrates + 2);
 		*pos++ = WLAN_EID_EXT_SUPP_RATES;
 		*pos++ = exrates;
-		skip = 0;
 		for (i = 8; i < sband->n_bitrates; i++) {
 			u8 basic = 0;
 			if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
 				continue;
-			if (skip++ < 8)
-				continue;
 			if (need_basic && basic_rates & BIT(i))
 				basic = 0x80;
 			rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a2fef8b..a9dfdda 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -472,20 +472,16 @@ begin:
 	if (f->credit > 0 || !q->rate_enable)
 		goto out;
-	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
-		rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+	rate = q->flow_max_rate;
+	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+		rate = min(skb->sk->sk_pacing_rate, rate);
-		rate = min(rate, q->flow_max_rate);
-	} else {
-		rate = q->flow_max_rate;
-		if (rate == ~0U)
-			goto out;
-	}
-	if (rate) {
+	if (rate != ~0U) {
 		u32 plen = max(qdisc_pkt_len(skb), q->quantum);
 		u64 len = (u64)plen * NSEC_PER_SEC;
-		do_div(len, rate);
+		if (likely(rate))
+			do_div(len, rate);
 		/* Since socket rate can change later,
 		 * clamp the delay to 125 ms.
 		 * TODO: maybe segment the too big skb, as in commit
@@ -656,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 		q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 	if (tb[TCA_FQ_INITIAL_QUANTUM])
-		q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
 	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
 		q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -735,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (opts == NULL)
 		goto nla_put_failure;
+	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+	 * do not bother giving its value
+	 */
 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
 	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
 	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
 	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
 	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
-	    nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
 	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
 	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
 		goto nla_put_failure;
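The sch_fq rework above pairs with the sk_pacing_rate/sk_max_pacing_rate defaults set in sock_init_data() earlier in this merge: fq now paces a flow at min(sk_pacing_rate, flow_max_rate) and treats ~0U as "no limit". As a hedged sketch only (not part of the merge; it assumes headers that export SO_MAX_PACING_RATE, with the asm-generic value as a fallback assumption), an application can cap its own pacing like this:

/* Illustrative sketch: cap a socket's pacing rate so fq (and TCP pacing)
 * will not send faster than ~1 MB/s for this flow. Not part of the
 * patches above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* asm-generic value; assumption for old headers */
#endif

int main(void)
{
	unsigned int rate = 1000000;	/* bytes per second */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, sizeof(rate)) < 0)
		perror("SO_MAX_PACING_RATE");
	close(fd);
	return 0;
}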
diff --git a/net/socket.c b/net/socket.c
index ebed4b6..c226ace 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1964,6 +1964,16 @@ struct used_address {
 	unsigned int name_len;
 };
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+				 struct msghdr __user *umsg)
+{
+	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+		return -EFAULT;
+	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+		return -EINVAL;
+	return 0;
+}
+
 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
 			  struct msghdr *msg_sys, unsigned int flags,
 			  struct used_address *used_address)
@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-		return -EFAULT;
+	} else {
+		err = copy_msghdr_from_user(msg_sys, msg);
+		if (err)
+			return err;
+	}
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;
@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-		return -EFAULT;
+	} else {
+		err = copy_msghdr_from_user(msg_sys, msg);
+		if (err)
+			return err;
+	}
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;
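copy_msghdr_from_user() above now rejects msg_namelen values larger than sizeof(struct sockaddr_storage). Well-behaved callers already stay within that bound, as in this illustrative recvmsg() sketch (not part of the merge; plain POSIX sockets API):

/* Illustrative sketch: a recvmsg() call whose msg_name/msg_namelen fit
 * in a sockaddr_storage, the bound now enforced by the kernel above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in bind_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		.sin_port = 0,			/* any free port */
	};
	struct sockaddr_storage peer;
	char payload[1500];
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = {
		.msg_name    = &peer,
		.msg_namelen = sizeof(peer),	/* <= sizeof(sockaddr_storage) */
		.msg_iov     = &iov,
		.msg_iovlen  = 1,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || bind(fd, (struct sockaddr *)&bind_addr, sizeof(bind_addr)) < 0)
		return 1;
	/* MSG_DONTWAIT: only demonstrate the call shape, do not block. */
	if (recvmsg(fd, &msg, MSG_DONTWAIT) < 0)
		perror("recvmsg");
	close(fd);
	return 0;
}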
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 9bc6db04..e7000be 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head,
 	/* Allow network administrator to have same access as root.
 	 */
 	if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
-	    uid_eq(root_uid, current_uid())) {
+	    uid_eq(root_uid, current_euid())) {
 		int mode = (table->mode >> 6) & 7;
 		return (mode << 6) | (mode << 3) | mode;
 	}
 	/* Allow netns root group to have the same access as the root group */
-	if (gid_eq(root_gid, current_gid())) {
+	if (in_egroup_p(root_gid)) {
 		int mode = (table->mode >> 3) & 7;
 		return (mode << 3) | mode;
 	}
diff --git a/net/unix/diag.c b/net/unix/diag.c
index d591091..86fa0f3 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
 	rep->udiag_family = AF_UNIX;
 	rep->udiag_type = sk->sk_type;
 	rep->udiag_state = sk->sk_state;
+	rep->pad = 0;
 	rep->udiag_ino = sk_ino;
 	sock_diag_save_cookie(sk, rep->udiag_cookie);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6715396..fe8d4f2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
 	/* check and set up bitrates */
 	ieee80211_set_bitrate_flags(wiphy);
-
+	rtnl_lock();
 	res = device_add(&rdev->wiphy.dev);
-	if (res)
-		return res;
-
-	res = rfkill_register(rdev->rfkill);
 	if (res) {
-		device_del(&rdev->wiphy.dev);
+		rtnl_unlock();
 		return res;
 	}
-	rtnl_lock();
 	/* set up regulatory info */
 	wiphy_regulatory_register(wiphy);
@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
 	rdev->wiphy.registered = true;
 	rtnl_unlock();
+
+	res = rfkill_register(rdev->rfkill);
+	if (res) {
+		rfkill_destroy(rdev->rfkill);
+		rdev->rfkill = NULL;
+		wiphy_unregister(&rdev->wiphy);
+		return res;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(wiphy_register);
@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
 		rtnl_unlock();
 		__count == 0; }));
-	rfkill_unregister(rdev->rfkill);
+	if (rdev->rfkill)
+		rfkill_unregister(rdev->rfkill);
 	rtnl_lock();
 	rdev->wiphy.registered = false;
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 39bff7d..403fe29 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
 		if (chan->flags & IEEE80211_CHAN_DISABLED)
 			continue;
 		wdev->wext.ibss.chandef.chan = chan;
+		wdev->wext.ibss.chandef.center_freq1 =
+			chan->center_freq;
 		break;
 	}
@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
 	if (chan) {
 		wdev->wext.ibss.chandef.chan = chan;
 		wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+		wdev->wext.ibss.chandef.center_freq1 = freq;
 		wdev->wext.ibss.channel_fixed = true;
 	} else {
 		/* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index af8d84a..626dc3b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
 		change = true;
 	}
-	if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
+	if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
 	    !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
 		return -EOPNOTSUPP;
@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 			       info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags);
-	if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
+	if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
 	    !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
 		return -EOPNOTSUPP;