path: root/net
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_sysfs.c        |  41
-rw-r--r--  net/bridge/Kconfig               |   1
-rw-r--r--  net/bridge/br_multicast.c        |  19
-rw-r--r--  net/core/dev_mcast.c             |   5
-rw-r--r--  net/core/ethtool.c               | 103
-rw-r--r--  net/core/neighbour.c             |   2
-rw-r--r--  net/core/sock.c                  |  19
-rw-r--r--  net/dccp/minisocks.c             |   2
-rw-r--r--  net/ipv4/ip_gre.c                |   7
-rw-r--r--  net/ipv4/ipconfig.c              |  57
-rw-r--r--  net/ipv4/proc.c                  |   2
-rw-r--r--  net/ipv4/route.c                 |  50
-rw-r--r--  net/ipv4/tcp_ipv4.c              |  15
-rw-r--r--  net/ipv4/tcp_minisocks.c         |   2
-rw-r--r--  net/ipv4/tcp_output.c            |  18
-rw-r--r--  net/ipv4/udp.c                   |   6
-rw-r--r--  net/ipv4/xfrm4_policy.c          |   5
-rw-r--r--  net/ipv6/addrconf.c              | 104
-rw-r--r--  net/ipv6/fib6_rules.c            |  11
-rw-r--r--  net/ipv6/route.c                 |  11
-rw-r--r--  net/ipv6/tcp_ipv6.c              |   7
-rw-r--r--  net/ipv6/udp.c                   |  28
-rw-r--r--  net/ipv6/xfrm6_policy.c          |   3
-rw-r--r--  net/llc/llc_c_ac.c               |   2
-rw-r--r--  net/llc/llc_conn.c               |   3
-rw-r--r--  net/mac80211/debugfs_netdev.c    |  10
-rw-r--r--  net/mac80211/mlme.c              |  32
-rw-r--r--  net/mac80211/rate.h              |   5
-rw-r--r--  net/mac80211/sta_info.c          |   1
-rw-r--r--  net/packet/af_packet.c           |   6
-rw-r--r--  net/rfkill/input.c               |   8
-rw-r--r--  net/sctp/input.c                 |  42
-rw-r--r--  net/sctp/socket.c                |   3
-rw-r--r--  net/sunrpc/xprtrdma/transport.c  |   7
-rw-r--r--  net/sunrpc/xprtsock.c            |   9
-rw-r--r--  net/tipc/bearer.c                |  37
-rw-r--r--  net/tipc/bearer.h                |   2
-rw-r--r--  net/tipc/link.c                  |   9
-rw-r--r--  net/tipc/net.c                   |  25
-rw-r--r--  net/tipc/socket.c                |   6
-rw-r--r--  net/tipc/subscr.c                |  57
-rw-r--r--  net/tipc/subscr.h                |   2
-rw-r--r--  net/x25/x25_dev.c                |   2
-rw-r--r--  net/xfrm/xfrm_policy.c           |   7
44 files changed, 477 insertions, 316 deletions
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 1a79a6c..cafb55b 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -405,20 +406,11 @@ static struct device_type bt_host = {
.release = bt_host_release,
};
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static int inquiry_cache_show(struct seq_file *f, void *p)
{
- struct hci_dev *hdev = file->private_data;
+ struct hci_dev *hdev = f->private;
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
- char buf[4096];
- int n = 0;
hci_dev_lock_bh(hdev);
@@ -426,23 +418,30 @@ static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
struct inquiry_data *data = &e->data;
bdaddr_t bdaddr;
baswap(&bdaddr, &data->bdaddr);
- n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
- batostr(&bdaddr),
- data->pscan_rep_mode, data->pscan_period_mode,
- data->pscan_mode, data->dev_class[2],
- data->dev_class[1], data->dev_class[0],
- __le16_to_cpu(data->clock_offset),
- data->rssi, data->ssp_mode, e->timestamp);
+ seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+ batostr(&bdaddr),
+ data->pscan_rep_mode, data->pscan_period_mode,
+ data->pscan_mode, data->dev_class[2],
+ data->dev_class[1], data->dev_class[0],
+ __le16_to_cpu(data->clock_offset),
+ data->rssi, data->ssp_mode, e->timestamp);
}
hci_dev_unlock_bh(hdev);
- return simple_read_from_buffer(userbuf, count, ppos, buf, n);
+ return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, inquiry_cache_show, inode->i_private);
}
static const struct file_operations inquiry_cache_fops = {
- .open = inquiry_cache_open,
- .read = inquiry_cache_read,
+ .open = inquiry_cache_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
int hci_register_sysfs(struct hci_dev *hdev)
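The hci_sysfs.c hunk above replaces an open-coded read handler built on a 4 KB on-stack buffer with the seq_file interface, so the output is no longer capped by a local buffer. For reference, a minimal sketch of the same single_open() idiom for a debugfs file follows; my_dev, my_show and my_fops are illustrative names, not part of the patch, and the sketch assumes <linux/debugfs.h> and <linux/seq_file.h>.

static int my_show(struct seq_file *f, void *unused)
{
	struct my_dev *mdev = f->private;	/* handed over by single_open() */

	seq_printf(f, "state: %d\n", mdev->state);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is whatever was passed to debugfs_create_file() */
	return single_open(file, my_show, inode->i_private);
}

static const struct file_operations my_fops = {
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};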
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 19a6b96..d115d5c 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -35,6 +35,7 @@ config BRIDGE
config BRIDGE_IGMP_SNOOPING
bool "IGMP snooping"
depends on BRIDGE
+ depends on INET
default y
---help---
If you say Y here, then the Ethernet bridge will be able selectively
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2559fb5..fd96a8d 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -38,7 +38,7 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
struct net_bridge_mdb_entry *mp;
struct hlist_node *p;
- hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+ hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
if (dst == mp->addr)
return mp;
}
@@ -627,8 +627,8 @@ static void br_multicast_port_query_expired(unsigned long data)
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
- if (port && (port->state == BR_STATE_DISABLED ||
- port->state == BR_STATE_BLOCKING))
+ if (port->state == BR_STATE_DISABLED ||
+ port->state == BR_STATE_BLOCKING)
goto out;
if (port->multicast_startup_queries_sent <
@@ -823,6 +823,7 @@ static int br_multicast_query(struct net_bridge *br,
unsigned long max_delay;
unsigned long now = jiffies;
__be32 group;
+ int err = 0;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
@@ -841,12 +842,14 @@ static int br_multicast_query(struct net_bridge *br,
group = 0;
}
} else {
- if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
- return -EINVAL;
+ if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
+ err = -EINVAL;
+ goto out;
+ }
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs)
- return 0;
+ goto out;
max_delay = ih3->code ? 1 :
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE);
@@ -876,7 +879,7 @@ static int br_multicast_query(struct net_bridge *br,
out:
spin_unlock(&br->multicast_lock);
- return 0;
+ return err;
}
static void br_multicast_leave_group(struct net_bridge *br,
@@ -1135,7 +1138,7 @@ void br_multicast_stop(struct net_bridge *br)
if (mdb->old) {
spin_unlock_bh(&br->multicast_lock);
- synchronize_rcu_bh();
+ rcu_barrier_bh();
spin_lock_bh(&br->multicast_lock);
WARN_ON(mdb->old);
}
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index fd91569..3dc295b 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -97,8 +97,9 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
netif_addr_lock_bh(dev);
if (alen != dev->addr_len)
- return -EINVAL;
- err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
+ err = -EINVAL;
+ else
+ err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0f2f821..f4cb6b6 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
/*
@@ -199,10 +200,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_settings(dev, &cmd);
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
{
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -214,6 +212,10 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
info.cmd = ETHTOOL_GDRVINFO;
ops->get_drvinfo(dev, &info);
+ /*
+ * this method of obtaining string set info is deprecated;
+ * Use ETHTOOL_GSSET_INFO instead.
+ */
if (ops->get_sset_count) {
int rc;
@@ -237,10 +239,67 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
return 0;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
+ void __user *useraddr)
+{
+ struct ethtool_sset_info info;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u64 sset_mask;
+ int i, idx = 0, n_bits = 0, ret, rc;
+ u32 *info_buf = NULL;
+
+ if (!ops->get_sset_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&info, useraddr, sizeof(info)))
+ return -EFAULT;
+
+ /* store copy of mask, because we zero struct later on */
+ sset_mask = info.sset_mask;
+ if (!sset_mask)
+ return 0;
+
+ /* calculate size of return buffer */
+ n_bits = hweight64(sset_mask);
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GSSET_INFO;
+
+ info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
+ if (!info_buf)
+ return -ENOMEM;
+
+ /*
+ * fill return buffer based on input bitmask and successful
+ * get_sset_count return
+ */
+ for (i = 0; i < 64; i++) {
+ if (!(sset_mask & (1ULL << i)))
+ continue;
+
+ rc = ops->get_sset_count(dev, i);
+ if (rc >= 0) {
+ info.sset_mask |= (1ULL << i);
+ info_buf[idx++] = rc;
+ }
+ }
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ goto out;
+
+ useraddr += offsetof(struct ethtool_sset_info, data);
+ if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
+ goto out;
+
+ ret = 0;
+
+out:
+ kfree(info_buf);
+ return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc cmd;
@@ -253,10 +312,7 @@ static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *usera
return dev->ethtool_ops->set_rxnfc(dev, &cmd);
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -328,10 +384,7 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
list->count++;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rx_ntuple cmd;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -799,10 +852,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
{
struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
@@ -816,10 +866,7 @@ static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *us
return 0;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
{
struct ethtool_coalesce coalesce;
@@ -1229,10 +1276,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
return actor(dev, edata.data);
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
{
struct ethtool_flash efl;
@@ -1471,6 +1515,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXNTUPLE:
rc = ethtool_get_rx_ntuple(dev, useraddr);
break;
+ case ETHTOOL_GSSET_INFO:
+ rc = ethtool_get_sset_info(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
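The ethtool.c block above adds the kernel side of ETHTOOL_GSSET_INFO, which reports the element count of each requested string set in one call rather than through the deprecated per-field counts in ethtool_drvinfo. Below is a rough userspace sketch of driving the new command; the device name "eth0", the choice of ETH_SS_STATS and the error handling are illustrative, and it assumes a <linux/ethtool.h> that already defines ETHTOOL_GSSET_INFO and struct ethtool_sset_info.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_sset_info *req;
	__u32 *counts;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	/* header plus room for one __u32 count per requested string set */
	req = calloc(1, sizeof(*req) + sizeof(__u32));
	if (fd < 0 || !req)
		return 1;

	req->cmd = ETHTOOL_GSSET_INFO;
	req->sset_mask = 1ULL << ETH_SS_STATS;	/* ask only for the stats string set */
	counts = (__u32 *)(req + 1);		/* kernel writes counts right after the header */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)req;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 &&
	    (req->sset_mask & (1ULL << ETH_SS_STATS)))
		printf("eth0 exports %u ethtool statistics\n", counts[0]);

	free(req);
	return 0;
}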
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d102f6d..6cee643 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -771,6 +771,8 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
}
static void neigh_invalidate(struct neighbour *neigh)
+ __releases(neigh->lock)
+ __acquires(neigh->lock)
{
struct sk_buff *skb;
diff --git a/net/core/sock.c b/net/core/sock.c
index fcd397a..c5812bb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else
- sk_add_backlog(sk, skb);
+ } else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+
bh_unlock_sock(sk);
out:
sock_put(sk);
@@ -1139,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
sock_lock_init(newsk);
bh_lock_sock(newsk);
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+ newsk->sk_backlog.len = 0;
atomic_set(&newsk->sk_rmem_alloc, 0);
/*
@@ -1542,6 +1547,12 @@ static void __release_sock(struct sock *sk)
bh_lock_sock(sk);
} while ((skb = sk->sk_backlog.head) != NULL);
+
+ /*
+ * Doing the zeroing here guarantee we can not loop forever
+ * while a wild producer attempts to flood us.
+ */
+ sk->sk_backlog.len = 0;
}
/**
@@ -1874,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
@@ -2276,7 +2288,8 @@ out_free_request_sock_slab:
prot->rsk_prot->slab = NULL;
}
out_free_request_sock_slab_name:
- kfree(prot->rsk_prot->slab_name);
+ if (prot->rsk_prot)
+ kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
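The sock.c change above bounds the socket backlog (sk_backlog.limit is set to twice sk_rcvbuf, and sk_backlog.len is reset once __release_sock() has drained the queue) and lets sk_add_backlog() report failure. The per-protocol hunks that follow (TCP, UDP, DCCP, SCTP, LLC, TIPC, X.25) all convert to the same caller pattern, roughly sketched here; my_proto_rcv(), my_proto_do_rcv() and the -ENOBUFS return are placeholders, not code from the patch.

static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		rc = my_proto_do_rcv(sk, skb);	/* placeholder for the real handler */
	} else if (sk_add_backlog(sk, skb)) {
		/* backlog grew past sk->sk_backlog.limit: count the drop and bail out */
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}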
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a0..0d508c3 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
* in main socket hash table and lock on listening
* socket does not protect us more.
*/
- sk_add_backlog(child, skb);
+ __sk_add_backlog(child, skb);
}
bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c0c5274..f47c9f7 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1144,12 +1144,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
if (saddr)
memcpy(&iph->saddr, saddr, 4);
-
- if (daddr) {
+ if (daddr)
memcpy(&iph->daddr, daddr, 4);
- return t->hlen;
- }
- if (iph->daddr && !ipv4_is_multicast(iph->daddr))
+ if (iph->daddr)
return t->hlen;
return -t->hlen;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 10a6a60..6789092 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -187,6 +187,16 @@ struct ic_device {
static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
static struct net_device *ic_dev __initdata = NULL; /* Selected device */
+static bool __init ic_device_match(struct net_device *dev)
+{
+ if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
+ (!(dev->flags & IFF_LOOPBACK) &&
+ (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
+ strncmp(dev->name, "dummy", 5)))
+ return true;
+ return false;
+}
+
static int __init ic_open_devs(void)
{
struct ic_device *d, **last;
@@ -207,10 +217,7 @@ static int __init ic_open_devs(void)
for_each_netdev(&init_net, dev) {
if (dev->flags & IFF_LOOPBACK)
continue;
- if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
- (!(dev->flags & IFF_LOOPBACK) &&
- (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
- strncmp(dev->name, "dummy", 5))) {
+ if (ic_device_match(dev)) {
int able = 0;
if (dev->mtu >= 364)
able |= IC_BOOTP;
@@ -228,7 +235,7 @@ static int __init ic_open_devs(void)
}
if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) {
rtnl_unlock();
- return -1;
+ return -ENOMEM;
}
d->dev = dev;
*last = d;
@@ -253,7 +260,7 @@ static int __init ic_open_devs(void)
printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name);
else
printk(KERN_ERR "IP-Config: No network devices available.\n");
- return -1;
+ return -ENODEV;
}
return 0;
}
@@ -1303,6 +1310,32 @@ __be32 __init root_nfs_parse_addr(char *name)
return addr;
}
+#define DEVICE_WAIT_MAX 12 /* 12 seconds */
+
+static int __init wait_for_devices(void)
+{
+ int i;
+
+ msleep(CONF_PRE_OPEN);
+ for (i = 0; i < DEVICE_WAIT_MAX; i++) {
+ struct net_device *dev;
+ int found = 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ if (ic_device_match(dev)) {
+ found = 1;
+ break;
+ }
+ }
+ rtnl_unlock();
+ if (found)
+ return 0;
+ ssleep(1);
+ }
+ return -ENODEV;
+}
+
/*
* IP Autoconfig dispatcher.
*/
@@ -1313,6 +1346,7 @@ static int __init ip_auto_config(void)
#ifdef IPCONFIG_DYNAMIC
int retries = CONF_OPEN_RETRIES;
#endif
+ int err;
#ifdef CONFIG_PROC_FS
proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1325,12 +1359,15 @@ static int __init ip_auto_config(void)
#ifdef IPCONFIG_DYNAMIC
try_try_again:
#endif
- /* Give hardware a chance to settle */
- msleep(CONF_PRE_OPEN);
+ /* Wait for devices to appear */
+ err = wait_for_devices();
+ if (err)
+ return err;
/* Setup all network devices */
- if (ic_open_devs() < 0)
- return -1;
+ err = ic_open_devs();
+ if (err)
+ return err;
/* Give drivers a chance to settle */
ssleep(CONF_POST_OPEN);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 242ed23..4f1f337 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
+ SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
+ SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b2ba558..d9b4024 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -146,7 +146,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
-static void rt_emergency_hash_rebuild(struct net *net);
static struct dst_ops ipv4_dst_ops = {
@@ -780,11 +779,30 @@ static void rt_do_flush(int process_context)
#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
+/*
+ * Given a hash chain and an item in this hash chain,
+ * find if a previous entry has the same hash_inputs
+ * (but differs on tos, mark or oif)
+ * Returns 0 if an alias is found.
+ * Returns ONE if rth has no alias before itself.
+ */
+static int has_noalias(const struct rtable *head, const struct rtable *rth)
+{
+ const struct rtable *aux = head;
+
+ while (aux != rth) {
+ if (compare_hash_inputs(&aux->fl, &rth->fl))
+ return 0;
+ aux = aux->u.dst.rt_next;
+ }
+ return ONE;
+}
+
static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
- struct rtable *rth, *aux, **rthp;
+ struct rtable *rth, **rthp;
unsigned long samples = 0;
unsigned long sum = 0, sum2 = 0;
unsigned long delta;
@@ -835,15 +853,7 @@ nofree:
* attributes don't unfairly skew
* the length computation
*/
- for (aux = rt_hash_table[i].chain;;) {
- if (aux == rth) {
- length += ONE;
- break;
- }
- if (compare_hash_inputs(&aux->fl, &rth->fl))
- break;
- aux = aux->u.dst.rt_next;
- }
+ length += has_noalias(rt_hash_table[i].chain, rth);
continue;
}
} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
@@ -1073,6 +1083,21 @@ work_done:
out: return 0;
}
+/*
+ * Returns number of entries in a hash chain that have different hash_inputs
+ */
+static int slow_chain_length(const struct rtable *head)
+{
+ int length = 0;
+ const struct rtable *rth = head;
+
+ while (rth) {
+ length += has_noalias(head, rth);
+ rth = rth->u.dst.rt_next;
+ }
+ return length >> FRACT_BITS;
+}
+
static int rt_intern_hash(unsigned hash, struct rtable *rt,
struct rtable **rp, struct sk_buff *skb)
{
@@ -1185,7 +1210,8 @@ restart:
rt_free(cand);
}
} else {
- if (chain_length > rt_chain_length_max) {
+ if (chain_length > rt_chain_length_max &&
+ slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
struct net *net = dev_net(rt->u.dst.dev);
int num = ++net->ipv4.current_rt_cache_rebuild_count;
if (!rt_caching(dev_net(rt->u.dst.dev))) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c3588b4..70df409 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1651,13 +1651,15 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!sk)
goto no_tcp_socket;
- if (iph->ttl < inet_sk(sk)->min_ttl)
- goto discard_and_relse;
-
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
+ if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+ goto discard_and_relse;
+ }
+
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
nf_reset(skb);
@@ -1682,8 +1684,11 @@ process:
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
}
- } else
- sk_add_backlog(sk, skb);
+ } else if (unlikely(sk_add_backlog(sk, skb))) {
+ bh_unlock_sock(sk);
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+ goto discard_and_relse;
+ }
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5..4199bc6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
* in main socket hash table and lock on listening
* socket does not protect us more.
*/
- sk_add_backlog(child, skb);
+ __sk_add_backlog(child, skb);
}
bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4a1605d..f181b78 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2395,13 +2395,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct tcp_extend_values *xvp = tcp_xv(rvp);
struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_cookie_values *cvp = tp->cookie_values;
struct tcphdr *th;
struct sk_buff *skb;
struct tcp_md5sig_key *md5;
int tcp_header_size;
int mss;
+ int s_data_desired = 0;
- skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+ if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
+ s_data_desired = cvp->s_data_desired;
+ skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
if (skb == NULL)
return NULL;
@@ -2457,16 +2461,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
if (OPTION_COOKIE_EXTENSION & opts.options) {
- const struct tcp_cookie_values *cvp = tp->cookie_values;
-
- if (cvp != NULL &&
- cvp->s_data_constant &&
- cvp->s_data_desired > 0) {
- u8 *buf = skb_put(skb, cvp->s_data_desired);
+ if (s_data_desired) {
+ u8 *buf = skb_put(skb, s_data_desired);
/* copy data directly from the listening socket. */
- memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
- TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
+ memcpy(buf, cvp->s_data_payload, s_data_desired);
+ TCP_SKB_CB(skb)->end_seq += s_data_desired;
}
if (opts.hash_size > 0) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 608a544..7af756d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1371,8 +1371,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
- else
- sk_add_backlog(sk, skb);
+ else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ goto drop;
+ }
bh_unlock_sock(sk);
return rc;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 67107d6..e4a1483 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return 0;
}
-static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct rtable *rt = (struct rtable *)xdst->route;
- xdst->u.rt.fl = rt->fl;
+ xdst->u.rt.fl = *fl;
xdst->u.dst.dev = dev;
dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 88fd8c5..3381b43 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1380,6 +1380,8 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;
spin_unlock_bh(&ifp->lock);
+ if (dad_failed)
+ ipv6_ifa_notify(0, ifp);
in6_ifa_put(ifp);
#ifdef CONFIG_IPV6_PRIVACY
} else if (ifp->flags&IFA_F_TEMPORARY) {
@@ -2615,7 +2617,7 @@ static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
static int addrconf_ifdown(struct net_device *dev, int how)
{
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa, **bifa;
+ struct inet6_ifaddr *ifa, *keep_list, **bifa;
struct net *net = dev_net(dev);
int i;
@@ -2649,11 +2651,11 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&addrconf_hash_lock);
while ((ifa = *bifa) != NULL) {
if (ifa->idev == idev &&
- (how || !(ifa->flags&IFA_F_PERMANENT))) {
+ (how || !(ifa->flags&IFA_F_PERMANENT) ||
+ ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
*bifa = ifa->lst_next;
ifa->lst_next = NULL;
- addrconf_del_timer(ifa);
- in6_ifa_put(ifa);
+ __in6_ifa_put(ifa);
continue;
}
bifa = &ifa->lst_next;
@@ -2689,31 +2691,51 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&idev->lock);
}
#endif
- bifa = &idev->addr_list;
- while ((ifa = *bifa) != NULL) {
- if (how == 0 && (ifa->flags&IFA_F_PERMANENT)) {
- /* Retain permanent address on admin down */
+ keep_list = NULL;
+ bifa = &keep_list;
+ while ((ifa = idev->addr_list) != NULL) {
+ idev->addr_list = ifa->if_next;
+ ifa->if_next = NULL;
+
+ addrconf_del_timer(ifa);
+
+ /* If just doing link down, and address is permanent
+ and not link-local, then retain it. */
+ if (how == 0 &&
+ (ifa->flags&IFA_F_PERMANENT) &&
+ !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
+
+ /* Move to holding list */
+ *bifa = ifa;
bifa = &ifa->if_next;
- /* Restart DAD if needed when link comes back up */
- if ( !((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
- idev->cnf.accept_dad <= 0 ||
- (ifa->flags & IFA_F_NODAD)))
- ifa->flags |= IFA_F_TENTATIVE;
- } else {
- *bifa = ifa->if_next;
- ifa->if_next = NULL;
+ /* If not doing DAD on this address, just keep it. */
+ if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
+ idev->cnf.accept_dad <= 0 ||
+ (ifa->flags & IFA_F_NODAD))
+ continue;
+ /* If it was tentative already, no need to notify */
+ if (ifa->flags & IFA_F_TENTATIVE)
+ continue;
+
+ /* Flag it for later restoration when link comes up */
+ ifa->flags |= IFA_F_TENTATIVE;
+ in6_ifa_hold(ifa);
+ } else {
ifa->dead = 1;
- write_unlock_bh(&idev->lock);
+ }
+ write_unlock_bh(&idev->lock);
- __ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
- in6_ifa_put(ifa);
+ __ipv6_ifa_notify(RTM_DELADDR, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+ in6_ifa_put(ifa);
- write_lock_bh(&idev->lock);
- }
+ write_lock_bh(&idev->lock);
}
+
+ idev->addr_list = keep_list;
+
write_unlock_bh(&idev->lock);
/* Step 5: Discard multicast list */
@@ -2739,28 +2761,29 @@ static int addrconf_ifdown(struct net_device *dev, int how)
static void addrconf_rs_timer(unsigned long data)
{
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
+ struct inet6_dev *idev = ifp->idev;
- if (ifp->idev->cnf.forwarding)
+ read_lock(&idev->lock);
+ if (idev->dead || !(idev->if_flags & IF_READY))
goto out;
- if (ifp->idev->if_flags & IF_RA_RCVD) {
- /*
- * Announcement received after solicitation
- * was sent
- */
+ if (idev->cnf.forwarding)
+ goto out;
+
+ /* Announcement received after solicitation was sent */
+ if (idev->if_flags & IF_RA_RCVD)
goto out;
- }
spin_lock(&ifp->lock);
- if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) {
+ if (ifp->probes++ < idev->cnf.rtr_solicits) {
/* The wait after the last probe can be shorter */
addrconf_mod_timer(ifp, AC_RS,
- (ifp->probes == ifp->idev->cnf.rtr_solicits) ?
- ifp->idev->cnf.rtr_solicit_delay :
- ifp->idev->cnf.rtr_solicit_interval);
+ (ifp->probes == idev->cnf.rtr_solicits) ?
+ idev->cnf.rtr_solicit_delay :
+ idev->cnf.rtr_solicit_interval);
spin_unlock(&ifp->lock);
- ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
+ ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
} else {
spin_unlock(&ifp->lock);
/*
@@ -2768,10 +2791,11 @@ static void addrconf_rs_timer(unsigned long data)
* assumption any longer.
*/
printk(KERN_DEBUG "%s: no IPv6 routers present\n",
- ifp->idev->dev->name);
+ idev->dev->name);
}
out:
+ read_unlock(&idev->lock);
in6_ifa_put(ifp);
}
@@ -2850,9 +2874,9 @@ static void addrconf_dad_timer(unsigned long data)
struct inet6_dev *idev = ifp->idev;
struct in6_addr mcaddr;
- read_lock_bh(&idev->lock);
- if (idev->dead) {
- read_unlock_bh(&idev->lock);
+ read_lock(&idev->lock);
+ if (idev->dead || !(idev->if_flags & IF_READY)) {
+ read_unlock(&idev->lock);
goto out;
}
@@ -2864,7 +2888,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
- read_unlock_bh(&idev->lock);
+ read_unlock(&idev->lock);
addrconf_dad_completed(ifp);
@@ -2874,7 +2898,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->probes--;
addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
spin_unlock(&ifp->lock);
- read_unlock_bh(&idev->lock);
+ read_unlock(&idev->lock);
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 551882b..5e463c43 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -84,18 +84,11 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
if ((rule->flags & FIB_RULE_FIND_SADDR) &&
r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
struct in6_addr saddr;
- unsigned int srcprefs = 0;
-
- if (flags & RT6_LOOKUP_F_SRCPREF_TMP)
- srcprefs |= IPV6_PREFER_SRC_TMP;
- if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC)
- srcprefs |= IPV6_PREFER_SRC_PUBLIC;
- if (flags & RT6_LOOKUP_F_SRCPREF_COA)
- srcprefs |= IPV6_PREFER_SRC_COA;
if (ipv6_dev_get_saddr(net,
ip6_dst_idev(&rt->u.dst)->dev,
- &flp->fl6_dst, srcprefs,
+ &flp->fl6_dst,
+ rt6_flags2srcprefs(flags),
&saddr))
goto again;
if (!ipv6_prefix_equal(&saddr, &r->src.addr,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b08879e..52cd3ef 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -819,15 +819,8 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
if (!ipv6_addr_any(&fl->fl6_src))
flags |= RT6_LOOKUP_F_HAS_SADDR;
- else if (sk) {
- unsigned int prefs = inet6_sk(sk)->srcprefs;
- if (prefs & IPV6_PREFER_SRC_TMP)
- flags |= RT6_LOOKUP_F_SRCPREF_TMP;
- if (prefs & IPV6_PREFER_SRC_PUBLIC)
- flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
- if (prefs & IPV6_PREFER_SRC_COA)
- flags |= RT6_LOOKUP_F_SRCPREF_COA;
- }
+ else if (sk)
+ flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6963a6b..9b6dbba 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1740,8 +1740,11 @@ process:
if (!tcp_prequeue(sk, skb))
ret = tcp_v6_do_rcv(sk, skb);
}
- } else
- sk_add_backlog(sk, skb);
+ } else if (unlikely(sk_add_backlog(sk, skb))) {
+ bh_unlock_sock(sk);
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+ goto discard_and_relse;
+ }
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 52b8347..3c0c9c7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -583,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb1);
- else
- sk_add_backlog(sk, skb1);
+ else if (sk_add_backlog(sk, skb1)) {
+ kfree_skb(skb1);
+ bh_unlock_sock(sk);
+ goto drop;
+ }
bh_unlock_sock(sk);
- } else {
- atomic_inc(&sk->sk_drops);
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_INERRORS, IS_UDPLITE(sk));
+ continue;
}
+drop:
+ atomic_inc(&sk->sk_drops);
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_INERRORS, IS_UDPLITE(sk));
}
}
/*
@@ -754,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
- else
- sk_add_backlog(sk, skb);
+ else if (sk_add_backlog(sk, skb)) {
+ atomic_inc(&sk->sk_drops);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ goto discard;
+ }
bh_unlock_sock(sk);
sock_put(sk);
return 0;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index dbdc696..ae18165 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return 0;
}
-static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct rt6_info *rt = (struct rt6_info*)xdst->route;
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780..86d6985 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
llc_conn_state_process(sk, skb);
else {
llc_set_backlog_type(skb, LLC_EVENT);
- sk_add_backlog(sk, skb);
+ __sk_add_backlog(sk, skb);
}
}
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index a8dde9b..a12144d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -827,7 +827,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
else {
dprintk("%s: adding to backlog...\n", __func__);
llc_set_backlog_type(skb, LLC_PACKET);
- sk_add_backlog(sk, skb);
+ if (sk_add_backlog(sk, skb))
+ goto drop_unlock;
}
out:
bh_unlock_sock(sk);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9affe2c..b4ddb2f 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -48,20 +48,24 @@ static ssize_t ieee80211_if_write(
ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
{
u8 *buf;
- ssize_t ret = -ENODEV;
+ ssize_t ret;
- buf = kzalloc(count, GFP_KERNEL);
+ buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+ ret = -EFAULT;
if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
+ goto freebuf;
+ ret = -ENODEV;
rtnl_lock();
if (sdata->dev->reg_state == NETREG_REGISTERED)
ret = (*write)(sdata, buf, count);
rtnl_unlock();
+freebuf:
+ kfree(buf);
return ret;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 41812a1..be5f723 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -177,7 +177,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, bssid);
if (sta)
rate_control_rate_update(local, sband, sta,
- IEEE80211_RC_HT_CHANGED);
+ IEEE80211_RC_HT_CHANGED,
+ local->oper_channel_type);
rcu_read_unlock();
}
@@ -435,10 +436,12 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
ieee80211_send_nullfunc(local, sdata, 1);
- if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
- conf->flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- }
+ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+ return;
+
+ conf->flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
}
@@ -557,7 +560,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
(!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
ieee80211_send_nullfunc(local, sdata, 1);
- if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
+ if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+ (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
local->hw.conf.flags |= IEEE80211_CONF_PS;
@@ -1893,8 +1897,20 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated) {
- mutex_unlock(&ifmgd->mtx);
- return -EALREADY;
+ if (!req->prev_bssid ||
+ memcmp(req->prev_bssid, ifmgd->associated->bssid,
+ ETH_ALEN)) {
+ /*
+ * We are already associated and the request was not a
+ * reassociation request from the current BSS, so
+ * reject it.
+ */
+ mutex_unlock(&ifmgd->mtx);
+ return -EALREADY;
+ }
+
+ /* Trying to reassociate - clear previous association state */
+ ieee80211_set_disassoc(sdata);
}
mutex_unlock(&ifmgd->mtx);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index b6108bc..065a9619 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -66,7 +66,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
static inline void rate_control_rate_update(struct ieee80211_local *local,
struct ieee80211_supported_band *sband,
- struct sta_info *sta, u32 changed)
+ struct sta_info *sta, u32 changed,
+ enum nl80211_channel_type oper_chan_type)
{
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
@@ -74,7 +75,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
if (ref && ref->ops->rate_update)
ref->ops->rate_update(ref->priv, sband, ista,
- priv_sta, changed);
+ priv_sta, changed, oper_chan_type);
}
static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 211c475..56422d8 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -434,6 +434,7 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
/* check if STA exists already */
if (sta_info_get_bss(sdata, sta->sta.addr)) {
spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
rcu_read_lock();
err = -EEXIST;
goto out_free;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 031a5e6..1612d41 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1688,6 +1688,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
{
switch (i->type) {
case PACKET_MR_MULTICAST:
+ if (i->alen != dev->addr_len)
+ return -EINVAL;
if (what > 0)
return dev_mc_add(dev, i->addr, i->alen, 0);
else
@@ -1700,6 +1702,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
return dev_set_allmulti(dev, what);
break;
case PACKET_MR_UNICAST:
+ if (i->alen != dev->addr_len)
+ return -EINVAL;
if (what > 0)
return dev_unicast_add(dev, i->addr);
else
@@ -1734,7 +1738,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
goto done;
err = -EINVAL;
- if (mreq->mr_alen != dev->addr_len)
+ if (mreq->mr_alen > dev->addr_len)
goto done;
err = -ENOBUFS;
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index a7295ad..3713d7e 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -212,6 +212,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
case KEY_WIMAX:
rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
break;
+ case KEY_RFKILL:
+ rfkill_schedule_toggle(RFKILL_TYPE_ALL);
+ break;
}
} else if (type == EV_SW && code == SW_RFKILL_ALL)
rfkill_schedule_evsw_rfkillall(data);
@@ -295,6 +298,11 @@ static const struct input_device_id rfkill_ids[] = {
.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
},
{
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
+ },
+ {
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
.evbit = { BIT(EV_SW) },
.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e..3d74b26 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
const union sctp_addr *peer,
struct sctp_transport **pt);
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
/* Calculate the SCTP checksum of an SCTP packet. */
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
}
if (sock_owned_by_user(sk)) {
+ if (sctp_add_backlog(sk, skb)) {
+ sctp_bh_unlock_sock(sk);
+ sctp_chunk_free(chunk);
+ skb = NULL; /* sctp_chunk_free already freed the skb */
+ goto discard_release;
+ }
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
- sctp_add_backlog(sk, skb);
} else {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk_add_backlog(sk, skb);
- backloged = 1;
+ if (sk_add_backlog(sk, skb))
+ sctp_chunk_free(chunk);
+ else
+ backloged = 1;
} else
sctp_inq_push(inqueue, chunk);
@@ -362,22 +369,27 @@ done:
return 0;
}
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
struct sctp_ep_common *rcvr = chunk->rcvr;
+ int ret;
- /* Hold the assoc/ep while hanging on the backlog queue.
- * This way, we know structures we need will not disappear from us
- */
- if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- sctp_association_hold(sctp_assoc(rcvr));
- else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
- sctp_endpoint_hold(sctp_ep(rcvr));
- else
- BUG();
+ ret = sk_add_backlog(sk, skb);
+ if (!ret) {
+ /* Hold the assoc/ep while hanging on the backlog queue.
+ * This way, we know structures we need will not disappear
+ * from us
+ */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_hold(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_hold(sctp_ep(rcvr));
+ else
+ BUG();
+ }
+ return ret;
- sk_add_backlog(sk, skb);
}
/* Handle icmp frag needed error. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f6d1e59..dfc5c12 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
SCTP_DBG_OBJCNT_INC(sock);
percpu_counter_inc(&sctp_sockets_allocated);
+ /* Set socket backlog limit. */
+ sk->sk_backlog.limit = sysctl_sctp_rmem[1];
+
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 7018eef..f96c2fe 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -160,16 +160,15 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt)
(void)rpc_ntop(sap, buf, sizeof(buf));
xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
- (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
- (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
- NIPQUAD(sin->sin_addr.s_addr));
+ snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
- (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
/* netid */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7124129..75ab08e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -297,12 +297,11 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
switch (sap->sa_family) {
case AF_INET:
sin = xs_addr_in(xprt);
- (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
- NIPQUAD(sin->sin_addr.s_addr));
+ snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
break;
case AF_INET6:
sin6 = xs_addr_in6(xprt);
- (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
+ snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
break;
default:
BUG();
@@ -315,10 +314,10 @@ static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
struct sockaddr *sap = xs_addr(xprt);
char buf[128];
- (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
- (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 327011f..7809137 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@
#define MAX_ADDR_STR 32
-static struct media *media_list = NULL;
+static struct media media_list[MAX_MEDIA];
static u32 media_count = 0;
-struct bearer *tipc_bearers = NULL;
+struct bearer tipc_bearers[MAX_BEARERS];
/**
* media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
int res = -EINVAL;
write_lock_bh(&tipc_net_lock);
- if (!media_list)
- goto exit;
+ if (tipc_mode != TIPC_NET_MODE) {
+ warn("Media <%s> rejected, not in networked mode yet\n", name);
+ goto exit;
+ }
if (!media_name_valid(name)) {
warn("Media <%s> rejected, illegal name\n", name);
goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
-int tipc_bearer_init(void)
-{
- int res;
-
- write_lock_bh(&tipc_net_lock);
- tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
- media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
- if (tipc_bearers && media_list) {
- res = 0;
- } else {
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
- res = -ENOMEM;
- }
- write_unlock_bh(&tipc_net_lock);
- return res;
-}
-
void tipc_bearer_stop(void)
{
u32 i;
- if (!tipc_bearers)
- return;
-
for (i = 0; i < MAX_BEARERS; i++) {
if (tipc_bearers[i].active)
tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
if (tipc_bearers[i].active)
bearer_disable(tipc_bearers[i].publ.name);
}
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
media_count = 0;
}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ca57348..000228e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {
struct link;
-extern struct bearer *tipc_bearers;
+extern struct bearer tipc_bearers[];
void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6f50f64..1a7e466 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1882,6 +1882,15 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
(msg_destnode(msg) != tipc_own_addr)))
goto cont;
+ /* Discard non-routeable messages destined for another node */
+
+ if (unlikely(!msg_isdata(msg) &&
+ (msg_destnode(msg) != tipc_own_addr))) {
+ if ((msg_user(msg) != CONN_MANAGER) &&
+ (msg_user(msg) != MSG_FRAGMENTER))
+ goto cont;
+ }
+
/* Locate unicast link endpoint that should handle message */
n_ptr = tipc_node_find(msg_prevnode(msg));
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7906608..f25b1cd 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
*/
DEFINE_RWLOCK(tipc_net_lock);
-struct network tipc_net = { NULL };
+struct _zone *tipc_zones[256] = { NULL, };
+struct network tipc_net = { tipc_zones };
struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
}
}
-static int net_init(void)
-{
- memset(&tipc_net, 0, sizeof(tipc_net));
- tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
- if (!tipc_net.zones) {
- return -ENOMEM;
- }
- return 0;
-}
-
static void net_stop(void)
{
u32 z_num;
- if (!tipc_net.zones)
- return;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+ for (z_num = 1; z_num <= tipc_max_zones; z_num++)
tipc_zone_delete(tipc_net.zones[z_num]);
- }
- kfree(tipc_net.zones);
- tipc_net.zones = NULL;
}
static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
tipc_named_reinit();
tipc_port_reinit();
- if ((res = tipc_bearer_init()) ||
- (res = net_init()) ||
- (res = tipc_cltr_init()) ||
+ if ((res = tipc_cltr_init()) ||
(res = tipc_bclink_init())) {
return res;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1ea64f0..4b235fc 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
if (!sock_owned_by_user(sk)) {
res = filter_rcv(sk, buf);
} else {
- sk_add_backlog(sk, buf);
- res = TIPC_OK;
+ if (sk_add_backlog(sk, buf))
+ res = TIPC_ERR_OVERLOAD;
+ else
+ res = TIPC_OK;
}
bh_unlock_sock(sk);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ac91f0d..ff123e5 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -76,19 +76,6 @@ struct top_srv {
static struct top_srv topsrv = { 0 };
/**
- * htohl - convert value to endianness used by destination
- * @in: value to convert
- * @swap: non-zero if endianness must be reversed
- *
- * Returns converted value
- */
-
-static u32 htohl(u32 in, int swap)
-{
- return swap ? swab32(in) : in;
-}
-
-/**
* subscr_send_event - send a message containing a tipc_event to the subscriber
*
* Note: Must not hold subscriber's server port lock, since tipc_send() will
@@ -107,11 +94,11 @@ static void subscr_send_event(struct subscription *sub,
msg_sect.iov_base = (void *)&sub->evt;
msg_sect.iov_len = sizeof(struct tipc_event);
- sub->evt.event = htohl(event, sub->swap);
- sub->evt.found_lower = htohl(found_lower, sub->swap);
- sub->evt.found_upper = htohl(found_upper, sub->swap);
- sub->evt.port.ref = htohl(port_ref, sub->swap);
- sub->evt.port.node = htohl(node, sub->swap);
+ sub->evt.event = htonl(event);
+ sub->evt.found_lower = htonl(found_lower);
+ sub->evt.found_upper = htonl(found_upper);
+ sub->evt.port.ref = htonl(port_ref);
+ sub->evt.port.node = htonl(node);
tipc_send(sub->server_ref, 1, &msg_sect);
}
@@ -287,16 +274,23 @@ static void subscr_cancel(struct tipc_subscr *s,
{
struct subscription *sub;
struct subscription *sub_temp;
+ __u32 type, lower, upper;
int found = 0;
/* Find first matching subscription, exit if not found */
+ type = ntohl(s->seq.type);
+ lower = ntohl(s->seq.lower);
+ upper = ntohl(s->seq.upper);
+
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
- if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
- found = 1;
- break;
- }
+ if ((type == sub->seq.type) &&
+ (lower == sub->seq.lower) &&
+ (upper == sub->seq.upper)) {
+ found = 1;
+ break;
+ }
}
if (!found)
return;
@@ -325,16 +319,10 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
struct subscriber *subscriber)
{
struct subscription *sub;
- int swap;
-
- /* Determine subscriber's endianness */
-
- swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
/* Detect & process a subscription cancellation request */
- if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
- s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
+ if (ntohl(s->filter) & TIPC_SUB_CANCEL) {
subscr_cancel(s, subscriber);
return NULL;
}
@@ -359,11 +347,11 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
/* Initialize subscription object */
- sub->seq.type = htohl(s->seq.type, swap);
- sub->seq.lower = htohl(s->seq.lower, swap);
- sub->seq.upper = htohl(s->seq.upper, swap);
- sub->timeout = htohl(s->timeout, swap);
- sub->filter = htohl(s->filter, swap);
+ sub->seq.type = ntohl(s->seq.type);
+ sub->seq.lower = ntohl(s->seq.lower);
+ sub->seq.upper = ntohl(s->seq.upper);
+ sub->timeout = ntohl(s->timeout);
+ sub->filter = ntohl(s->filter);
if ((!(sub->filter & TIPC_SUB_PORTS) ==
!(sub->filter & TIPC_SUB_SERVICE)) ||
(sub->seq.lower > sub->seq.upper)) {
@@ -376,7 +364,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
sub->server_ref = subscriber->port_ref;
- sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
atomic_inc(&topsrv.subscription_count);
if (sub->timeout != TIPC_WAIT_FOREVER) {
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf..c20f496 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -53,7 +53,6 @@ typedef void (*tipc_subscr_event) (struct subscription *sub,
* @nameseq_list: adjacent subscriptions in name sequence's subscription list
* @subscription_list: adjacent subscriptions in subscriber's subscription list
* @server_ref: object reference of server port associated with subscription
- * @swap: indicates if subscriber uses opposite endianness in its messages
* @evt: template for events generated by subscription
*/
@@ -66,7 +65,6 @@ struct subscription {
struct list_head nameseq_list;
struct list_head subscription_list;
u32 server_ref;
- int swap;
struct tipc_event evt;
};
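Taken together, the two TIPC hunks above drop the per-subscriber endianness detection (the swap field and htohl()), so subscription requests are expected in network byte order and events are delivered the same way. A small sketch of how a subscriber might now fill struct tipc_subscr; the service type and range are example values, and the struct plus the TIPC_SUB_*/TIPC_WAIT_FOREVER constants are assumed to come from <linux/tipc.h>.

#include <string.h>
#include <arpa/inet.h>
#include <linux/tipc.h>

static void fill_subscription(struct tipc_subscr *s)
{
	memset(s, 0, sizeof(*s));
	s->seq.type  = htonl(1000);		/* example service type */
	s->seq.lower = htonl(0);
	s->seq.upper = htonl(~0U);		/* whole instance range */
	s->timeout   = htonl(TIPC_WAIT_FOREVER);
	s->filter    = htonl(TIPC_SUB_PORTS);
	/* usr_handle is opaque to the topology server and echoed back in events */
}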
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 3e1efe5..52e3042 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
- sk_add_backlog(sk, skb);
+ queued = !sk_add_backlog(sk, skb);
}
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 34a5ef8..843e066 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1372,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return err;
}
-static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct xfrm_policy_afinfo *afinfo =
xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1381,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
if (!afinfo)
return -EINVAL;
- err = afinfo->fill_dst(xdst, dev);
+ err = afinfo->fill_dst(xdst, dev, fl);
xfrm_policy_put_afinfo(afinfo);
@@ -1486,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
- err = xfrm_fill_dst(xdst, dev);
+ err = xfrm_fill_dst(xdst, dev, fl);
if (err)
goto free_dst;