Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/8021q/vlan_dev.c | 12
-rw-r--r--  net/9p/conv.c | 1
-rw-r--r--  net/9p/mux.c | 9
-rw-r--r--  net/bluetooth/hci_core.c | 8
-rw-r--r--  net/bluetooth/hci_sock.c | 28
-rw-r--r--  net/bridge/br_device.c | 4
-rw-r--r--  net/bridge/br_fdb.c | 5
-rw-r--r--  net/bridge/br_forward.c | 21
-rw-r--r--  net/bridge/br_if.c | 16
-rw-r--r--  net/bridge/br_input.c | 51
-rw-r--r--  net/bridge/br_netfilter.c | 14
-rw-r--r--  net/bridge/br_private.h | 8
-rw-r--r--  net/bridge/netfilter/ebtables.c | 1
-rw-r--r--  net/core/datagram.c | 3
-rw-r--r--  net/core/dev_mcast.c | 14
-rw-r--r--  net/core/neighbour.c | 3
-rw-r--r--  net/core/pktgen.c | 18
-rw-r--r--  net/core/sock.c | 106
-rw-r--r--  net/decnet/dn_dev.c | 2
-rw-r--r--  net/ieee80211/ieee80211_rx.c | 6
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c | 2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 56
-rw-r--r--  net/ipv4/ah4.c | 2
-rw-r--r--  net/ipv4/devinet.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 4
-rw-r--r--  net/ipv4/ip_sockglue.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 1
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 11
-rw-r--r--  net/ipv4/tcp_input.c | 27
-rw-r--r--  net/ipv4/tcp_ipv4.c | 19
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 5
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 4
-rw-r--r--  net/ipv6/ndisc.c | 11
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 1
-rw-r--r--  net/ipv6/raw.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 18
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/mac80211/ieee80211.c | 2
-rw-r--r--  net/mac80211/rc80211_simple.c | 2
-rw-r--r--  net/mac80211/wme.c | 2
-rw-r--r--  net/netfilter/nf_sockopt.c | 36
-rw-r--r--  net/netfilter/nfnetlink_log.c | 13
-rw-r--r--  net/netfilter/xt_tcpudp.c | 2
-rw-r--r--  net/rose/rose_loopback.c | 4
-rw-r--r--  net/rose/rose_route.c | 15
-rw-r--r--  net/sched/act_api.c | 8
-rw-r--r--  net/sched/act_police.c | 4
-rw-r--r--  net/sched/cls_u32.c | 2
-rw-r--r--  net/sched/sch_cbq.c | 2
-rw-r--r--  net/sched/sch_prio.c | 2
-rw-r--r--  net/sched/sch_sfq.c | 53
-rw-r--r--  net/sctp/associola.c | 21
-rw-r--r--  net/sctp/bind_addr.c | 70
-rw-r--r--  net/sctp/endpointola.c | 27
-rw-r--r--  net/sctp/input.c | 8
-rw-r--r--  net/sctp/inqueue.c | 8
-rw-r--r--  net/sctp/ipv6.c | 46
-rw-r--r--  net/sctp/outqueue.c | 7
-rw-r--r--  net/sctp/protocol.c | 79
-rw-r--r--  net/sctp/sm_make_chunk.c | 176
-rw-r--r--  net/sctp/sm_sideeffect.c | 8
-rw-r--r--  net/sctp/sm_statefuns.c | 294
-rw-r--r--  net/sctp/sm_statetable.c | 16
-rw-r--r--  net/sctp/socket.c | 137
-rw-r--r--  net/sctp/ulpqueue.c | 75
-rw-r--r--  net/socket.c | 3
-rw-r--r--  net/sunrpc/svcsock.c | 5
-rw-r--r--  net/wireless/core.c | 2
-rw-r--r--  net/wireless/sysfs.c | 2
74 files changed, 1028 insertions(+), 621 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 1583c5e..2a54691 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -562,8 +562,6 @@ static int register_vlan_device(struct net_device *real_dev,
if (err < 0)
goto out_free_newdev;
- /* Account for reference in struct vlan_dev_info */
- dev_hold(real_dev);
#ifdef VLAN_DEBUG
printk(VLAN_DBG "Allocated new device successfully, returning.\n");
#endif
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4bab322..328759c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -116,12 +116,22 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type* ptype, struct net_device *orig_dev)
{
unsigned char *rawp = NULL;
- struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
+ struct vlan_hdr *vhdr;
unsigned short vid;
struct net_device_stats *stats;
unsigned short vlan_TCI;
__be16 proto;
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ return -1;
+
+ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ vhdr = (struct vlan_hdr *)(skb->data);
+
/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
vlan_TCI = ntohs(vhdr->h_vlan_TCI);
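
The vlan_skb_recv() fix above is an instance of the standard hardening pattern for packet_type receive handlers: unshare the skb before modifying it, and make sure the header bytes are actually linear before casting skb->data. A minimal sketch of the pattern, using a hypothetical protocol (my_proto_rcv(), struct my_hdr and MY_HLEN are illustrative, not from this patch):

static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct my_hdr *hdr;

	/* Unshare: clone if someone else holds a reference to this skb. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;

	/* Guarantee MY_HLEN linear bytes before dereferencing the header. */
	if (unlikely(!pskb_may_pull(skb, MY_HLEN))) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	hdr = (struct my_hdr *)skb->data;
	/* ... parse hdr safely ... */
	return NET_RX_SUCCESS;
}

Without the pskb_may_pull() check, a runt frame could make the old code read past the end of a short or fragmented skb.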
diff --git a/net/9p/conv.c b/net/9p/conv.c
index f2a041c..d979d95 100644
--- a/net/9p/conv.c
+++ b/net/9p/conv.c
@@ -796,6 +796,7 @@ struct p9_fcall *p9_create_twrite_u(u32 fid, u64 offset, u32 count,
if (err) {
kfree(fc);
fc = ERR_PTR(err);
+ goto error;
}
if (buf_check_overflow(bufp)) {
diff --git a/net/9p/mux.c b/net/9p/mux.c
index acb0388..5d70558 100644
--- a/net/9p/mux.c
+++ b/net/9p/mux.c
@@ -288,9 +288,10 @@ struct p9_conn *p9_conn_create(struct p9_transport *trans, int msize,
m->extended = extended;
m->trans = trans;
m->tagpool = p9_idpool_create();
- if (!m->tagpool) {
+ if (IS_ERR(m->tagpool)) {
+ mtmp = ERR_PTR(-ENOMEM);
kfree(m);
- return ERR_PTR(PTR_ERR(m->tagpool));
+ return mtmp;
}
m->err = 0;
@@ -308,8 +309,10 @@ struct p9_conn *p9_conn_create(struct p9_transport *trans, int msize,
memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
m->poll_task = NULL;
n = p9_mux_poll_start(m);
- if (n)
+ if (n) {
+ kfree(m);
return ERR_PTR(n);
+ }
n = trans->poll(trans, &m->pt);
if (n & POLLIN) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 63caa41..18e3afc 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -183,6 +183,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
struct sk_buff *skb;
__le16 param;
+ __u8 flt_type;
BT_DBG("%s %ld", hdev->name, opt);
@@ -233,11 +234,8 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
/* Optional initialization */
/* Clear Event Filters */
- {
- struct hci_cp_set_event_flt cp;
- cp.flt_type = HCI_FLT_CLEAR_ALL;
- hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
- }
+ flt_type = HCI_FLT_CLEAR_ALL;
+ hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type);
/* Page timeout ~20 secs */
param = cpu_to_le16(0x8000);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1dae3df..5ccea5f 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -37,6 +37,7 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>
@@ -70,15 +71,15 @@ static struct hci_sec_filter hci_sec_filter = {
{
{ 0x0 },
/* OGF_LINK_CTL */
- { 0xbe000006, 0x00000001, 0x000000, 0x00 },
+ { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
/* OGF_LINK_POLICY */
- { 0x00005200, 0x00000000, 0x000000, 0x00 },
+ { 0x00005200, 0x00000000, 0x00000000, 0x00 },
/* OGF_HOST_CTL */
- { 0xaab00200, 0x2b402aaa, 0x020154, 0x00 },
+ { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
/* OGF_INFO_PARAM */
- { 0x000002be, 0x00000000, 0x000000, 0x00 },
+ { 0x000002be, 0x00000000, 0x00000000, 0x00 },
/* OGF_STATUS_PARAM */
- { 0x000000ea, 0x00000000, 0x000000, 0x00 }
+ { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
}
};
@@ -342,9 +343,24 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
if (mask & HCI_CMSG_TSTAMP) {
struct timeval tv;
+ void *data;
+ int len;
skb_get_timestamp(skb, &tv);
- put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(tv), &tv);
+
+ data = &tv;
+ len = sizeof(tv);
+#ifdef CONFIG_COMPAT
+ if (msg->msg_flags & MSG_CMSG_COMPAT) {
+ struct compat_timeval ctv;
+ ctv.tv_sec = tv.tv_sec;
+ ctv.tv_usec = tv.tv_usec;
+ data = &ctv;
+ len = sizeof(ctv);
+ }
+#endif
+
+ put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
}
}
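
On the userspace side the timestamp still arrives as HCI_CMSG_TSTAMP ancillary data; the fix only ensures a 32-bit (compat) receiver gets a struct timeval of the size its headers declare. A hedged userspace sketch of reading it (sock_fd and buf are assumed to be set up elsewhere; SOL_HCI and HCI_CMSG_TSTAMP come from the BlueZ HCI headers):

	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	char cbuf[CMSG_SPACE(sizeof(struct timeval))];
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct timeval tv;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(sock_fd, &msg, 0) < 0)
		perror("recvmsg");

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_HCI &&
		    cmsg->cmsg_type == HCI_CMSG_TSTAMP)
			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
	}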
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 0eded17..99292e8 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -41,11 +41,11 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb, ETH_HLEN);
if (dest[0] & 1)
- br_flood_deliver(br, skb, 0);
+ br_flood_deliver(br, skb);
else if ((dst = __br_fdb_get(br, dest)) != NULL)
br_deliver(dst->dst, skb);
else
- br_flood_deliver(br, skb, 0);
+ br_flood_deliver(br, skb);
return 0;
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 69b7097..eb57502 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
if (hold_time(br) == 0)
return;
+ /* ignore packets unless we are using this port */
+ if (!(source->state == BR_STATE_LEARNING ||
+ source->state == BR_STATE_FORWARDING))
+ return;
+
fdb = fdb_find(head, addr);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index ada7f49..bdd7c35 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -100,24 +100,13 @@ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
}
/* called under bridge lock */
-static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
+static void br_flood(struct net_bridge *br, struct sk_buff *skb,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
struct net_bridge_port *p;
struct net_bridge_port *prev;
- if (clone) {
- struct sk_buff *skb2;
-
- if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- br->statistics.tx_dropped++;
- return;
- }
-
- skb = skb2;
- }
-
prev = NULL;
list_for_each_entry_rcu(p, &br->port_list, list) {
@@ -148,13 +137,13 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
/* called with rcu_read_lock */
-void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
- br_flood(br, skb, clone, __br_deliver);
+ br_flood(br, skb, __br_deliver);
}
/* called under bridge lock */
-void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
{
- br_flood(br, skb, clone, __br_forward);
+ br_flood(br, skb, __br_forward);
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 749f0e8..9272f12 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -33,17 +33,17 @@
*/
static int port_cost(struct net_device *dev)
{
- if (dev->ethtool_ops->get_settings) {
- struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- int err = dev->ethtool_ops->get_settings(dev, &ecmd);
- if (!err) {
+ if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+ if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
switch(ecmd.speed) {
- case SPEED_100:
- return 19;
- case SPEED_1000:
- return 4;
case SPEED_10000:
return 2;
+ case SPEED_1000:
+ return 4;
+ case SPEED_100:
+ return 19;
case SPEED_10:
return 100;
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5c18595..3a8a015 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -43,7 +43,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
- int passedup = 0;
+ struct sk_buff *skb2;
if (!p || p->state == BR_STATE_DISABLED)
goto drop;
@@ -55,39 +55,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
if (p->state == BR_STATE_LEARNING)
goto drop;
- if (br->dev->flags & IFF_PROMISC) {
- struct sk_buff *skb2;
+ /* The packet skb2 goes to the local host (NULL to skip). */
+ skb2 = NULL;
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 != NULL) {
- passedup = 1;
- br_pass_frame_up(br, skb2);
- }
- }
+ if (br->dev->flags & IFF_PROMISC)
+ skb2 = skb;
+
+ dst = NULL;
if (is_multicast_ether_addr(dest)) {
br->statistics.multicast++;
- br_flood_forward(br, skb, !passedup);
- if (!passedup)
- br_pass_frame_up(br, skb);
- goto out;
+ skb2 = skb;
+ } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
+ skb2 = skb;
+ /* Do not forward the packet since it's local. */
+ skb = NULL;
}
- dst = __br_fdb_get(br, dest);
- if (dst != NULL && dst->is_local) {
- if (!passedup)
- br_pass_frame_up(br, skb);
- else
- kfree_skb(skb);
- goto out;
- }
+ if (skb2 == skb)
+ skb2 = skb_clone(skb, GFP_ATOMIC);
- if (dst != NULL) {
- br_forward(dst->dst, skb);
- goto out;
- }
+ if (skb2)
+ br_pass_frame_up(br, skb2);
- br_flood_forward(br, skb, 0);
+ if (skb) {
+ if (dst)
+ br_forward(dst->dst, skb);
+ else
+ br_flood_forward(br, skb);
+ }
out:
return 0;
@@ -101,9 +97,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
- if (p && p->state != BR_STATE_DISABLED)
+ if (p)
br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
return 0; /* process further */
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa77987..fc13130 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -183,7 +183,7 @@ int nf_bridge_copy_header(struct sk_buff *skb)
int err;
int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
- err = skb_cow(skb, header_size);
+ err = skb_cow_head(skb, header_size);
if (err)
return err;
@@ -509,8 +509,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
int (*okfn)(struct sk_buff *))
{
struct iphdr *iph;
- __u32 len;
struct sk_buff *skb = *pskb;
+ __u32 len = nf_bridge_encap_header_len(skb);
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ return NF_STOLEN;
+
+ if (unlikely(!pskb_may_pull(skb, len)))
+ goto out;
if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
IS_PPPOE_IPV6(skb)) {
@@ -518,8 +524,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
if (!brnf_call_ip6tables)
return NF_ACCEPT;
#endif
- if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
- goto out;
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
}
@@ -532,8 +536,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
!IS_PPPOE_IP(skb))
return NF_ACCEPT;
- if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
- goto out;
nf_bridge_pull_encap_header_rcsum(skb);
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 21bf3a9..e6dc6f5 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -170,12 +170,8 @@ extern int br_dev_queue_push_xmit(struct sk_buff *skb);
extern void br_forward(const struct net_bridge_port *to,
struct sk_buff *skb);
extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br,
- struct sk_buff *skb,
- int clone);
-extern void br_flood_forward(struct net_bridge *br,
- struct sk_buff *skb,
- int clone);
+extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
+extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
/* br_if.c */
extern void br_port_carrier_check(struct net_bridge_port *p);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4169a2a..6018d0e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1513,6 +1513,7 @@ static struct nf_sockopt_ops ebt_sockopts =
.get_optmin = EBT_BASE_CTL,
.get_optmax = EBT_SO_GET_MAX + 1,
.get = do_ebt_get_ctl,
+ .owner = THIS_MODULE,
};
static int __init ebtables_init(void)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cb056f4..029b93e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -450,6 +450,9 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
__wsum csum;
int chunk = skb->len - hlen;
+ if (!chunk)
+ return 0;
+
/* Skip filled elements.
* Pretty silly, look at memcpy_toiovec, though 8)
*/
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 99aece1..20330c5 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -116,11 +116,13 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
*/
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
- struct dev_addr_list *da;
+ struct dev_addr_list *da, *next;
int err = 0;
netif_tx_lock_bh(to);
- for (da = from->mc_list; da != NULL; da = da->next) {
+ da = from->mc_list;
+ while (da != NULL) {
+ next = da->next;
if (!da->da_synced) {
err = __dev_addr_add(&to->mc_list, &to->mc_count,
da->da_addr, da->da_addrlen, 0);
@@ -134,6 +136,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
__dev_addr_delete(&from->mc_list, &from->mc_count,
da->da_addr, da->da_addrlen, 0);
}
+ da = next;
}
if (!err)
__dev_set_rx_mode(to);
@@ -156,12 +159,14 @@ EXPORT_SYMBOL(dev_mc_sync);
*/
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
- struct dev_addr_list *da;
+ struct dev_addr_list *da, *next;
netif_tx_lock_bh(from);
netif_tx_lock_bh(to);
- for (da = from->mc_list; da != NULL; da = da->next) {
+ da = from->mc_list;
+ while (da != NULL) {
+ next = da->next;
if (!da->da_synced)
continue;
__dev_addr_delete(&to->mc_list, &to->mc_count,
@@ -169,6 +174,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
da->da_synced = 0;
__dev_addr_delete(&from->mc_list, &from->mc_count,
da->da_addr, da->da_addrlen, 0);
+ da = next;
}
__dev_set_rx_mode(to);
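
Both dev_mc_sync() and dev_mc_unsync() switch from a for loop to an explicit while because __dev_addr_delete() can free the entry the loop is standing on; the successor must be captured before the body runs. The generic shape of the idiom, as a self-contained sketch:

/* Walk a singly linked list whose nodes the loop body may free. */
struct node {
	struct node *next;
};

static void walk(struct node *head, void (*maybe_free)(struct node *))
{
	struct node *cur = head, *next;

	while (cur) {
		next = cur->next;	/* capture before cur may be freed */
		maybe_free(cur);
		cur = next;
	}
}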
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca2a153..f7de8f2 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -33,6 +33,7 @@
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
+#include <linux/log2.h>
#define NEIGH_DEBUG 1
@@ -311,7 +312,7 @@ static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
NEIGH_CACHE_STAT_INC(tbl, hash_grows);
- BUG_ON(new_entries & (new_entries - 1));
+ BUG_ON(!is_power_of_2(new_entries));
new_hash = neigh_hash_alloc(new_entries);
if (!new_hash)
return;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7bae576..803d0c88 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -111,6 +111,9 @@
*
* 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
*
+ * Fixed src_mac command to set source mac of packet to value specified in
+ * command by Adit Ranadive <adit.262@gmail.com>
+ *
*/
#include <linux/sys.h>
#include <linux/types.h>
@@ -380,7 +383,6 @@ struct pktgen_thread {
/* Field for thread to receive "posted" events terminate, stop ifs etc. */
u32 control;
- int pid;
int cpu;
wait_queue_head_t queue;
@@ -1452,8 +1454,11 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "src_mac")) {
char *v = valstr;
+ unsigned char old_smac[ETH_ALEN];
unsigned char *m = pkt_dev->src_mac;
+ memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
+
len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
if (len < 0) {
return len;
@@ -1482,6 +1487,10 @@ static ssize_t pktgen_if_write(struct file *file,
}
}
+ /* Set up Src MAC */
+ if (compare_ether_addr(old_smac, pkt_dev->src_mac))
+ memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN);
+
sprintf(pg_result, "OK: srcmac");
return count;
}
@@ -3331,8 +3340,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
}
if ((netif_queue_stopped(odev) ||
- netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
- need_resched()) {
+ (pkt_dev->skb &&
+ netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+ need_resched()) {
idle_start = getCurUs();
if (!netif_running(odev)) {
@@ -3462,8 +3472,6 @@ static int pktgen_thread_worker(void *arg)
init_waitqueue_head(&t->queue);
- t->pid = current->pid;
-
pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid);
max_before_softirq = t->max_before_softirq;
diff --git a/net/core/sock.c b/net/core/sock.c
index cfed7d4..190de61 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -362,6 +362,61 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
}
EXPORT_SYMBOL(sk_dst_check);
+static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
+{
+ int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+ char devname[IFNAMSIZ];
+ int index;
+
+ /* Sorry... */
+ ret = -EPERM;
+ if (!capable(CAP_NET_RAW))
+ goto out;
+
+ ret = -EINVAL;
+ if (optlen < 0)
+ goto out;
+
+ /* Bind this socket to a particular device like "eth0",
+ * as specified in the passed interface name. If the
+ * name is "" or the option length is zero the socket
+ * is not bound.
+ */
+ if (optlen > IFNAMSIZ - 1)
+ optlen = IFNAMSIZ - 1;
+ memset(devname, 0, sizeof(devname));
+
+ ret = -EFAULT;
+ if (copy_from_user(devname, optval, optlen))
+ goto out;
+
+ if (devname[0] == '\0') {
+ index = 0;
+ } else {
+ struct net_device *dev = dev_get_by_name(devname);
+
+ ret = -ENODEV;
+ if (!dev)
+ goto out;
+
+ index = dev->ifindex;
+ dev_put(dev);
+ }
+
+ lock_sock(sk);
+ sk->sk_bound_dev_if = index;
+ sk_dst_reset(sk);
+ release_sock(sk);
+
+ ret = 0;
+
+out:
+#endif
+
+ return ret;
+}
+
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.
@@ -390,6 +445,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
}
#endif
+ if (optname == SO_BINDTODEVICE)
+ return sock_bindtodevice(sk, optval, optlen);
+
if (optlen < sizeof(int))
return -EINVAL;
@@ -578,54 +636,6 @@ set_rcvbuf:
ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
break;
-#ifdef CONFIG_NETDEVICES
- case SO_BINDTODEVICE:
- {
- char devname[IFNAMSIZ];
-
- /* Sorry... */
- if (!capable(CAP_NET_RAW)) {
- ret = -EPERM;
- break;
- }
-
- /* Bind this socket to a particular device like "eth0",
- * as specified in the passed interface name. If the
- * name is "" or the option length is zero the socket
- * is not bound.
- */
-
- if (!valbool) {
- sk->sk_bound_dev_if = 0;
- } else {
- if (optlen > IFNAMSIZ - 1)
- optlen = IFNAMSIZ - 1;
- memset(devname, 0, sizeof(devname));
- if (copy_from_user(devname, optval, optlen)) {
- ret = -EFAULT;
- break;
- }
-
- /* Remove any cached route for this socket. */
- sk_dst_reset(sk);
-
- if (devname[0] == '\0') {
- sk->sk_bound_dev_if = 0;
- } else {
- struct net_device *dev = dev_get_by_name(devname);
- if (!dev) {
- ret = -ENODEV;
- break;
- }
- sk->sk_bound_dev_if = dev->ifindex;
- dev_put(dev);
- }
- }
- break;
- }
-#endif
-
-
case SO_ATTACH_FILTER:
ret = -EINVAL;
if (optlen == sizeof(struct sock_fprog)) {
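
Hoisting SO_BINDTODEVICE into sock_bindtodevice() does not change the user-visible contract: the caller needs CAP_NET_RAW, a name like "eth0" binds, and an empty name or zero length unbinds. A short userspace sketch (fd is any socket; the interface name is just an example):

	const char ifname[] = "eth0";

	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, sizeof(ifname)) < 0)
		perror("SO_BINDTODEVICE");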
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index fa6604f..8def682 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -814,7 +814,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
ifa = ifa->ifa_next, dn_idx++) {
if (dn_idx < skip_naddr)
- goto cont;
+ continue;
if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWADDR,
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index f2de2e4..6284c99b 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
frag = WLAN_GET_SEQ_FRAG(sc);
hdrlen = ieee80211_get_hdrlen(fc);
+ if (skb->len < hdrlen) {
+ printk(KERN_INFO "%s: invalid SKB length %d\n",
+ dev->name, skb->len);
+ goto rx_dropped;
+ }
+
/* Put this code here so that we avoid duplicating it in all
* Rx paths. - Jean II */
#ifdef CONFIG_WIRELESS_EXT
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index afb6c66..e475f2e 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -273,8 +273,6 @@ ieee80211softmac_assoc_work(struct work_struct *work)
ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
if (ieee80211softmac_start_scan(mac)) {
dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
- mac->associnfo.associating = 0;
- mac->associnfo.associated = 0;
}
goto out;
} else {
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index d054e92..5742dc8 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -70,44 +70,30 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
char *extra)
{
struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
- struct ieee80211softmac_network *n;
struct ieee80211softmac_auth_queue_item *authptr;
int length = 0;
check_assoc_again:
mutex_lock(&sm->associnfo.mutex);
- /* Check if we're already associating to this or another network
- * If it's another network, cancel and start over with our new network
- * If it's our network, ignore the change, we're already doing it!
- */
if((sm->associnfo.associating || sm->associnfo.associated) &&
(data->essid.flags && data->essid.length)) {
- /* Get the associating network */
- n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
- if(n && n->essid.len == data->essid.length &&
- !memcmp(n->essid.data, extra, n->essid.len)) {
- dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
- MAC_ARG(sm->associnfo.bssid));
- goto out;
- } else {
- dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
- /* Cancel assoc work */
- cancel_delayed_work(&sm->associnfo.work);
- /* We don't have to do this, but it's a little cleaner */
- list_for_each_entry(authptr, &sm->auth_queue, list)
- cancel_delayed_work(&authptr->work);
- sm->associnfo.bssvalid = 0;
- sm->associnfo.bssfixed = 0;
- sm->associnfo.associating = 0;
- sm->associnfo.associated = 0;
- /* We must unlock to avoid deadlocks with the assoc workqueue
- * on the associnfo.mutex */
- mutex_unlock(&sm->associnfo.mutex);
- flush_scheduled_work();
- /* Avoid race! Check assoc status again. Maybe someone started an
- * association while we flushed. */
- goto check_assoc_again;
- }
+ dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
+ /* Cancel assoc work */
+ cancel_delayed_work(&sm->associnfo.work);
+ /* We don't have to do this, but it's a little cleaner */
+ list_for_each_entry(authptr, &sm->auth_queue, list)
+ cancel_delayed_work(&authptr->work);
+ sm->associnfo.bssvalid = 0;
+ sm->associnfo.bssfixed = 0;
+ sm->associnfo.associating = 0;
+ sm->associnfo.associated = 0;
+ /* We must unlock to avoid deadlocks with the assoc workqueue
+ * on the associnfo.mutex */
+ mutex_unlock(&sm->associnfo.mutex);
+ flush_scheduled_work();
+ /* Avoid race! Check assoc status again. Maybe someone started an
+ * association while we flushed. */
+ goto check_assoc_again;
}
sm->associnfo.static_essid = 0;
@@ -128,7 +114,7 @@ check_assoc_again:
sm->associnfo.associating = 1;
/* queue lower level code to do work (if necessary) */
schedule_delayed_work(&sm->associnfo.work, 0);
-out:
+
mutex_unlock(&sm->associnfo.mutex);
return 0;
@@ -153,13 +139,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev,
data->essid.length = sm->associnfo.req_essid.len;
data->essid.flags = 1; /* active */
memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
- }
-
+ dprintk(KERN_INFO PFX "Getting essid from req_essid\n");
+ } else if (sm->associnfo.associated || sm->associnfo.associating) {
/* If we're associating/associated, return that */
- if (sm->associnfo.associated || sm->associnfo.associating) {
data->essid.length = sm->associnfo.associate_essid.len;
data->essid.flags = 1; /* active */
memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len);
+ dprintk(KERN_INFO PFX "Getting essid from associate_essid\n");
}
mutex_unlock(&sm->associnfo.mutex);
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7a23e59..39f6211 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -46,7 +46,7 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
memcpy(daddr, optptr+optlen-4, 4);
/* Fall through */
default:
- memset(optptr+2, 0, optlen-2);
+ memset(optptr, 0, optlen);
}
l -= optlen;
optptr += optlen;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5b77bda..5dbe580 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1193,7 +1193,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
ifa = ifa->ifa_next, ip_idx++) {
if (ip_idx < s_ip_idx)
- goto cont;
+ continue;
if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWADDR, NLM_F_MULTI) <= 0)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index dbeacd8..def007e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -836,12 +836,16 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return inet_diag_get_exact(skb, nlh);
}
+static DEFINE_MUTEX(inet_diag_mutex);
+
static void inet_diag_rcv(struct sock *sk, int len)
{
unsigned int qlen = 0;
do {
+ mutex_lock(&inet_diag_mutex);
netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg);
+ mutex_unlock(&inet_diag_mutex);
} while (qlen);
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 4d54457..6b420ae 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -625,6 +625,10 @@ static int do_ip_setsockopt(struct sock *sk, int level,
{
struct ip_mreqn mreq;
+ err = -EPROTO;
+ if (inet_sk(sk)->is_icsk)
+ break;
+
if (optlen < sizeof(struct ip_mreq))
goto e_inval;
err = -EFAULT;
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 902fd57..f656d41 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -2339,6 +2339,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
.get_optmin = IP_VS_BASE_CTL,
.get_optmax = IP_VS_SO_GET_MAX+1,
.get = do_ip_vs_get_ctl,
+ .owner = THIS_MODULE,
};
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index d1149ab..29114a9 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1161,6 +1161,7 @@ static struct nf_sockopt_ops arpt_sockopts = {
.get_optmin = ARPT_BASE_CTL,
.get_optmax = ARPT_SO_GET_MAX+1,
.get = do_arpt_get_ctl,
+ .owner = THIS_MODULE,
};
static int __init arp_tables_init(void)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e1b402c..6486894 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2296,6 +2296,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
#ifdef CONFIG_COMPAT
.compat_get = compat_do_ipt_get_ctl,
#endif
+ .owner = THIS_MODULE,
};
static struct xt_match icmp_matchstruct __read_mostly = {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d9b5177..f813e02 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -87,14 +87,10 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
if (iph == NULL)
return -NF_DROP;
- /* Never happen */
- if (iph->frag_off & htons(IP_OFFSET)) {
- if (net_ratelimit()) {
- printk(KERN_ERR "ipv4_get_l4proto: Frag of proto %u\n",
- iph->protocol);
- }
+ /* Conntrack defragments packets, we might still see fragments
+ * inside ICMP packets though. */
+ if (iph->frag_off & htons(IP_OFFSET))
return -NF_DROP;
- }
*dataoff = nhoff + (iph->ihl << 2);
*protonum = iph->protocol;
@@ -403,6 +399,7 @@ static struct nf_sockopt_ops so_getorigdst = {
.get_optmin = SO_ORIGINAL_DST,
.get_optmax = SO_ORIGINAL_DST+1,
.get = &getorigdst,
+ .owner = THIS_MODULE,
};
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f030435..f893e90 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
tcp_grow_window(sk, skb);
}
+static u32 tcp_rto_min(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ u32 rto_min = TCP_RTO_MIN;
+
+ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+ rto_min = dst->metrics[RTAX_RTO_MIN-1];
+ return rto_min;
+}
+
/* Called to compute a smoothed rtt estimate. The data fed to this
* routine either comes from timestamps, or from segments that were
* known _not_ to have been retransmitted [see Karn/Partridge
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
if (tp->mdev_max < tp->rttvar)
tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
tp->rtt_seq = tp->snd_nxt;
- tp->mdev_max = TCP_RTO_MIN;
+ tp->mdev_max = tcp_rto_min(sk);
}
} else {
/* no previous measure. */
tp->srtt = m<<3; /* take the measured time to be rtt */
tp->mdev = m<<1; /* make sure rto = 3*rtt */
- tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+ tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
tp->rtt_seq = tp->snd_nxt;
}
}
@@ -755,7 +765,15 @@ void tcp_update_metrics(struct sock *sk)
}
}
-/* Numbers are taken from RFC2414. */
+/* Numbers are taken from RFC3390.
+ *
+ * John Heffner states:
+ *
+ * The RFC specifies a window of no more than 4380 bytes
+ * unless 2*MSS > 4380. Reading the pseudocode in the RFC
+ * is a bit misleading because they use a clamp at 4380 bytes
+ * rather than use a multiplier in the relevant range.
+ */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -2402,6 +2420,9 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
__u32 dval = min(tp->fackets_out, packets_acked);
tp->fackets_out -= dval;
}
+ /* hint's skb might be NULL but we don't need to care */
+ tp->fastpath_cnt_hint -= min_t(u32, packets_acked,
+ tp->fastpath_cnt_hint);
tp->packets_out -= packets_acked;
BUG_ON(tcp_skb_pcount(skb) == 0);
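
The updated comment points at RFC3390, whose initial window is min(4*MSS, max(2*MSS, 4380 bytes)); the 4380-byte clamp is what makes the segment count depend on MSS. A worked sketch of that computation (illustrative only, not the kernel's exact tcp_init_cwnd() code path):

/* RFC3390 initial congestion window, expressed in segments. */
static u32 rfc3390_init_cwnd(u32 mss)
{
	u32 bytes = min(4U * mss, max(2U * mss, 4380U));

	return max(bytes / mss, 2U);	/* mss=1460 -> 3, mss=536 -> 4 */
}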
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9c94627..e089a97 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -833,8 +833,7 @@ static struct tcp_md5sig_key *
return NULL;
for (i = 0; i < tp->md5sig_info->entries4; i++) {
if (tp->md5sig_info->keys4[i].addr == addr)
- return (struct tcp_md5sig_key *)
- &tp->md5sig_info->keys4[i];
+ return &tp->md5sig_info->keys4[i].base;
}
return NULL;
}
@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
if (key) {
/* Pre-existing entry - just update that one. */
- kfree(key->key);
- key->key = newkey;
- key->keylen = newkeylen;
+ kfree(key->base.key);
+ key->base.key = newkey;
+ key->base.keylen = newkeylen;
} else {
struct tcp_md5sig_info *md5sig;
@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
md5sig->alloced4++;
}
md5sig->entries4++;
- md5sig->keys4[md5sig->entries4 - 1].addr = addr;
- md5sig->keys4[md5sig->entries4 - 1].key = newkey;
- md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen;
+ md5sig->keys4[md5sig->entries4 - 1].addr = addr;
+ md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
+ md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
}
return 0;
}
@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
for (i = 0; i < tp->md5sig_info->entries4; i++) {
if (tp->md5sig_info->keys4[i].addr == addr) {
/* Free the key */
- kfree(tp->md5sig_info->keys4[i].key);
+ kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4--;
if (tp->md5sig_info->entries4 == 0) {
@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk)
if (tp->md5sig_info->entries4) {
int i;
for (i = 0; i < tp->md5sig_info->entries4; i++)
- kfree(tp->md5sig_info->keys4[i].key);
+ kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4 = 0;
tcp_free_md5sig_pool();
}
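
All the keys4[i].key accesses above become keys4[i].base.key because the IPv4 MD5 key entry now embeds the generic struct tcp_md5sig_key as a named member instead of relying on a cast of the containing struct. The pattern in isolation, as a sketch:

/* Embed the generic struct; return &entry->base instead of casting. */
struct tcp_md5sig_key {
	u8 *key;
	u8 keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key base;	/* generic part as first member */
	__be32 addr;
};

static struct tcp_md5sig_key *entry_to_key(struct tcp4_md5sig_key *e)
{
	return &e->base;	/* no cast; stays correct if fields move */
}

The same change is applied to the IPv6 keys6[] entries further down.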
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 2835535..69d4bd1 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -505,6 +505,8 @@ send:
out:
up->len = 0;
up->pending = 0;
+ if (!err)
+ UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
return err;
}
@@ -693,10 +695,8 @@ out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
- if (!err) {
- UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
+ if (!err)
return len;
- }
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 91ef3be..45b4c82 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1021,7 +1021,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
hiscore.rule++;
}
if (ipv6_saddr_preferred(score.addr_type) ||
- (((ifa_result->flags &
+ (((ifa->flags &
(IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) {
score.attrs |= IPV6_SADDR_SCORE_PREFERRED;
if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5dead39..26de3c0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1427,8 +1427,9 @@ void ip6_flush_pending_frames(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
- IP6_INC_STATS(ip6_dst_idev(skb->dst),
- IPSTATS_MIB_OUTDISCARDS);
+ if (skb->dst)
+ IP6_INC_STATS(ip6_dst_idev(skb->dst),
+ IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 761a910..6b038aa 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -554,6 +554,10 @@ done:
{
struct ipv6_mreq mreq;
+ retv = -EPROTO;
+ if (inet_sk(sk)->is_icsk)
+ break;
+
retv = -EFAULT;
if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0358e606..5b59665 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -736,7 +736,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
* so fail our DAD process
*/
addrconf_dad_failure(ifp);
- goto out;
+ return;
} else {
/*
* This is not a dad solicitation.
@@ -1268,9 +1268,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (ipv6_addr_equal(dest, target)) {
on_link = 1;
- } else if (!(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
+ } else if (ipv6_addr_type(target) !=
+ (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: target address is not link-local.\n");
+ "ICMPv6 Redirect: target address is not link-local unicast.\n");
return;
}
@@ -1344,9 +1345,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
}
if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
- !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
+ ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: target address is not link-local.\n");
+ "ICMPv6 Redirect: target address is not link-local unicast.\n");
return;
}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index aeda617..cd9df02 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1462,6 +1462,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
.get_optmin = IP6T_BASE_CTL,
.get_optmax = IP6T_SO_GET_MAX+1,
.get = do_ip6t_get_ctl,
+ .owner = THIS_MODULE,
};
static struct xt_match icmp6_matchstruct __read_mostly = {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e27383d..77167af 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -882,11 +882,10 @@ back_from_confirm:
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = rawv6_push_pending_frames(sk, &fl, rp);
+ release_sock(sk);
}
done:
dst_release(dst);
- if (!inet->hdrincl)
- release_sock(sk);
out:
fl6_sock_release(flowlabel);
return err<0?err:len;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0f7defb..3e06799 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -539,7 +539,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
for (i = 0; i < tp->md5sig_info->entries6; i++) {
if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
- return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
+ return &tp->md5sig_info->keys6[i].base;
}
return NULL;
}
@@ -567,9 +567,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
if (key) {
/* modify existing entry - just update that one */
- kfree(key->key);
- key->key = newkey;
- key->keylen = newkeylen;
+ kfree(key->base.key);
+ key->base.key = newkey;
+ key->base.keylen = newkeylen;
} else {
/* reallocate new list if current one is full. */
if (!tp->md5sig_info) {
@@ -603,8 +603,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
peer);
- tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
- tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
tp->md5sig_info->entries6++;
}
@@ -626,7 +626,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
for (i = 0; i < tp->md5sig_info->entries6; i++) {
if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
/* Free the key */
- kfree(tp->md5sig_info->keys6[i].key);
+ kfree(tp->md5sig_info->keys6[i].base.key);
tp->md5sig_info->entries6--;
if (tp->md5sig_info->entries6 == 0) {
@@ -657,7 +657,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
if (tp->md5sig_info->entries6) {
for (i = 0; i < tp->md5sig_info->entries6; i++)
- kfree(tp->md5sig_info->keys6[i].key);
+ kfree(tp->md5sig_info->keys6[i].base.key);
tp->md5sig_info->entries6 = 0;
tcp_free_md5sig_pool();
}
@@ -668,7 +668,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
if (tp->md5sig_info->entries4) {
for (i = 0; i < tp->md5sig_info->entries4; i++)
- kfree(tp->md5sig_info->keys4[i].key);
+ kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4 = 0;
tcp_free_md5sig_pool();
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4210951..c347f3e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -555,6 +555,8 @@ static int udp_v6_push_pending_frames(struct sock *sk)
out:
up->len = 0;
up->pending = 0;
+ if (!err)
+ UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
return err;
}
@@ -823,10 +825,8 @@ do_append_data:
release_sock(sk);
out:
fl6_sock_release(flowlabel);
- if (!err) {
- UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
+ if (!err)
return len;
- }
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 7286c38..ff2172f 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void)
}
-module_init(ieee80211_init);
+subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);
MODULE_DESCRIPTION("IEEE 802.11 subsystem");
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index f6780d6..17b9f46 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void)
}
-module_init(rate_control_simple_init);
+subsys_initcall(rate_control_simple_init);
module_exit(rate_control_simple_exit);
MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89ce815..7ab82b3 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
skb_queue_head_init(&q->requeued[i]);
q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
qd->handle);
- if (q->queues[i] == 0) {
+ if (!q->queues[i]) {
q->queues[i] = &noop_qdisc;
printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
}
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 8b8ece7..e32761c 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -55,18 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
- /* No point being interruptible: we're probably in cleanup_module() */
- restart:
mutex_lock(&nf_sockopt_mutex);
- if (reg->use != 0) {
- /* To be woken by nf_sockopt call... */
- /* FIXME: Stuart Young's name appears gratuitously. */
- set_current_state(TASK_UNINTERRUPTIBLE);
- reg->cleanup_task = current;
- mutex_unlock(&nf_sockopt_mutex);
- schedule();
- goto restart;
- }
list_del(&reg->list);
mutex_unlock(&nf_sockopt_mutex);
}
@@ -86,10 +75,11 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
list_for_each(i, &nf_sockopts) {
ops = (struct nf_sockopt_ops *)i;
if (ops->pf == pf) {
+ if (!try_module_get(ops->owner))
+ goto out_nosup;
if (get) {
if (val >= ops->get_optmin
&& val < ops->get_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
ret = ops->get(sk, val, opt, len);
goto out;
@@ -97,23 +87,20 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
} else {
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
ret = ops->set(sk, val, opt, *len);
goto out;
}
}
+ module_put(ops->owner);
}
}
+ out_nosup:
mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
- mutex_lock(&nf_sockopt_mutex);
- ops->use--;
- if (ops->cleanup_task)
- wake_up_process(ops->cleanup_task);
- mutex_unlock(&nf_sockopt_mutex);
+ module_put(ops->owner);
return ret;
}
@@ -144,10 +131,12 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
list_for_each(i, &nf_sockopts) {
ops = (struct nf_sockopt_ops *)i;
if (ops->pf == pf) {
+ if (!try_module_get(ops->owner))
+ goto out_nosup;
+
if (get) {
if (val >= ops->get_optmin
&& val < ops->get_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_get)
ret = ops->compat_get(sk,
@@ -160,7 +149,6 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
} else {
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_set)
ret = ops->compat_set(sk,
@@ -171,17 +159,15 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
goto out;
}
}
+ module_put(ops->owner);
}
}
+ out_nosup:
mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
- mutex_lock(&nf_sockopt_mutex);
- ops->use--;
- if (ops->cleanup_task)
- wake_up_process(ops->cleanup_task);
- mutex_unlock(&nf_sockopt_mutex);
+ module_put(ops->owner);
return ret;
}
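
The hand-rolled use counter and cleanup_task wakeup in nf_sockopt are replaced by ordinary module reference counting: pin ops->owner with try_module_get() before dropping the mutex, call the handler, then module_put(). The shape of the pattern (find_ops() and registry_mutex are illustrative names, not the file's actual helpers):

	mutex_lock(&registry_mutex);
	ops = find_ops(pf);
	if (ops && try_module_get(ops->owner)) {
		mutex_unlock(&registry_mutex);	/* owner cannot unload now */
		ret = ops->get(sk, val, opt, len);
		module_put(ops->owner);
		return ret;
	}
	mutex_unlock(&registry_mutex);
	return -ENOPROTOOPT;

This is why every nf_sockopt_ops registration elsewhere in this series gains an .owner = THIS_MODULE initializer.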
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e185a5b..2351533 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -58,7 +58,6 @@ struct nfulnl_instance {
unsigned int qlen; /* number of nlmsgs in skb */
struct sk_buff *skb; /* pre-allocated skb */
- struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
struct timer_list timer;
int peer_pid; /* PID of the peer process */
@@ -345,10 +344,12 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
static int
__nfulnl_send(struct nfulnl_instance *inst)
{
- int status;
+ int status = -1;
if (inst->qlen > 1)
- inst->lastnlh->nlmsg_type = NLMSG_DONE;
+ NLMSG_PUT(inst->skb, 0, 0,
+ NLMSG_DONE,
+ sizeof(struct nfgenmsg));
status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
if (status < 0) {
@@ -358,8 +359,8 @@ __nfulnl_send(struct nfulnl_instance *inst)
inst->qlen = 0;
inst->skb = NULL;
- inst->lastnlh = NULL;
+nlmsg_failure:
return status;
}
@@ -538,7 +539,6 @@ __build_packet_message(struct nfulnl_instance *inst,
}
nlh->nlmsg_len = inst->skb->tail - old_tail;
- inst->lastnlh = nlh;
return 0;
nlmsg_failure:
@@ -644,7 +644,8 @@ nfulnl_log_packet(unsigned int pf,
}
if (inst->qlen >= qthreshold ||
- (inst->skb && size > skb_tailroom(inst->skb))) {
+ (inst->skb && size >
+ skb_tailroom(inst->skb) - sizeof(struct nfgenmsg))) {
/* either the queue len is too high or we don't have
* enough room in the skb left. flush to userspace. */
UDEBUG("flushing old skb\n");
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index ab7d845..223f9bd 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
void *matchinfo,
unsigned int hook_mask)
{
- const struct xt_tcp *udpinfo = matchinfo;
+ const struct xt_udp *udpinfo = matchinfo;
/* Must specify no unknown invflags */
return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index cd01642..114df6e 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -79,7 +79,7 @@ static void rose_loopback_timer(unsigned long param)
skb_reset_transport_header(skb);
- sk = rose_find_socket(lci_o, &rose_loopback_neigh);
+ sk = rose_find_socket(lci_o, rose_loopback_neigh);
if (sk) {
if (rose_process_rx_frame(sk, skb) == 0)
kfree_skb(skb);
@@ -88,7 +88,7 @@ static void rose_loopback_timer(unsigned long param)
if (frametype == ROSE_CALL_REQUEST) {
if ((dev = rose_dev_get(dest)) != NULL) {
- if (rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0)
+ if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
kfree_skb(skb);
} else {
kfree_skb(skb);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index bbcbad1..96f61a7 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock);
static struct rose_route *rose_route_list;
static DEFINE_SPINLOCK(rose_route_list_lock);
-struct rose_neigh rose_loopback_neigh;
+struct rose_neigh *rose_loopback_neigh;
/*
* Add a new route to a node, and in the process add the node and the
@@ -362,7 +362,12 @@ out:
*/
void rose_add_loopback_neigh(void)
{
- struct rose_neigh *sn = &rose_loopback_neigh;
+ struct rose_neigh *sn;
+
+ rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
+ if (!rose_loopback_neigh)
+ return;
+ sn = rose_loopback_neigh;
sn->callsign = null_ax25_address;
sn->digipeat = NULL;
@@ -417,13 +422,13 @@ int rose_add_loopback_node(rose_address *address)
rose_node->mask = 10;
rose_node->count = 1;
rose_node->loopback = 1;
- rose_node->neighbour[0] = &rose_loopback_neigh;
+ rose_node->neighbour[0] = rose_loopback_neigh;
/* Insert at the head of list. Address is always mask=10 */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
- rose_loopback_neigh.count++;
+ rose_loopback_neigh->count++;
out:
spin_unlock_bh(&rose_node_list_lock);
@@ -454,7 +459,7 @@ void rose_del_loopback_node(rose_address *address)
rose_remove_node(rose_node);
- rose_loopback_neigh.count--;
+ rose_loopback_neigh->count--;
out:
spin_unlock_bh(&rose_node_list_lock);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index feef366..72cdb0f 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -68,7 +68,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
struct rtattr *r ;
- read_lock(hinfo->lock);
+ read_lock_bh(hinfo->lock);
s_i = cb->args[0];
@@ -96,7 +96,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
}
}
done:
- read_unlock(hinfo->lock);
+ read_unlock_bh(hinfo->lock);
if (n_i)
cb->args[0] += n_i;
return n_i;
@@ -156,13 +156,13 @@ struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
struct tcf_common *p;
- read_lock(hinfo->lock);
+ read_lock_bh(hinfo->lock);
for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
p = p->tcfc_next) {
if (p->tcfc_index == index)
break;
}
- read_unlock(hinfo->lock);
+ read_unlock_bh(hinfo->lock);
return p;
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6085be5..17f6f27 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -56,7 +56,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct rtattr *r;
- read_lock(&police_lock);
+ read_lock_bh(&police_lock);
s_i = cb->args[0];
@@ -85,7 +85,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
}
}
done:
- read_unlock(&police_lock);
+ read_unlock_bh(&police_lock);
if (n_i)
cb->args[0] += n_i;
return n_i;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 8dbe369..d4d5d2f 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -502,7 +502,7 @@ static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
#ifdef CONFIG_NET_CLS_IND
if (tb[TCA_U32_INDEV-1]) {
- int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
+ err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
if (err < 0)
goto errout;
}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index e38c283..cbef3bb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -380,7 +380,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
int len = skb->len;
- int ret;
+ int uninitialized_var(ret);
struct cbq_class *cl = cbq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4a49db6..abd82fc 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (TC_H_MAJ(skb->priority) != sch->handle) {
err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
- switch (tc_classify(skb, q->filter_list, &res)) {
+ switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
*qerr = NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 9579573..b542c87 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
+#include <linux/jhash.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -95,7 +96,7 @@ struct sfq_sched_data
/* Variables */
struct timer_list perturb_timer;
- int perturbation;
+ u32 perturbation;
sfq_index tail; /* Index of current slot in round */
sfq_index max_depth; /* Maximal depth */
@@ -109,12 +110,7 @@ struct sfq_sched_data
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
- int pert = q->perturbation;
-
- /* Have we any rotation primitives? If not, WHY? */
- h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
- h ^= h>>10;
- return h & 0x3FF;
+ return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
@@ -256,6 +252,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
q->hash[x] = hash;
}
+ /* If selected queue has length q->limit, this means that
+ * all the other queues are empty and that we do simple tail drop,
+ * i.e. drop _this_ packet.
+ */
+ if (q->qs[x].qlen >= q->limit)
+ return qdisc_drop(skb, sch);
+
sch->qstats.backlog += skb->len;
__skb_queue_tail(&q->qs[x], skb);
sfq_inc(q, x);
@@ -270,7 +273,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
q->tail = x;
}
}
- if (++sch->q.qlen < q->limit-1) {
+ if (++sch->q.qlen <= q->limit) {
sch->bstats.bytes += skb->len;
sch->bstats.packets++;
return 0;
@@ -294,6 +297,19 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
}
sch->qstats.backlog += skb->len;
__skb_queue_head(&q->qs[x], skb);
+ /* If selected queue has length q->limit+1, this means that
+ * all the other queues are empty and we do simple tail drop.
+ * This packet is still requeued at head of queue, tail packet
+ * is dropped.
+ */
+ if (q->qs[x].qlen > q->limit) {
+ skb = q->qs[x].prev;
+ __skb_unlink(skb, &q->qs[x]);
+ sch->qstats.drops++;
+ sch->qstats.backlog -= skb->len;
+ kfree_skb(skb);
+ return NET_XMIT_CN;
+ }
sfq_inc(q, x);
if (q->qs[x].qlen == 1) { /* The flow is new */
if (q->tail == SFQ_DEPTH) { /* It is the first flow */
@@ -306,7 +322,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
q->tail = x;
}
}
- if (++sch->q.qlen < q->limit - 1) {
+ if (++sch->q.qlen <= q->limit) {
sch->qstats.requeues++;
return 0;
}
@@ -370,12 +386,10 @@ static void sfq_perturbation(unsigned long arg)
struct Qdisc *sch = (struct Qdisc*)arg;
struct sfq_sched_data *q = qdisc_priv(sch);
- q->perturbation = net_random()&0x1F;
+ get_random_bytes(&q->perturbation, 4);
- if (q->perturb_period) {
- q->perturb_timer.expires = jiffies + q->perturb_period;
- add_timer(&q->perturb_timer);
- }
+ if (q->perturb_period)
+ mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
@@ -391,17 +405,17 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
q->perturb_period = ctl->perturb_period*HZ;
if (ctl->limit)
- q->limit = min_t(u32, ctl->limit, SFQ_DEPTH);
+ q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
qlen = sch->q.qlen;
- while (sch->q.qlen >= q->limit-1)
+ while (sch->q.qlen > q->limit)
sfq_drop(sch);
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
del_timer(&q->perturb_timer);
if (q->perturb_period) {
- q->perturb_timer.expires = jiffies + q->perturb_period;
- add_timer(&q->perturb_timer);
+ mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+ get_random_bytes(&q->perturbation, 4);
}
sch_tree_unlock(sch);
return 0;
@@ -423,12 +437,13 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
}
- q->limit = SFQ_DEPTH;
+ q->limit = SFQ_DEPTH - 1;
q->max_depth = 0;
q->tail = SFQ_DEPTH;
if (opt == NULL) {
q->quantum = psched_mtu(sch->dev);
q->perturb_period = 0;
+ get_random_bytes(&q->perturbation, 4);
} else {
int err = sfq_change(sch, opt);
if (err)
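
The SFQ hunks above do three related things: replace the hand-rolled perturbed hash with jhash_2words(), cap q->limit at SFQ_DEPTH - 1 so the enqueue/requeue tests can use a plain `<= q->limit`, and tail-drop an arriving packet whenever its own flow already holds the whole budget. The sketch below is a user-space illustration of that last invariant, not the kernel code; FLOWS, LIMIT, and flow_len are made-up names.

```c
/* Sketch of SFQ's shared-limit invariant: if one flow holds LIMIT
 * packets, the shared budget guarantees every other flow is empty,
 * so dropping the arriving packet is a plain tail drop.
 */
#include <stdio.h>

#define FLOWS 4
#define LIMIT 8			/* shared budget, like q->limit */

static int flow_len[FLOWS];
static int total;

static int longest_flow(void)
{
	int i, best = 0;

	for (i = 1; i < FLOWS; i++)
		if (flow_len[i] > flow_len[best])
			best = i;
	return best;
}

/* Returns 0 if the packet was queued, -1 if a packet was dropped. */
static int enqueue(int flow)
{
	if (flow_len[flow] >= LIMIT)	/* all other flows are empty */
		return -1;		/* tail drop this packet */

	flow_len[flow]++;
	total++;
	if (total <= LIMIT)		/* the corrected "<=" test */
		return 0;

	flow_len[longest_flow()]--;	/* over budget: drop from the */
	total--;			/* longest flow, like sfq_drop */
	return -1;
}

int main(void)
{
	int i;

	for (i = 0; i < LIMIT; i++)
		enqueue(0);
	printf("enqueue on full flow: %d (expect -1)\n", enqueue(0));
	printf("total queued: %d (never exceeds %d)\n", total, LIMIT);
	return 0;
}
```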
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 498edb0..9bad8ba 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -99,7 +99,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Initialize the bind addr area. */
sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
- rwlock_init(&asoc->base.addr_lock);
asoc->state = SCTP_STATE_CLOSED;
@@ -727,7 +726,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
break;
case SCTP_TRANSPORT_DOWN:
- transport->state = SCTP_INACTIVE;
+ /* If the transport was never confirmed, do not transition it
+ * to inactive state.
+ */
+ if (transport->state != SCTP_UNCONFIRMED)
+ transport->state = SCTP_INACTIVE;
+
spc_state = SCTP_ADDR_UNREACHABLE;
break;
@@ -932,8 +936,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
{
struct sctp_transport *transport;
- sctp_read_lock(&asoc->base.addr_lock);
-
if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
(htons(asoc->peer.port) == paddr->v4.sin_port)) {
transport = sctp_assoc_lookup_paddr(asoc, paddr);
@@ -947,7 +949,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
transport = NULL;
out:
- sctp_read_unlock(&asoc->base.addr_lock);
return transport;
}
@@ -1371,19 +1372,13 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
const union sctp_addr *laddr)
{
- int found;
+ int found = 0;
- sctp_read_lock(&asoc->base.addr_lock);
if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
- sctp_sk(asoc->base.sk))) {
+ sctp_sk(asoc->base.sk)))
found = 1;
- goto out;
- }
- found = 0;
-out:
- sctp_read_unlock(&asoc->base.addr_lock);
return found;
}
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index fdb287a..dfffa94 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -163,9 +163,15 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
addr->a.v4.sin_port = htons(bp->port);
addr->use_as_src = use_as_src;
+ addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
- list_add_tail(&addr->list, &bp->address_list);
+ INIT_RCU_HEAD(&addr->rcu);
+
+ /* We always hold a socket lock when calling this function,
+ * and that acts as a writer synchronizing lock.
+ */
+ list_add_tail_rcu(&addr->list, &bp->address_list);
SCTP_DBG_OBJCNT_INC(addr);
return 0;
@@ -174,23 +180,35 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
/* Delete an address from the bind address list in the SCTP_bind_addr
* structure.
*/
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
+ void fastcall (*rcu_call)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head)))
{
- struct list_head *pos, *temp;
- struct sctp_sockaddr_entry *addr;
+ struct sctp_sockaddr_entry *addr, *temp;
- list_for_each_safe(pos, temp, &bp->address_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ /* We hold the socket lock when calling this function,
+ * and that acts as a writer synchronizing lock.
+ */
+ list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
/* Found the exact match. */
- list_del(pos);
- kfree(addr);
- SCTP_DBG_OBJCNT_DEC(addr);
-
- return 0;
+ addr->valid = 0;
+ list_del_rcu(&addr->list);
+ break;
}
}
+ /* Call the rcu callback provided in the args. This function is
+ * called by both BH packet processing and user side socket option
+ * processing, but it works on different lists in those 2 contexts.
+ * Each context provides its own callback, whether call_rcu_bh()
+ * or call_rcu(), to make sure that we wait for an appropriate time.
+ */
+ if (addr && !addr->valid) {
+ rcu_call(&addr->rcu, sctp_local_addr_free);
+ SCTP_DBG_OBJCNT_DEC(addr);
+ return 0;
+ }
+
return -EINVAL;
}
@@ -300,15 +318,20 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
struct sctp_sock *opt)
{
struct sctp_sockaddr_entry *laddr;
- struct list_head *pos;
-
- list_for_each(pos, &bp->address_list) {
- laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
- if (opt->pf->cmp_addr(&laddr->a, addr, opt))
- return 1;
+ int match = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid)
+ continue;
+ if (opt->pf->cmp_addr(&laddr->a, addr, opt)) {
+ match = 1;
+ break;
+ }
}
+ rcu_read_unlock();
- return 0;
+ return match;
}
/* Find the first address in the bind address list that is not present in
@@ -323,18 +346,19 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
union sctp_addr *addr;
void *addr_buf;
struct sctp_af *af;
- struct list_head *pos;
int i;
- list_for_each(pos, &bp->address_list) {
- laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-
+ /* This is only called by sctp_send_asconf_del_ip() and we hold
+ * the socket lock in that code path, so the address list
+ * can't change.
+ */
+ list_for_each_entry(laddr, &bp->address_list, list) {
addr_buf = (union sctp_addr *)addrs;
for (i = 0; i < addrcnt; i++) {
addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af)
- return NULL;
+ break;
if (opt->pf->cmp_addr(&laddr->a, addr, opt))
break;
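
The bind_addr.c conversion is the heart of the series: writers stay serialized by the socket lock, readers walk the address list under RCU, and entries are invalidated with a `valid` flag and freed only after a grace period. Below is a user-space sketch of the same pattern using liburcu; it assumes the classic `-lurcu` (memb) flavor and made-up entry and field names, so treat the exact API spelling as version-dependent.

```c
/* Valid-flag + deferred-free list, sketched with liburcu.
 * Build (assumption): gcc rcu_demo.c -lurcu
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* rcu_read_lock(), call_rcu(), ... */
#include <urcu/list.h>
#include <urcu/rculist.h>	/* cds_list_*_rcu helpers */

struct addr_entry {
	int addr;			/* stand-in for union sctp_addr */
	int valid;			/* cleared before list_del_rcu  */
	struct cds_list_head list;
	struct rcu_head rcu;
};

static CDS_LIST_HEAD(addr_list);

static void addr_free(struct rcu_head *head)
{
	free(caa_container_of(head, struct addr_entry, rcu));
}

/* Writers are assumed to hold an external lock (the socket lock in
 * the kernel code above), so they never race each other. */
static void addr_add(int a)
{
	struct addr_entry *e = malloc(sizeof(*e));

	e->addr = a;
	e->valid = 1;
	cds_list_add_tail_rcu(&e->list, &addr_list);
}

static void addr_del(int a)
{
	struct addr_entry *e;

	cds_list_for_each_entry(e, &addr_list, list) {
		if (e->addr == a) {
			e->valid = 0;
			cds_list_del_rcu(&e->list);
			call_rcu(&e->rcu, addr_free); /* free later */
			return;
		}
	}
}

/* Readers take no lock, only an RCU read-side critical section. */
static int addr_match(int a)
{
	struct addr_entry *e;
	int match = 0;

	rcu_read_lock();
	cds_list_for_each_entry_rcu(e, &addr_list, list) {
		if (e->valid && e->addr == a) {
			match = 1;
			break;
		}
	}
	rcu_read_unlock();
	return match;
}

int main(void)
{
	rcu_register_thread();
	addr_add(1);
	printf("match(1) = %d\n", addr_match(1));
	addr_del(1);
	printf("match(1) = %d\n", addr_match(1));
	rcu_barrier();		/* drain pending call_rcu callbacks */
	rcu_unregister_thread();
	return 0;
}
```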
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1404a9e..8f485a0 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -92,7 +92,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Initialize the bind addr area */
sctp_bind_addr_init(&ep->base.bind_addr, 0);
- rwlock_init(&ep->base.addr_lock);
/* Remember who we are attached to. */
ep->base.sk = sk;
@@ -225,21 +224,14 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
const union sctp_addr *laddr)
{
- struct sctp_endpoint *retval;
+ struct sctp_endpoint *retval = NULL;
- sctp_read_lock(&ep->base.addr_lock);
if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
- sctp_sk(ep->base.sk))) {
+ sctp_sk(ep->base.sk)))
retval = ep;
- goto out;
- }
}
- retval = NULL;
-
-out:
- sctp_read_unlock(&ep->base.addr_lock);
return retval;
}
@@ -261,9 +253,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
list_for_each(pos, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
if (rport == asoc->peer.port) {
- sctp_read_lock(&asoc->base.addr_lock);
*transport = sctp_assoc_lookup_paddr(asoc, paddr);
- sctp_read_unlock(&asoc->base.addr_lock);
if (*transport)
return asoc;
@@ -295,20 +285,17 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
const union sctp_addr *paddr)
{
- struct list_head *pos;
struct sctp_sockaddr_entry *addr;
struct sctp_bind_addr *bp;
- sctp_read_lock(&ep->base.addr_lock);
bp = &ep->base.bind_addr;
- list_for_each(pos, &bp->address_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
- if (sctp_has_association(&addr->a, paddr)) {
- sctp_read_unlock(&ep->base.addr_lock);
+ /* This function is called with the socket lock held,
+ * so the address_list can not change.
+ */
+ list_for_each_entry(addr, &bp->address_list, list) {
+ if (sctp_has_association(&addr->a, paddr))
return 1;
- }
}
- sctp_read_unlock(&ep->base.addr_lock);
return 0;
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47e5601..f9a0c92 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
goto discard;
+ /* RFC 4460, 2.11.2
+ * This will discard packets with INIT chunk bundled as
+ * subsequent chunks in the packet. When INIT is first,
+ * the normal INIT processing will discard the chunk.
+ */
+ if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
+ goto discard;
+
/* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
* or a COOKIE ACK the SCTP Packet should be silently
* discarded.
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 88aa224..e4ea7fd 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
/* Force chunk->skb->data to chunk->chunk_end. */
skb_pull(chunk->skb,
chunk->chunk_end - chunk->skb->data);
+
+ /* Verify that we have at least chunk headers
+ * worth of buffer left.
+ */
+ if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+ sctp_chunk_free(chunk);
+ chunk = queue->in_progress = NULL;
+ }
}
}
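
Both of these hunks harden chunk framing: input.c refuses OOTB packets that bundle an INIT anywhere but first, and inqueue.c refuses to parse a next chunk unless a full header is still in the buffer. Here is a stand-alone sketch of that defensive walk over a synthetic buffer; `struct chunkhdr` is a simplified stand-in for sctp_chunkhdr_t.

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohs(), htons() */

struct chunkhdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network order, includes this header */
};

#define WORD_ROUND(n) (((n) + 3) & ~3U)

/* Walk chunks in buf[0..len), refusing short or overflowing lengths. */
static int walk_chunks(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf, *end = buf + len;

	while ((size_t)(end - p) >= sizeof(struct chunkhdr)) {
		struct chunkhdr ch;
		uint16_t clen;

		memcpy(&ch, p, sizeof(ch));
		clen = ntohs(ch.length);

		/* A length smaller than the header is a violation. */
		if (clen < sizeof(struct chunkhdr))
			return -1;
		/* A length running past the buffer is one too. */
		if (WORD_ROUND(clen) > (size_t)(end - p))
			return -1;

		printf("chunk type %d, length %d\n", ch.type, clen);
		p += WORD_ROUND(clen);
	}
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	struct chunkhdr ch = { .type = 1, .flags = 0,
			       .length = htons(4) };

	memcpy(buf, &ch, sizeof(ch));
	printf("valid chunk:  %d (expect 0)\n", walk_chunks(buf, 4));
	ch.length = htons(2);		/* shorter than the header */
	memcpy(buf, &ch, sizeof(ch));
	printf("short length: %d (expect -1)\n", walk_chunks(buf, 4));
	return 0;
}
```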
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f8aa23d..670fd27 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -77,13 +77,18 @@
#include <asm/uaccess.h>
-/* Event handler for inet6 address addition/deletion events. */
+/* Event handler for inet6 address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
void *ptr)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- struct sctp_sockaddr_entry *addr;
- struct list_head *pos, *temp;
+ struct sctp_sockaddr_entry *addr = NULL;
+ struct sctp_sockaddr_entry *temp;
switch (ev) {
case NETDEV_UP:
@@ -94,19 +99,26 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
memcpy(&addr->a.v6.sin6_addr, &ifa->addr,
sizeof(struct in6_addr));
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
- list_add_tail(&addr->list, &sctp_local_addr_list);
+ addr->valid = 1;
+ spin_lock_bh(&sctp_local_addr_lock);
+ list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+ spin_unlock_bh(&sctp_local_addr_lock);
}
break;
case NETDEV_DOWN:
- list_for_each_safe(pos, temp, &sctp_local_addr_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
- if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) {
- list_del(pos);
- kfree(addr);
+ spin_lock_bh(&sctp_local_addr_lock);
+ list_for_each_entry_safe(addr, temp,
+ &sctp_local_addr_list, list) {
+ if (ipv6_addr_equal(&addr->a.v6.sin6_addr,
+ &ifa->addr)) {
+ addr->valid = 0;
+ list_del_rcu(&addr->list);
break;
}
}
-
+ spin_unlock_bh(&sctp_local_addr_lock);
+ if (addr && !addr->valid)
+ call_rcu(&addr->rcu, sctp_local_addr_free);
break;
}
@@ -290,9 +302,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
union sctp_addr *saddr)
{
struct sctp_bind_addr *bp;
- rwlock_t *addr_lock;
struct sctp_sockaddr_entry *laddr;
- struct list_head *pos;
sctp_scope_t scope;
union sctp_addr *baddr = NULL;
__u8 matchlen = 0;
@@ -312,14 +322,14 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
scope = sctp_scope(daddr);
bp = &asoc->base.bind_addr;
- addr_lock = &asoc->base.addr_lock;
/* Go through the bind address list and find the best source address
* that matches the scope of the destination address.
*/
- sctp_read_lock(addr_lock);
- list_for_each(pos, &bp->address_list) {
- laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid)
+ continue;
if ((laddr->use_as_src) &&
(laddr->a.sa.sa_family == AF_INET6) &&
(scope <= sctp_scope(&laddr->a))) {
@@ -341,7 +351,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
__FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr));
}
- sctp_read_unlock(addr_lock);
+ rcu_read_unlock();
}
/* Make a copy of all potential local addresses. */
@@ -367,7 +377,9 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
+ addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
+ INIT_RCU_HEAD(&addr->rcu);
list_add_tail(&addr->list, addrlist);
}
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361..28f4fe7 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
*/
if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
(!fast_retransmit && !chunk->tsn_gap_acked)) {
+ /* If this chunk was sent less than 1 rto ago, do not
+ * retransmit this chunk, but give the peer time
+ * to acknowledge it.
+ */
+ if ((jiffies - chunk->sent_at) < transport->rto)
+ continue;
+
/* RFC 2960 6.2.1 Processing a Received SACK
*
* C) Any time a DATA chunk is marked for
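
The outqueue.c hunk rate-limits retransmissions: a chunk transmitted within the last RTO is left alone so the SACK has a fair chance to arrive before it is sent again. A sketch of the same time test with plain monotonic milliseconds (now_ms and rto_ms are stand-ins for jiffies and transport->rto):

```c
#include <stdio.h>
#include <stdint.h>

static uint64_t now_ms;		/* stand-in for jiffies */

struct chunk {
	uint64_t sent_at;	/* when the chunk was last sent */
};

/* Skip chunks sent less than one RTO ago: the peer has not yet
 * had a fair chance to acknowledge them. */
static int should_retransmit(const struct chunk *c, uint64_t rto_ms)
{
	return (now_ms - c->sent_at) >= rto_ms;
}

int main(void)
{
	struct chunk c = { .sent_at = 1000 };

	now_ms = 1200;
	printf("200ms after send, rto=500: %d (expect 0)\n",
	       should_retransmit(&c, 500));
	now_ms = 1600;
	printf("600ms after send, rto=500: %d (expect 1)\n",
	       should_retransmit(&c, 500));
	return 0;
}
```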
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e98579b..3d036cd 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -153,6 +153,9 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
addr->a.v4.sin_family = AF_INET;
addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+ addr->valid = 1;
+ INIT_LIST_HEAD(&addr->list);
+ INIT_RCU_HEAD(&addr->rcu);
list_add_tail(&addr->list, addrlist);
}
}
@@ -192,16 +195,24 @@ static void sctp_free_local_addr_list(void)
}
}
+void sctp_local_addr_free(struct rcu_head *head)
+{
+ struct sctp_sockaddr_entry *e = container_of(head,
+ struct sctp_sockaddr_entry, rcu);
+ kfree(e);
+}
+
/* Copy the local addresses which are valid for 'scope' into 'bp'. */
int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
gfp_t gfp, int copy_flags)
{
struct sctp_sockaddr_entry *addr;
int error = 0;
- struct list_head *pos, *temp;
- list_for_each_safe(pos, temp, &sctp_local_addr_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+ if (!addr->valid)
+ continue;
if (sctp_in_scope(&addr->a, scope)) {
/* Now that the address is in scope, check to see if
* the address type is really supported by the local
@@ -213,7 +224,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
(copy_flags & SCTP_ADDR6_ALLOWED) &&
(copy_flags & SCTP_ADDR6_PEERSUPP)))) {
error = sctp_add_bind_addr(bp, &addr->a, 1,
- GFP_ATOMIC);
+ GFP_ATOMIC);
if (error)
goto end_copy;
}
@@ -221,6 +232,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
}
end_copy:
+ rcu_read_unlock();
return error;
}
@@ -416,9 +428,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
struct rtable *rt;
struct flowi fl;
struct sctp_bind_addr *bp;
- rwlock_t *addr_lock;
struct sctp_sockaddr_entry *laddr;
- struct list_head *pos;
struct dst_entry *dst = NULL;
union sctp_addr dst_saddr;
@@ -447,23 +457,20 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
goto out;
bp = &asoc->base.bind_addr;
- addr_lock = &asoc->base.addr_lock;
if (dst) {
/* Walk through the bind address list and look for a bind
* address that matches the source address of the returned dst.
*/
- sctp_read_lock(addr_lock);
- list_for_each(pos, &bp->address_list) {
- laddr = list_entry(pos, struct sctp_sockaddr_entry,
- list);
- if (!laddr->use_as_src)
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid || !laddr->use_as_src)
continue;
sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
goto out_unlock;
}
- sctp_read_unlock(addr_lock);
+ rcu_read_unlock();
/* None of the bound addresses match the source address of the
* dst. So release it.
@@ -475,10 +482,10 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
/* Walk through the bind address list and try to get a dst that
* matches a bind address as the source address.
*/
- sctp_read_lock(addr_lock);
- list_for_each(pos, &bp->address_list) {
- laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid)
+ continue;
if ((laddr->use_as_src) &&
(AF_INET == laddr->a.sa.sa_family)) {
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
@@ -490,7 +497,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
}
out_unlock:
- sctp_read_unlock(addr_lock);
+ rcu_read_unlock();
out:
if (dst)
SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
@@ -600,13 +607,18 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
}
-/* Event handler for inet address addition/deletion events. */
+/* Event handler for inet address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- struct sctp_sockaddr_entry *addr;
- struct list_head *pos, *temp;
+ struct sctp_sockaddr_entry *addr = NULL;
+ struct sctp_sockaddr_entry *temp;
switch (ev) {
case NETDEV_UP:
@@ -615,19 +627,25 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
addr->a.v4.sin_family = AF_INET;
addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
- list_add_tail(&addr->list, &sctp_local_addr_list);
+ addr->valid = 1;
+ spin_lock_bh(&sctp_local_addr_lock);
+ list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+ spin_unlock_bh(&sctp_local_addr_lock);
}
break;
case NETDEV_DOWN:
- list_for_each_safe(pos, temp, &sctp_local_addr_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ spin_lock_bh(&sctp_local_addr_lock);
+ list_for_each_entry_safe(addr, temp,
+ &sctp_local_addr_list, list) {
if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
- list_del(pos);
- kfree(addr);
+ addr->valid = 0;
+ list_del_rcu(&addr->list);
break;
}
}
-
+ spin_unlock_bh(&sctp_local_addr_lock);
+ if (addr && !addr->valid)
+ call_rcu(&addr->rcu, sctp_local_addr_free);
break;
}
@@ -1160,6 +1178,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Initialize the local address list. */
INIT_LIST_HEAD(&sctp_local_addr_list);
+ spin_lock_init(&sctp_local_addr_lock);
sctp_get_local_addr_list();
/* Register notifier for inet address additions/deletions. */
@@ -1227,6 +1246,9 @@ SCTP_STATIC __exit void sctp_exit(void)
sctp_v6_del_protocol();
inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
+ /* Unregister notifier for inet address additions/deletions. */
+ unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+
/* Free the local address list. */
sctp_free_local_addr_list();
@@ -1240,9 +1262,6 @@ SCTP_STATIC __exit void sctp_exit(void)
inet_unregister_protosw(&sctp_stream_protosw);
inet_unregister_protosw(&sctp_seqpacket_protosw);
- /* Unregister notifier for inet address additions/deletions. */
- unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
-
sctp_sysctl_unregister();
list_del(&sctp_ipv4_specific.list);
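
sctp_local_addr_free() is the RCU callback shared by both notifier paths above: it recovers the enclosing entry from the embedded rcu_head via container_of() and frees it. A minimal stand-alone illustration of that recovery follows, with a local container_of and a placeholder rcu_head (both simplified, not the kernel's).

```c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; };	/* simplified placeholder */

struct sockaddr_entry {
	int addr;
	struct rcu_head rcu;		/* embedded callback handle */
};

/* The callback only sees &entry->rcu; container_of walks back to
 * the start of the enclosing structure so it can be freed. */
static void entry_free(struct rcu_head *head)
{
	struct sockaddr_entry *e =
		container_of(head, struct sockaddr_entry, rcu);

	printf("freeing entry with addr %d\n", e->addr);
	free(e);
}

int main(void)
{
	struct sockaddr_entry *e = malloc(sizeof(*e));

	e->addr = 42;
	entry_free(&e->rcu);	/* pretend the grace period ended */
	return 0;
}
```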
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 51c4d7f..23ae37e 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
* abort chunk.
*/
void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
- const void *payload, size_t paylen)
+ size_t paylen)
{
sctp_errhdr_t err;
__u16 len;
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
len = sizeof(sctp_errhdr_t) + paylen;
err.length = htons(len);
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
- sctp_addto_chunk(chunk, paylen, payload);
}
/* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
/* Put the tsn back into network byte order. */
payload = htonl(tsn);
- sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
- sizeof(payload));
+ sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+ sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
goto err_copy;
}
- sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+ sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+ sctp_addto_chunk(retval, paylen, payload);
if (paylen)
kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
struct sctp_paramhdr phdr;
retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
- + sizeof(sctp_chunkhdr_t));
+ + sizeof(sctp_paramhdr_t));
if (!retval)
goto end;
- sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+ sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+ + sizeof(sctp_paramhdr_t));
phdr.type = htons(chunk->chunk_hdr->type);
phdr.length = chunk->chunk_hdr->length;
- sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+ sctp_addto_chunk(retval, paylen, payload);
+ sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
end:
return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
if (!retval)
goto nodata;
- sctp_init_cause(retval, cause_code, payload, paylen);
+ sctp_init_cause(retval, cause_code, paylen);
+ sctp_addto_chunk(retval, paylen, payload);
nodata:
return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
void *target;
void *padding;
int chunklen = ntohs(chunk->chunk_hdr->length);
- int padlen = chunklen % 4;
+ int padlen = WORD_ROUND(chunklen) - chunklen;
padding = skb_put(chunk->skb, padlen);
target = skb_put(chunk->skb, len);
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
return target;
}
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+ void *target;
+ int chunklen = ntohs(chunk->chunk_hdr->length);
+
+ target = skb_put(chunk->skb, len);
+
+ memcpy(target, data, len);
+
+ /* Adjust the chunk length field. */
+ chunk->chunk_hdr->length = htons(chunklen + len);
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+ return target;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
*/
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
+ struct sctp_datamsg *msg;
+ struct sctp_chunk *lchunk;
+ struct sctp_stream *stream;
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
- /* This is the last possible instant to assign a SSN. */
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
- ssn = 0;
- } else {
- sid = ntohs(chunk->subh.data_hdr->stream);
- if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
- ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
- else
- ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
- }
+ /* All fragments will be on the same stream */
+ sid = ntohs(chunk->subh.data_hdr->stream);
+ stream = &chunk->asoc->ssnmap->out;
+
+ /* Now assign the sequence number to the entire message.
+ * All fragments must have the same stream sequence number.
+ */
+ msg = chunk->msg;
+ list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ ssn = 0;
+ } else {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+ ssn = sctp_ssn_next(stream, sid);
+ else
+ ssn = sctp_ssn_peek(stream, sid);
+ }
- chunk->subh.data_hdr->ssn = htons(ssn);
- chunk->has_ssn = 1;
+ lchunk->subh.data_hdr->ssn = htons(ssn);
+ lchunk->has_ssn = 1;
+ }
}
/* Helper function to assign a TSN if needed. This assumes that both
@@ -1466,7 +1499,8 @@ no_hmac:
__be32 n = htonl(usecs);
sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
- &n, sizeof(n));
+ sizeof(n));
+ sctp_addto_chunk(*errp, sizeof(n), &n);
*error = -SCTP_IERROR_STALE_COOKIE;
} else
*error = -SCTP_IERROR_NOMEM;
@@ -1497,7 +1531,7 @@ no_hmac:
/* Also, add the destination address. */
if (list_empty(&retval->base.bind_addr.address_list)) {
sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
- GFP_ATOMIC);
+ GFP_ATOMIC);
}
retval->next_tsn = retval->c.initial_tsn;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
report.num_missing = htonl(1);
report.type = paramtype;
sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
- &report, sizeof(report));
+ sizeof(report));
+ sctp_addto_chunk(*errp, sizeof(report), &report);
}
/* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
*errp = sctp_make_op_error_space(asoc, chunk, 0);
if (*errp)
- sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+ sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
/* Stop processing this chunk. */
return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
- sizeof(error));
- sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+ sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+ sizeof(error) + sizeof(sctp_paramhdr_t));
+ sctp_addto_chunk(*errp, sizeof(error), error);
+ sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
}
return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
if (!*errp)
*errp = sctp_make_op_error_space(asoc, chunk, len);
- if (*errp)
- sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
- param.v, len);
+ if (*errp) {
+ sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+ sctp_addto_chunk(*errp, len, param.v);
+ }
/* Stop processing this chunk. */
return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
*errp = sctp_make_op_error_space(asoc, chunk,
ntohs(chunk->chunk_hdr->length));
- if (*errp)
+ if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- param.v,
WORD_ROUND(ntohs(param.p->length)));
+ sctp_addto_chunk(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
+ }
break;
case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- param.v,
WORD_ROUND(ntohs(param.p->length)));
+ sctp_addto_chunk(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
} else {
/* If there is no memory for generating the ERROR
* report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
* VIOLATION error. We build the ERROR chunk here and let the normal
* error handling code build and send the packet.
*/
- if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+ if (param.v != (void*)chunk->chunk_end) {
sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
return 0;
}
@@ -2457,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
return SCTP_ERROR_NO_ERROR;
}
+/* Verify the ASCONF packet before we process it. */
+int sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_paramhdr *param_hdr, void *chunk_end,
+ struct sctp_paramhdr **errp) {
+ sctp_addip_param_t *asconf_param;
+ union sctp_params param;
+ int length, plen;
+
+ param.v = (sctp_paramhdr_t *) param_hdr;
+ while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+ length = ntohs(param.p->length);
+ *errp = param.p;
+
+ if (param.v > chunk_end - length ||
+ length < sizeof(sctp_paramhdr_t))
+ return 0;
+
+ switch (param.p->type) {
+ case SCTP_PARAM_ADD_IP:
+ case SCTP_PARAM_DEL_IP:
+ case SCTP_PARAM_SET_PRIMARY:
+ asconf_param = (sctp_addip_param_t *)param.v;
+ plen = ntohs(asconf_param->param_hdr.length);
+ if (plen < sizeof(sctp_addip_param_t) +
+ sizeof(sctp_paramhdr_t))
+ return 0;
+ break;
+ case SCTP_PARAM_SUCCESS_REPORT:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ if (length != sizeof(sctp_addip_param_t))
+ return 0;
+
+ break;
+ default:
+ break;
+ }
+
+ param.v += WORD_ROUND(length);
+ }
+
+ if (param.v != chunk_end)
+ return 0;
+
+ return 1;
+}
+
/* Process an incoming ASCONF chunk with the next expected serial no. and
* return an ASCONF_ACK chunk to be sent in response.
*/
@@ -2571,22 +2659,16 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
- sctp_local_bh_disable();
- sctp_write_lock(&asoc->base.addr_lock);
- list_for_each(pos, &bp->address_list) {
- saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ /* This is always done in BH context with a socket lock
+ * held, so the list can not change.
+ */
+ list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, &addr))
saddr->use_as_src = 1;
}
- sctp_write_unlock(&asoc->base.addr_lock);
- sctp_local_bh_enable();
break;
case SCTP_PARAM_DEL_IP:
- sctp_local_bh_disable();
- sctp_write_lock(&asoc->base.addr_lock);
- retval = sctp_del_bind_addr(bp, &addr);
- sctp_write_unlock(&asoc->base.addr_lock);
- sctp_local_bh_enable();
+ retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
list_for_each(pos, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
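
The one-line change in sctp_addto_chunk() fixes a subtle padding bug: `chunklen % 4` is the remainder past the last 4-byte boundary, not the number of bytes needed to reach the next one, so lengths of the form 4n+1 and 4n+3 got the wrong pad. A quick check of both formulas:

```c
#include <stdio.h>

#define WORD_ROUND(n) (((n) + 3) & ~3U)

int main(void)
{
	unsigned int len;

	printf("len  old(len%%4)  new(WORD_ROUND(len)-len)\n");
	for (len = 4; len <= 8; len++)
		printf("%3u  %10u  %24u\n",
		       len, len % 4, WORD_ROUND(len) - len);
	/* len=5: old pads 1 byte, but 3 are needed to reach 8;
	 * len=7: old pads 3 bytes where only 1 is needed. */
	return 0;
}
```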
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f..8d78900 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_DISPOSITION_VIOLATION:
- printk(KERN_ERR "sctp protocol violation state %d "
- "chunkid %d\n", state, subtype.chunk);
+ if (net_ratelimit())
+ printk(KERN_ERR "sctp protocol violation state %d "
+ "chunkid %d\n", state, subtype.chunk);
break;
case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
/* Move the Cumulative TSN Ack ahead. */
sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
+ /* purge the fragmentation queue */
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
/* Abort any in progress partial delivery. */
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
break;
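
Both sm_sideeffect.c hunks are robustness fixes; the first wraps the violation printk in net_ratelimit() so a stream of malformed packets cannot flood the log. A sketch of the same idea as a fixed-window limiter (the interval and burst values here are arbitrary, not net_ratelimit()'s defaults):

```c
#include <stdio.h>
#include <time.h>

/* Allow at most `burst` messages per `interval` seconds. */
static int log_ratelimit(void)
{
	static const int interval = 5, burst = 10;
	static time_t window_start;
	static int printed;
	time_t now = time(NULL);

	if (now - window_start >= interval) {
		window_start = now;
		printed = 0;
	}
	if (printed < burst) {
		printed++;
		return 1;
	}
	return 0;	/* suppressed */
}

int main(void)
{
	int i, shown = 0;

	for (i = 0; i < 100; i++)
		if (log_ratelimit()) {
			fprintf(stderr, "protocol violation %d\n", i);
			shown++;
		}
	printf("printed %d of 100 messages\n", shown);
	return 0;
}
```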
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56..a583d67 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands);
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
struct sctp_transport *transport);
static sctp_disposition_t sctp_sf_abort_violation(
+ const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
void *arg,
sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_violation_paramlen(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands);
+
static sctp_disposition_t sctp_sf_violation_ctsn(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
void *arg,
sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_violation_chunk(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands);
+
/* Small helper function that checks if the chunk length
* is of the appropriate length. The 'required_length' argument
* is set to be the size of a specific chunk we are testing.
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+
/* RFC 2960 6.10 Bundling
*
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
- return SCTP_DISPOSITION_VIOLATION;
+ return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
- if (!sctp_vtag_verify_either(chunk, asoc))
- return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
/* RFC 2960 10.2 SCTP-to-ULP
*
@@ -264,7 +289,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
- struct sock *sk;
int len;
/* 6.10 Bundling
@@ -285,16 +309,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
- sk = ep->base.sk;
- /* If the endpoint is not listening or if the number of associations
- * on the TCP-style socket exceed the max backlog, respond with an
- * ABORT.
- */
- if (!sctp_sstate(sk, LISTENING) ||
- (sctp_style(sk, TCP) &&
- sk_acceptq_is_full(sk)))
- return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
@@ -461,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
- /* Make sure that the INIT-ACK chunk has a valid length */
- if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
- return sctp_sf_violation_chunklen(ep, asoc, type, arg,
- commands);
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
- return SCTP_DISPOSITION_VIOLATION;
+ return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+ /* Make sure that the INIT-ACK chunk has a valid length */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
@@ -590,12 +604,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
struct sctp_ulpevent *ev, *ai_ev = NULL;
int error = 0;
struct sctp_chunk *err_chk_p;
+ struct sock *sk;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
- return sctp_sf_ootb(ep, asoc, type, arg, commands);
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Make sure that the COOKIE_ECHO chunk has a valid length.
* In this case, we check that we have enough for at least a
@@ -605,6 +620,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* If the endpoint is not listening or if the number of associations
+ * on the TCP-style socket exceed the max backlog, respond with an
+ * ABORT.
+ */
+ sk = ep->base.sk;
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
@@ -1032,19 +1056,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
/* This should never happen, but lets log it if so. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
- printk(KERN_WARNING
- "%s association %p could not find address "
- NIP6_FMT "\n",
- __FUNCTION__,
- asoc,
- NIP6(from_addr.v6.sin6_addr));
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "%s association %p could not find address "
+ NIP6_FMT "\n",
+ __FUNCTION__,
+ asoc,
+ NIP6(from_addr.v6.sin6_addr));
} else {
- printk(KERN_WARNING
- "%s association %p could not find address "
- NIPQUAD_FMT "\n",
- __FUNCTION__,
- asoc,
- NIPQUAD(from_addr.v4.sin_addr.s_addr));
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "%s association %p could not find address "
+ NIPQUAD_FMT "\n",
+ __FUNCTION__,
+ asoc,
+ NIPQUAD(from_addr.v4.sin_addr.s_addr));
}
return SCTP_DISPOSITION_DISCARD;
}
@@ -2495,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
+ /* Make sure that the chunk has a valid length */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
/* Since we are not going to really process this INIT, there
* is no point in verifying chunk boundaries. Just generate
* the SHUTDOWN ACK.
@@ -2928,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
*
* The return value is the disposition of the chunk.
*/
-sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
@@ -2964,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ sctp_sf_pdiscard(ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
}
@@ -3124,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
- /* Break out if chunk length is less then minimal. */
+ /* Report violation if the chunk is less than minimal */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
- break;
-
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- break;
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+ /* Now that we know we at least have a chunk header,
+ * do things that are type appropriate.
+ */
if (SCTP_CID_SHUTDOWN_ACK == ch->type)
ootb_shut_ack = 1;
@@ -3143,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
if (SCTP_CID_ABORT == ch->type)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
if (ootb_shut_ack)
- sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+ return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
else
- sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
- return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
}
/*
@@ -3217,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
- return SCTP_DISPOSITION_CONSUME;
+ /* We need to discard the rest of the packet to prevent
+ * potential bombing attacks from additional bundled chunks.
+ * This is documented in SCTP Threats ID.
+ */
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
return SCTP_DISPOSITION_NOMEM;
@@ -3240,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+
+ /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
/* Although we do have an association in this case, it corresponds
* to a restarted association. So the packet is treated as an OOTB
* packet and the state function that handles OOTB SHUTDOWN_ACK is
@@ -3256,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *asconf_ack = NULL;
+ struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
+ union sctp_addr_param *addr_param;
__u32 serial;
+ int length;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3273,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
+ addr_param = (union sctp_addr_param *)hdr->params;
+ length = ntohs(addr_param->p.length);
+ if (length < sizeof(sctp_paramhdr_t))
+ return sctp_sf_violation_paramlen(ep, asoc, type,
+ (void *)addr_param, commands);
+
+ /* Verify the ASCONF chunk before processing it. */
+ if (!sctp_verify_asconf(asoc,
+ (sctp_paramhdr_t *)((void *)addr_param + length),
+ (void *)chunk->chunk_end,
+ &err_param))
+ return sctp_sf_violation_paramlen(ep, asoc, type,
+ (void *)&err_param, commands);
+
/* ADDIP 4.2 C1) Compare the value of the serial number to the value
* the endpoint stored in a new association variable
* 'Peer-Serial-Number'.
@@ -3327,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
struct sctp_chunk *asconf_ack = arg;
struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
struct sctp_chunk *abort;
+ struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *addip_hdr;
__u32 sent_serial, rcvd_serial;
@@ -3344,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
rcvd_serial = ntohl(addip_hdr->serial);
+ /* Verify the ASCONF-ACK chunk before processing it. */
+ if (!sctp_verify_asconf(asoc,
+ (sctp_paramhdr_t *)addip_hdr->params,
+ (void *)asconf_ack->chunk_end,
+ &err_param))
+ return sctp_sf_violation_paramlen(ep, asoc, type,
+ (void *)&err_param, commands);
+
if (last_asconf) {
addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
sent_serial = ntohl(addip_hdr->serial);
@@ -3362,7 +3435,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
- sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+ sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
@@ -3392,7 +3465,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
- sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+ sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
@@ -3654,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+
+ /* Make sure that the chunk has a valid length.
+ * Since we don't know the chunk type, we use a general
+ * chunkhdr structure to make a comparison.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
return SCTP_DISPOSITION_DISCARD;
}
@@ -3709,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+
+ /* Make sure that the chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
return SCTP_DISPOSITION_VIOLATION;
}
@@ -3716,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
* Common function to handle a protocol violation.
*/
static sctp_disposition_t sctp_sf_abort_violation(
+ const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen)
{
+ struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
@@ -3730,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation(
if (!abort)
goto nomem;
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ if (asoc) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
- if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
- sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
- SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
- sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
- SCTP_ERROR(ECONNREFUSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
- SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ }
} else {
- sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
- SCTP_ERROR(ECONNABORTED));
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ packet = sctp_ootb_pkt_new(asoc, chunk);
+
+ if (!packet)
+ goto nomem_pkt;
+
+ if (sctp_test_T_bit(abort))
+ packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
+ abort->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, abort);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
}
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+ sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
return SCTP_DISPOSITION_ABORT;
+nomem_pkt:
+ sctp_chunk_free(abort);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
@@ -3786,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
{
char err_str[]="The following chunk had invalid length:";
- return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
+
+/*
+ * Handle a protocol violation when the parameter length is invalid.
+ * "Invalid" length is identified as smaller then the minimal length a
+ * given parameter can be.
+ */
+static sctp_disposition_t sctp_sf_violation_paramlen(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands) {
+ char err_str[] = "The following parameter had invalid length:";
+
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
@@ -3805,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
{
char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
- return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
+/* Handle protocol violation of an invalid chunk bundling. For example,
+ * when we have an association and we receive bundled INIT-ACK, or
+ * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
+ * statement from the specs. Additionally, there might be an attacker
+ * on the path and we may not want to continue this communication.
+ */
+static sctp_disposition_t sctp_sf_violation_chunk(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ char err_str[]="The following chunk violates protocol:";
+
+ if (!asoc)
+ return sctp_sf_violation(ep, asoc, type, arg, commands);
+
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
/***************************************************************************
* These are the state functions for handling primitive (Section 10) events.
***************************************************************************/
@@ -5175,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
* association exists, otherwise, use the peer's vtag.
*/
if (asoc) {
- vtag = asoc->peer.i.init_tag;
+ /* Special case the INIT-ACK as there is no peer's vtag
+ * yet.
+ */
+ switch(chunk->chunk_hdr->type) {
+ case SCTP_CID_INIT_ACK:
+ {
+ sctp_initack_chunk_t *initack;
+
+ initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
+ vtag = ntohl(initack->init_hdr.init_tag);
+ break;
+ }
+ default:
+ vtag = asoc->peer.i.init_tag;
+ break;
+ }
} else {
/* Special case the INIT and stale COOKIE_ECHO as there is no
* vtag yet.
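
The final sm_statefuns.c hunk special-cases ABORTs sent in reply to an INIT-ACK: the association carries no peer vtag yet, so the tag must be lifted from the INIT-ACK's own init_tag field. A small sketch of that selection with simplified structures (not the kernel's layouts):

```c
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <arpa/inet.h>

#define CID_INIT_ACK 2	/* stand-in for SCTP_CID_INIT_ACK */

struct initack_chunk {
	uint8_t  type;
	uint32_t init_tag;	/* network byte order */
};

/* Pick the vtag for an outgoing ABORT: normally the peer's tag,
 * but for an INIT-ACK there is no peer tag yet, so use the one
 * carried inside the chunk itself. */
static uint32_t pick_vtag(uint8_t chunk_type,
			  const struct initack_chunk *chunk,
			  uint32_t peer_init_tag)
{
	if (chunk_type == CID_INIT_ACK)
		return ntohl(chunk->init_tag);
	return peer_init_tag;
}

int main(void)
{
	struct initack_chunk c = { .type = CID_INIT_ACK,
				   .init_tag = htonl(0xdeadbeef) };

	printf("vtag for INIT-ACK: 0x%" PRIx32 " (expect deadbeef)\n",
	       pick_vtag(c.type, &c, 0x1234));
	printf("vtag otherwise:    0x%" PRIx32 " (expect 1234)\n",
	       pick_vtag(0, &c, 0x1234));
	return 0;
}
```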
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 70a91ec..ddb0ba3 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_violation), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
/* SCTP_STATE_EMPTY */
TYPE_SCTP_FUNC(sctp_sf_ootb),
/* SCTP_STATE_CLOSED */
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8),
+ TYPE_SCTP_FUNC(sctp_sf_ootb),
/* SCTP_STATE_COOKIE_WAIT */
TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
/* SCTP_STATE_COOKIE_ECHOED */
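
Every sm_statetable.c hunk swaps the CLOSED-state handler from sctp_sf_tabort_8_4_8 to sctp_sf_ootb, so packets hitting a CLOSED endpoint get the full OOTB treatment, including the new chunk-length checks, instead of a bare ABORT. A toy version of that function-pointer dispatch, with two states and illustrative handler names:

```c
#include <stdio.h>

typedef int (*state_fn)(void);

static int sf_ootb(void)   { puts("handle as OOTB");  return 0; }
static int sf_tabort(void) { puts("send ABORT only"); return 0; }

enum state { ST_EMPTY, ST_CLOSED, ST_NUM };

/* One row of the event table: a handler per state. The patch
 * effectively changes the ST_CLOSED slot from sf_tabort to
 * sf_ootb across every chunk-event row. */
static state_fn handlers[ST_NUM] = {
	[ST_EMPTY]  = sf_ootb,
	[ST_CLOSED] = sf_ootb,	/* was sf_tabort before this series */
};

int main(void)
{
	(void)sf_tabort;	/* kept to show the replaced handler */
	return handlers[ST_CLOSED]();
}
```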
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 01c6364..772fbfb 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
* The function sctp_get_port_local() does duplicate address
* detection.
*/
+ addr->v4.sin_port = htons(snum);
if ((ret = sctp_get_port_local(sk, addr))) {
if (ret == (long) sk) {
/* This endpoint has a conflicting address. */
@@ -366,14 +367,10 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
if (!bp->port)
bp->port = inet_sk(sk)->num;
- /* Add the address to the bind address list. */
- sctp_local_bh_disable();
- sctp_write_lock(&ep->base.addr_lock);
-
- /* Use GFP_ATOMIC since BHs are disabled. */
+ /* Add the address to the bind address list.
+ * Use GFP_ATOMIC since BHs will be disabled.
+ */
ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
- sctp_write_unlock(&ep->base.addr_lock);
- sctp_local_bh_enable();
/* Copy back into socket for getsockname() use. */
if (!ret) {
@@ -543,15 +540,12 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
if (i < addrcnt)
continue;
- /* Use the first address in bind addr list of association as
- * Address Parameter of ASCONF CHUNK.
+ /* Use the first valid address in bind addr list of
+ * association as Address Parameter of ASCONF CHUNK.
*/
- sctp_read_lock(&asoc->base.addr_lock);
bp = &asoc->base.bind_addr;
p = bp->address_list.next;
laddr = list_entry(p, struct sctp_sockaddr_entry, list);
- sctp_read_unlock(&asoc->base.addr_lock);
-
chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
addrcnt, SCTP_PARAM_ADD_IP);
if (!chunk) {
@@ -566,8 +560,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
/* Add the new addresses to the bind address list with
* use_as_src set to 0.
*/
- sctp_local_bh_disable();
- sctp_write_lock(&asoc->base.addr_lock);
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = (union sctp_addr *)addr_buf;
@@ -577,8 +569,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
GFP_ATOMIC);
addr_buf += af->sockaddr_len;
}
- sctp_write_unlock(&asoc->base.addr_lock);
- sctp_local_bh_enable();
}
out:
@@ -650,13 +640,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
* socket routing and failover schemes. Refer to comments in
* sctp_do_bind(). -daisy
*/
- sctp_local_bh_disable();
- sctp_write_lock(&ep->base.addr_lock);
-
- retval = sctp_del_bind_addr(bp, sa_addr);
-
- sctp_write_unlock(&ep->base.addr_lock);
- sctp_local_bh_enable();
+ retval = sctp_del_bind_addr(bp, sa_addr, call_rcu);
addr_buf += af->sockaddr_len;
err_bindx_rem:
@@ -747,14 +731,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
* make sure that we do not delete all the addresses in the
* association.
*/
- sctp_read_lock(&asoc->base.addr_lock);
bp = &asoc->base.bind_addr;
laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
addrcnt, sp);
- sctp_read_unlock(&asoc->base.addr_lock);
if (!laddr)
continue;
+ /* We do not need RCU protection throughout this loop
+ * because this is done under a socket lock from the
+ * setsockopt call.
+ */
chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
SCTP_PARAM_DEL_IP);
if (!chunk) {
@@ -765,23 +751,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
/* Reset use_as_src flag for the addresses in the bind address
* list that are to be deleted.
*/
- sctp_local_bh_disable();
- sctp_write_lock(&asoc->base.addr_lock);
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
- list_for_each(pos1, &bp->address_list) {
- saddr = list_entry(pos1,
- struct sctp_sockaddr_entry,
- list);
+ list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, laddr))
saddr->use_as_src = 0;
}
addr_buf += af->sockaddr_len;
}
- sctp_write_unlock(&asoc->base.addr_lock);
- sctp_local_bh_enable();
/* Update the route and saddr entries for all the transports
* as some of the addresses in the bind address list are
@@ -4058,9 +4037,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
sctp_assoc_t id;
struct sctp_bind_addr *bp;
struct sctp_association *asoc;
- struct list_head *pos, *temp;
struct sctp_sockaddr_entry *addr;
- rwlock_t *addr_lock;
int cnt = 0;
if (len < sizeof(sctp_assoc_t))
@@ -4077,17 +4054,13 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
*/
if (0 == id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
- addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
} else {
asoc = sctp_id2assoc(sk, id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
- addr_lock = &asoc->base.addr_lock;
}
- sctp_read_lock(addr_lock);
-
/* If the endpoint is bound to 0.0.0.0 or ::0, count the valid
* addresses from the global local address list.
*/
@@ -4095,27 +4068,33 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
addr = list_entry(bp->address_list.next,
struct sctp_sockaddr_entry, list);
if (sctp_is_any(&addr->a)) {
- list_for_each_safe(pos, temp, &sctp_local_addr_list) {
- addr = list_entry(pos,
- struct sctp_sockaddr_entry,
- list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr,
+ &sctp_local_addr_list, list) {
+ if (!addr->valid)
+ continue;
+
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
+
cnt++;
}
+ rcu_read_unlock();
} else {
cnt = 1;
}
goto done;
}
- list_for_each(pos, &bp->address_list) {
+ /* Protection on the bound address list is not needed,
+ * since in the socket option context we hold the socket lock,
+ * so there is no way that the bound address list can change.
+ */
+ list_for_each_entry(addr, &bp->address_list, list) {
cnt ++;
}
-
done:
- sctp_read_unlock(addr_lock);
return cnt;
}
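The hunk above shows the reader half of the RCU conversion: the global sctp_local_addr_list is now walked under rcu_read_lock(), and entries whose valid flag a concurrent writer has cleared are skipped. A minimal self-contained sketch of the pattern, with a hypothetical entry type:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct addr_entry {		/* hypothetical; mirrors sctp_sockaddr_entry */
	struct list_head list;
	int valid;
	struct rcu_head rcu;
};

static LIST_HEAD(local_addr_list);

static int count_valid_addrs(void)
{
	struct addr_entry *e;
	int cnt = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &local_addr_list, list) {
		if (!e->valid)
			continue;	/* writer cleared it; free is deferred */
		cnt++;
	}
	rcu_read_unlock();
	return cnt;
}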
@@ -4126,14 +4105,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
int max_addrs, void *to,
int *bytes_copied)
{
- struct list_head *pos, *next;
struct sctp_sockaddr_entry *addr;
union sctp_addr temp;
int cnt = 0;
int addrlen;
- list_for_each_safe(pos, next, &sctp_local_addr_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+ if (!addr->valid)
+ continue;
+
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
@@ -4148,6 +4129,7 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
cnt ++;
if (cnt >= max_addrs) break;
}
+ rcu_read_unlock();
return cnt;
}
@@ -4155,14 +4137,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
size_t space_left, int *bytes_copied)
{
- struct list_head *pos, *next;
struct sctp_sockaddr_entry *addr;
union sctp_addr temp;
int cnt = 0;
int addrlen;
- list_for_each_safe(pos, next, &sctp_local_addr_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+ if (!addr->valid)
+ continue;
+
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
continue;
@@ -4170,8 +4154,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
&temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
- if (space_left < addrlen)
- return -ENOMEM;
+ if (space_left < addrlen) {
+ cnt = -ENOMEM;
+ break;
+ }
memcpy(to, &temp, addrlen);
to += addrlen;
@@ -4179,6 +4165,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
space_left -= addrlen;
*bytes_copied += addrlen;
}
+ rcu_read_unlock();
return cnt;
}
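One detail worth noting in the hunk above: inside an RCU read-side critical section the old "return -ENOMEM" would have skipped rcu_read_unlock(), so the error is now stored in cnt and the loop breaks to the common unlock. The general shape, again with the hypothetical entry type from the sketches above:

static int copy_addrs(struct list_head *head, char *to, size_t space_left,
		      size_t addrlen)
{
	struct addr_entry *e;	/* entry type as in the sketches above */
	int cnt = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, list) {
		if (!e->valid)
			continue;
		if (space_left < addrlen) {
			cnt = -ENOMEM;
			break;	/* never return with rcu_read_lock() held */
		}
		/* ... copy one sockaddr of addrlen bytes to 'to' ... */
		to += addrlen;
		space_left -= addrlen;
		cnt++;
	}
	rcu_read_unlock();	/* reached on both success and error */
	return cnt;
}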
@@ -4191,7 +4178,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
{
struct sctp_bind_addr *bp;
struct sctp_association *asoc;
- struct list_head *pos;
int cnt = 0;
struct sctp_getaddrs_old getaddrs;
struct sctp_sockaddr_entry *addr;
@@ -4199,7 +4185,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
union sctp_addr temp;
struct sctp_sock *sp = sctp_sk(sk);
int addrlen;
- rwlock_t *addr_lock;
int err = 0;
void *addrs;
void *buf;
@@ -4221,13 +4206,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
*/
if (0 == getaddrs.assoc_id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
- addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
} else {
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
- addr_lock = &asoc->base.addr_lock;
}
to = getaddrs.addrs;
@@ -4241,8 +4224,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
if (!addrs)
return -ENOMEM;
- sctp_read_lock(addr_lock);
-
/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
* addresses from the global local address list.
*/
@@ -4258,8 +4239,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
}
buf = addrs;
- list_for_each(pos, &bp->address_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ /* Protection on the bound address list is not needed since
+ * in the socket option context we hold a socket lock and
+ * thus the bound address list can't change.
+ */
+ list_for_each_entry(addr, &bp->address_list, list) {
memcpy(&temp, &addr->a, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
@@ -4271,8 +4255,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
}
copy_getaddrs:
- sctp_read_unlock(addr_lock);
-
/* copy the entire address list into the user provided space */
if (copy_to_user(to, addrs, bytes_copied)) {
err = -EFAULT;
@@ -4294,7 +4276,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
{
struct sctp_bind_addr *bp;
struct sctp_association *asoc;
- struct list_head *pos;
int cnt = 0;
struct sctp_getaddrs getaddrs;
struct sctp_sockaddr_entry *addr;
@@ -4302,7 +4283,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
union sctp_addr temp;
struct sctp_sock *sp = sctp_sk(sk);
int addrlen;
- rwlock_t *addr_lock;
int err = 0;
size_t space_left;
int bytes_copied = 0;
@@ -4323,13 +4303,11 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
*/
if (0 == getaddrs.assoc_id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
- addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
} else {
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
- addr_lock = &asoc->base.addr_lock;
}
to = optval + offsetof(struct sctp_getaddrs,addrs);
@@ -4339,8 +4317,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
if (!addrs)
return -ENOMEM;
- sctp_read_lock(addr_lock);
-
/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
* addresses from the global local address list.
*/
@@ -4352,21 +4328,24 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
space_left, &bytes_copied);
if (cnt < 0) {
err = cnt;
- goto error_lock;
+ goto out;
}
goto copy_getaddrs;
}
}
buf = addrs;
- list_for_each(pos, &bp->address_list) {
- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ /* Protection on the bound address list is not needed since
+ * in the socket option context we hold a socket lock and
+ * thus the bound address list can't change.
+ */
+ list_for_each_entry(addr, &bp->address_list, list) {
memcpy(&temp, &addr->a, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen) {
err = -ENOMEM; /*fixme: right error?*/
- goto error_lock;
+ goto out;
}
memcpy(buf, &temp, addrlen);
buf += addrlen;
@@ -4376,8 +4355,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
}
copy_getaddrs:
- sctp_read_unlock(addr_lock);
-
if (copy_to_user(to, addrs, bytes_copied)) {
err = -EFAULT;
goto out;
@@ -4388,12 +4365,6 @@ copy_getaddrs:
}
if (put_user(bytes_copied, optlen))
err = -EFAULT;
-
- goto out;
-
-error_lock:
- sctp_read_unlock(addr_lock);
-
out:
kfree(addrs);
return err;
@@ -5202,6 +5173,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
+ return 0;
}
/* Return if we are already listening. */
@@ -5249,6 +5221,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
+ return 0;
}
if (sctp_sstate(sk, LISTENING))
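Both listen paths receive the same one-line fix: a backlog of 0 means "stop listening", so after unhashing the endpoint and marking the socket closed the call should succeed immediately instead of falling through into the listen setup. A hedged reconstruction of the fixed fragment, pieced together from the hunks above:

	if (!backlog) {
		if (sctp_sstate(sk, CLOSED))
			return 0;
		sctp_unhash_endpoint(ep);
		sk->sk_state = SCTP_SS_CLOSED;
		return 0;	/* the added line: do not fall through */
	}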
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977..fa0ba2a 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
return retval;
}
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *event;
+ __u32 tsn;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ event = sctp_skb2event(pos);
+ tsn = event->tsn;
+
+ /* Since the entire message must be abandoned by the
+ * sender (item A3 in Section 3.5, RFC 3758), we can
+	 * free all fragments on the list that are less than
+ * or equal to ctsn_point
+ */
+ if (TSN_lte(tsn, fwd_tsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ } else
+ break;
+ }
+}
+
/* Helper function to gather skbs that have possibly become
* ordered by an incoming chunk.
*/
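The new flush helper compares TSNs with TSN_lte() rather than a plain <= because 32-bit TSNs wrap around; the comparison is serial-number arithmetic in the spirit of RFC 1982. A sketch of that idiom — the kernel's TSN_lt()/TSN_lte() behave along these lines, though this is not their verbatim definition:

#include <linux/types.h>

/* a precedes b in serial-number order iff (a - b) has the sign bit set */
static inline int tsn_lt(__u32 a, __u32 b)
{
	return ((__s32)(a - b)) < 0;
}

static inline int tsn_lte(__u32 a, __u32 b)
{
	return a == b || tsn_lt(a, b);
}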
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
/* Helper function to gather skbs that have possibly become
* ordered by forward tsn skipping their dependencies.
*/
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
csid = cevent->stream;
cssn = cevent->ssn;
- if (cssn != sctp_ssn_peek(in, csid))
+ /* Have we gone too far? */
+ if (csid > sid)
break;
- /* Found it, so mark in the ssnmap. */
- sctp_ssn_next(in, csid);
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ /* see if this ssn has been marked by skipping */
+ if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+ break;
__skb_unlink(pos, &ulpq->lobby);
- if (!event) {
+ if (!event)
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- } else {
- /* Attach all gathered skbs to the event. */
- __skb_queue_tail(&temp, pos);
- }
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(&temp, pos);
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* very first SKB on the 'temp' list.
*/
- if (event)
+ if (event) {
+	/* see if we have more ordered data that we can deliver */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
sctp_ulpq_tail_event(ulpq, event);
+ }
}
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * a Forward TSN chunk to skip over the abandoned ordered data
+ */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
/* Go find any other chunks that were waiting for
* ordering and deliver them if needed.
*/
- sctp_ulpq_reap_ordered(ulpq);
+ sctp_ulpq_reap_ordered(ulpq, sid);
return;
}
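Taken together, the ulpqueue changes give Forward TSN processing a natural two-step shape: purge reassembly fragments at or below the new cumulative TSN point, then skip each abandoned (stream, SSN) pair so ordered delivery can resume. A hedged sketch of a caller — the skip-entry layout follows struct sctp_fwdtsn_skip, but the wiring here is illustrative, not the actual call site:

#include <linux/sctp.h>		/* struct sctp_fwdtsn_skip */

static void process_fwdtsn(struct sctp_ulpq *ulpq, __u32 new_ctsn,
			   struct sctp_fwdtsn_skip *skip, int nskips)
{
	int i;

	/* 1. Drop partial messages overtaken by the new cumulative TSN. */
	sctp_ulpq_reasm_flushtsn(ulpq, new_ctsn);

	/* 2. Advance each affected stream past its abandoned SSN. */
	for (i = 0; i < nskips; i++)
		sctp_ulpq_skip(ulpq, ntohs(skip[i].stream),
			       ntohs(skip[i].ssn));
}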
diff --git a/net/socket.c b/net/socket.c
index 7d44453..b09eb90 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -777,9 +777,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (pos != 0)
return -ESPIPE;
- if (iocb->ki_left == 0) /* Match SYS5 behaviour */
- return 0;
-
x = alloc_sock_iocb(iocb, &siocb);
if (!x)
return -ENOMEM;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 12ff5da..036ab52 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1110,7 +1110,8 @@ svc_tcp_accept(struct svc_sock *svsk)
serv->sv_name);
printk(KERN_NOTICE
"%s: last TCP connect from %s\n",
- serv->sv_name, buf);
+ serv->sv_name, __svc_print_addr(sin,
+ buf, sizeof(buf)));
}
/*
* Always select the oldest socket. It's not fair,
@@ -1592,7 +1593,7 @@ svc_age_temp_sockets(unsigned long closure)
if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
continue;
- if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
+ if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
continue;
atomic_inc(&svsk->sk_inuse);
list_move(le, &to_be_aged);
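The sk_inuse test is the substantive fix in this hunk: membership on the temp-socket list appears to account for one reference by itself, so "idle" means a count of exactly 1, and the old non-zero test could never let a socket age out. The idiom, as a sketch:

static inline int svsk_idle(struct svc_sock *svsk)
{
	/* one reference belongs to the temp-socket list itself;
	 * anything above that means a real user holds the socket
	 */
	return atomic_read(&svsk->sk_inuse) <= 1 &&
	       !test_bit(SK_BUSY, &svsk->sk_flags);
}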
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7eabd55..9771451 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -213,7 +213,7 @@ out_fail_notifier:
out_fail_sysfs:
return err;
}
-module_init(cfg80211_init);
+subsys_initcall(cfg80211_init);
static void cfg80211_exit(void)
{
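Switching cfg80211_init from module_init() to subsys_initcall() moves the built-in wireless core earlier in boot: subsys initcalls run before device initcalls, so the cfg80211 class is registered before built-in drivers that need it. For reference, the initcall levels from include/linux/init.h, with a hypothetical example:

/*
 * core_initcall       (level 1)
 * postcore_initcall   (level 2)
 * arch_initcall       (level 3)
 * subsys_initcall     (level 4)  <- cfg80211_init now runs here
 * fs_initcall         (level 5)
 * device_initcall     (level 6)  <- module_init() for built-in code
 * late_initcall       (level 7)
 */
static int __init example_subsys_init(void)	/* hypothetical example */
{
	return 0;	/* registration work would go here */
}
subsys_initcall(example_subsys_init);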
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 88aaacd..2d5d225 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -52,12 +52,14 @@ static void wiphy_dev_release(struct device *dev)
cfg80211_dev_free(rdev);
}
+#ifdef CONFIG_HOTPLUG
static int wiphy_uevent(struct device *dev, char **envp,
int num_envp, char *buf, int size)
{
/* TODO, we probably need stuff here */
return 0;
}
+#endif
struct class ieee80211_class = {
.name = "ieee80211",
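The diff is cut off here, but the #ifdef around wiphy_uevent() presumably pairs with a conditional .dev_uevent assignment in this initializer; without the guard, CONFIG_HOTPLUG=n builds would warn about a defined-but-unused function. A plausible reconstruction of the full initializer, hedged accordingly:

struct class ieee80211_class = {
	.name = "ieee80211",
	.owner = THIS_MODULE,
	.dev_release = wiphy_dev_release,
#ifdef CONFIG_HOTPLUG
	.dev_uevent = wiphy_uevent,	/* only wired up when hotplug is built in */
#endif
};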