Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/distributed-arp-table.c   | 21
-rw-r--r--  net/bluetooth/hci_conn.c                 |  6
-rw-r--r--  net/bluetooth/hci_core.c                 |  8
-rw-r--r--  net/bluetooth/hci_event.c                |  2
-rw-r--r--  net/bluetooth/hidp/core.c                |  2
-rw-r--r--  net/bluetooth/l2cap_core.c               | 11
-rw-r--r--  net/bluetooth/sco.c                      |  2
-rw-r--r--  net/bluetooth/smp.c                      | 13
-rw-r--r--  net/bridge/br_stp_bpdu.c                 |  2
-rw-r--r--  net/core/datagram.c                      |  2
-rw-r--r--  net/core/pktgen.c                        |  9
-rw-r--r--  net/core/request_sock.c                  |  2
-rw-r--r--  net/core/scm.c                           |  5
-rw-r--r--  net/core/skbuff.c                        | 46
-rw-r--r--  net/ipv4/ah4.c                           | 18
-rw-r--r--  net/ipv4/arp.c                           | 21
-rw-r--r--  net/ipv4/datagram.c                      | 25
-rw-r--r--  net/ipv4/esp4.c                          | 12
-rw-r--r--  net/ipv4/ip_gre.c                        |  6
-rw-r--r--  net/ipv4/ipcomp.c                        |  7
-rw-r--r--  net/ipv4/ping.c                          |  1
-rw-r--r--  net/ipv4/raw.c                           |  1
-rw-r--r--  net/ipv4/route.c                         | 54
-rw-r--r--  net/ipv4/tcp_cong.c                      | 14
-rw-r--r--  net/ipv4/tcp_input.c                     |  8
-rw-r--r--  net/ipv4/tcp_ipv4.c                      | 15
-rw-r--r--  net/ipv4/udp.c                           |  1
-rw-r--r--  net/ipv6/addrconf.c                      |  1
-rw-r--r--  net/ipv6/ah6.c                           | 11
-rw-r--r--  net/ipv6/datagram.c                      | 16
-rw-r--r--  net/ipv6/esp6.c                          |  5
-rw-r--r--  net/ipv6/icmp.c                          | 12
-rw-r--r--  net/ipv6/ip6_flowlabel.c                 |  4
-rw-r--r--  net/ipv6/ip6_gre.c                       |  2
-rw-r--r--  net/ipv6/ip6_output.c                    |  4
-rw-r--r--  net/ipv6/ip6mr.c                         |  3
-rw-r--r--  net/ipv6/ipv6_sockglue.c                 |  6
-rw-r--r--  net/ipv6/netfilter/ip6t_NPT.c            | 18
-rw-r--r--  net/ipv6/raw.c                           |  6
-rw-r--r--  net/ipv6/route.c                         |  2
-rw-r--r--  net/ipv6/tcp_ipv6.c                      |  6
-rw-r--r--  net/ipv6/udp.c                           |  6
-rw-r--r--  net/l2tp/l2tp_core.c                     | 82
-rw-r--r--  net/l2tp/l2tp_core.h                     |  5
-rw-r--r--  net/l2tp/l2tp_ip6.c                      | 10
-rw-r--r--  net/l2tp/l2tp_ppp.c                      |  6
-rw-r--r--  net/mac80211/cfg.c                       | 15
-rw-r--r--  net/mac80211/ieee80211_i.h               |  6
-rw-r--r--  net/mac80211/mesh_hwmp.c                 |  5
-rw-r--r--  net/mac80211/mlme.c                      | 11
-rw-r--r--  net/mac80211/offchannel.c                | 19
-rw-r--r--  net/mac80211/scan.c                      | 15
-rw-r--r--  net/mac80211/tx.c                        |  9
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c    | 35
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c          |  2
-rw-r--r--  net/netfilter/nf_conntrack_core.c        |  9
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c  |  1
-rw-r--r--  net/netfilter/x_tables.c                 | 28
-rw-r--r--  net/netfilter/xt_CT.c                    |  4
-rw-r--r--  net/openvswitch/vport-netdev.c           | 16
-rw-r--r--  net/packet/af_packet.c                   | 10
-rw-r--r--  net/sched/sch_htb.c                      |  4
-rw-r--r--  net/sched/sch_netem.c                    | 12
-rw-r--r--  net/sctp/Kconfig                         |  4
-rw-r--r--  net/sctp/auth.c                          |  2
-rw-r--r--  net/sctp/endpointola.c                   |  5
-rw-r--r--  net/sctp/ipv6.c                          |  5
-rw-r--r--  net/sctp/outqueue.c                      | 12
-rw-r--r--  net/sctp/sm_statefuns.c                  |  4
-rw-r--r--  net/sctp/socket.c                        |  2
-rw-r--r--  net/sctp/sysctl.c                        |  4
-rw-r--r--  net/sunrpc/sched.c                       | 18
-rw-r--r--  net/sunrpc/svcsock.c                     |  2
-rw-r--r--  net/wireless/scan.c                      |  2
-rw-r--r--  net/xfrm/xfrm_policy.c                   |  2
-rw-r--r--  net/xfrm/xfrm_replay.c                   |  4
76 files changed, 535 insertions, 251 deletions
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 8e1d89d..5539215 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
/* this is an hash collision with the temporary selected node. Choose
* the one with the lowest address
*/
- if ((tmp_max == max) &&
+ if ((tmp_max == max) && max_orig_node &&
(batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
goto out;
@@ -738,6 +738,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
struct arphdr *arphdr;
struct ethhdr *ethhdr;
__be32 ip_src, ip_dst;
+ uint8_t *hw_src, *hw_dst;
uint16_t type = 0;
/* pull the ethernet header */
@@ -777,9 +778,23 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
ip_src = batadv_arp_ip_src(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
- ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
+ ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
+ ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
+ ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
goto out;
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
+ goto out;
+
+ /* we don't care about the destination MAC address in ARP requests */
+ if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
+ hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+ if (is_zero_ether_addr(hw_dst) ||
+ is_multicast_ether_addr(hw_dst))
+ goto out;
+ }
+
type = ntohs(arphdr->ar_op);
out:
return type;
@@ -1012,6 +1027,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
*/
ret = !batadv_is_my_client(bat_priv, hw_dst);
out:
+ if (ret)
+ kfree_skb(skb);
/* if ret == false -> packet has to be delivered to the interface */
return ret;
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25bfce0..4925a02 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
__u8 reason = hci_proto_disconn_ind(conn);
switch (conn->type) {
- case ACL_LINK:
- hci_acl_disconn(conn, reason);
- break;
case AMP_LINK:
hci_amp_disconn(conn, reason);
break;
+ default:
+ hci_acl_disconn(conn, reason);
+ break;
}
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 596660d..0f78e34 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2810,14 +2810,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
if (conn) {
hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
- hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &conn->dst, conn->type,
- conn->dst_type, 0, NULL, 0,
- conn->dev_class);
- hci_dev_unlock(hdev);
-
/* Send to upper protocol */
l2cap_recv_acldata(conn, skb, flags);
return;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 705078a..81b4448 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2688,7 +2688,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (ev->opcode != HCI_OP_NOP)
del_timer(&hdev->cmd_timer);
- if (ev->ncmd) {
+ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
queue_work(hdev->workqueue, &hdev->cmd_work);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b2bcbe2..a7352ff 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -931,7 +931,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;
- strncpy(hid->name, req->name, 128);
+ strncpy(hid->name, req->name, sizeof(req->name) - 1);
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&bt_sk(session->ctrl_sock->sk)->src);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c78208..22e6583 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3727,6 +3727,17 @@ sendresp:
static int l2cap_connect_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u8 *data)
{
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+ mgmt_device_connected(hdev, &hcon->dst, hcon->type,
+ hcon->dst_type, 0, NULL, 0,
+ hcon->dev_class);
+ hci_dev_unlock(hdev);
+
l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
return 0;
}
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 531a93d..57f250c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -352,7 +352,7 @@ static void __sco_sock_close(struct sock *sk)
case BT_CONNECTED:
case BT_CONFIG:
- if (sco_pi(sk)->conn) {
+ if (sco_pi(sk)->conn->hcon) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
hci_conn_put(sco_pi(sk)->conn->hcon);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 68a9587..5abefb1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(code));
+ /*
+ * The SMP context must be initialized for all other PDUs except
+ * pairing and security requests. If we get any other PDU when
+ * not initialized simply disconnect (done if this function
+ * returns an error).
+ */
+ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+ !conn->smp_chan) {
+ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+ kfree_skb(skb);
+ return -ENOTSUPP;
+ }
+
switch (code) {
case SMP_CMD_PAIRING_REQ:
reason = smp_cmd_pairing_req(conn, skb);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 7f884e3..8660ea3 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -16,6 +16,7 @@
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
+#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
skb->dev = p->dev;
skb->protocol = htons(ETH_P_802_2);
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, LLC_RESERVE);
memcpy(__skb_put(skb, length), data, length);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 0337e2b..368f9c3 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
skb_queue_walk(queue, skb) {
*peeked = skb->peeked;
if (flags & MSG_PEEK) {
- if (*off >= skb->len) {
+ if (*off >= skb->len && skb->len) {
*off -= skb->len;
continue;
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b29dacf..e6e1cbe 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,
return -EFAULT;
i += len;
mutex_lock(&pktgen_thread_lock);
- pktgen_add_device(t, f);
+ ret = pktgen_add_device(t, f);
mutex_unlock(&pktgen_thread_lock);
- ret = count;
- sprintf(pg_result, "OK: add_device=%s", f);
+ if (!ret) {
+ ret = count;
+ sprintf(pg_result, "OK: add_device=%s", f);
+ } else
+ sprintf(pg_result, "ERROR: can not add device %s", f);
goto out;
}
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index c31d9e8..4425148 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -186,8 +186,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
struct fastopen_queue *fastopenq =
inet_csk(lsk)->icsk_accept_queue.fastopenq;
- BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
-
tcp_sk(sk)->fastopen_rsk = NULL;
spin_lock_bh(&fastopenq->lock);
fastopenq->qlen--;
diff --git a/net/core/scm.c b/net/core/scm.c
index 57fb1ee..905dcc6 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -35,6 +35,7 @@
#include <net/sock.h>
#include <net/compat.h>
#include <net/scm.h>
+#include <net/cls_cgroup.h>
/*
@@ -302,8 +303,10 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
}
/* Bump the usage count and install the file. */
sock = sock_from_file(fp[i], &err);
- if (sock)
+ if (sock) {
sock_update_netprioidx(sock->sk, current);
+ sock_update_classid(sock->sk, current);
+ }
fd_install(new_fd, get_file(fp[i]));
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ab989b..32443eb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->network_header = old->network_header;
new->mac_header = old->mac_header;
new->inner_transport_header = old->inner_transport_header;
- new->inner_network_header = old->inner_transport_header;
+ new->inner_network_header = old->inner_network_header;
skb_dst_copy(new, old);
new->rxhash = old->rxhash;
new->ooo_okay = old->ooo_okay;
@@ -1649,7 +1649,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
static struct page *linear_to_page(struct page *page, unsigned int *len,
unsigned int *offset,
- struct sk_buff *skb, struct sock *sk)
+ struct sock *sk)
{
struct page_frag *pfrag = sk_page_frag(sk);
@@ -1682,14 +1682,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
static bool spd_fill_page(struct splice_pipe_desc *spd,
struct pipe_inode_info *pipe, struct page *page,
unsigned int *len, unsigned int offset,
- struct sk_buff *skb, bool linear,
+ bool linear,
struct sock *sk)
{
if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
return true;
if (linear) {
- page = linear_to_page(page, len, &offset, skb, sk);
+ page = linear_to_page(page, len, &offset, sk);
if (!page)
return true;
}
@@ -1706,23 +1706,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,
return false;
}
-static inline void __segment_seek(struct page **page, unsigned int *poff,
- unsigned int *plen, unsigned int off)
-{
- unsigned long n;
-
- *poff += off;
- n = *poff / PAGE_SIZE;
- if (n)
- *page = nth_page(*page, n);
-
- *poff = *poff % PAGE_SIZE;
- *plen -= off;
-}
-
static bool __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
- unsigned int *len, struct sk_buff *skb,
+ unsigned int *len,
struct splice_pipe_desc *spd, bool linear,
struct sock *sk,
struct pipe_inode_info *pipe)
@@ -1737,23 +1723,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,
}
/* ignore any bits we already processed */
- if (*off) {
- __segment_seek(&page, &poff, &plen, *off);
- *off = 0;
- }
+ poff += *off;
+ plen -= *off;
+ *off = 0;
do {
unsigned int flen = min(*len, plen);
- /* the linear region may spread across several pages */
- flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
-
- if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
+ if (spd_fill_page(spd, pipe, page, &flen, poff,
+ linear, sk))
return true;
-
- __segment_seek(&page, &poff, &plen, flen);
+ poff += flen;
+ plen -= flen;
*len -= flen;
-
} while (*len && plen);
return false;
@@ -1777,7 +1759,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
- offset, len, skb, spd,
+ offset, len, spd,
skb_head_is_locked(skb),
sk, pipe))
return true;
@@ -1790,7 +1772,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
if (__splice_segment(skb_frag_page(f),
f->page_offset, skb_frag_size(f),
- offset, len, skb, spd, false, sk, pipe))
+ offset, len, spd, false, sk, pipe))
return true;
}
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a0d8392..a69b4e4 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -269,7 +269,11 @@ static void ah_input_done(struct crypto_async_request *base, int err)
skb->network_header += ah_hlen;
memcpy(skb_network_header(skb), work_iph, ihl);
__skb_pull(skb, ah_hlen + ihl);
- skb_set_transport_header(skb, -ihl);
+
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ skb_reset_transport_header(skb);
+ else
+ skb_set_transport_header(skb, -ihl);
out:
kfree(AH_SKB_CB(skb)->tmp);
xfrm_input_resume(skb, err);
@@ -381,7 +385,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb->network_header += ah_hlen;
memcpy(skb_network_header(skb), work_iph, ihl);
__skb_pull(skb, ah_hlen + ihl);
- skb_set_transport_header(skb, -ihl);
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ skb_reset_transport_header(skb);
+ else
+ skb_set_transport_header(skb, -ihl);
err = nexthdr;
@@ -413,9 +420,12 @@ static void ah4_err(struct sk_buff *skb, u32 info)
if (!x)
return;
- if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+ atomic_inc(&flow_cache_genid);
+ rt_genid_bump(net);
+
ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
- else
+ } else
ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9547a273..ded146b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -928,24 +928,25 @@ static void parp_redo(struct sk_buff *skb)
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- struct arphdr *arp;
+ const struct arphdr *arp;
+
+ if (dev->flags & IFF_NOARP ||
+ skb->pkt_type == PACKET_OTHERHOST ||
+ skb->pkt_type == PACKET_LOOPBACK)
+ goto freeskb;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out_of_mem;
/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
if (!pskb_may_pull(skb, arp_hdr_len(dev)))
goto freeskb;
arp = arp_hdr(skb);
- if (arp->ar_hln != dev->addr_len ||
- dev->flags & IFF_NOARP ||
- skb->pkt_type == PACKET_OTHERHOST ||
- skb->pkt_type == PACKET_LOOPBACK ||
- arp->ar_pln != 4)
+ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
goto freeskb;
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb == NULL)
- goto out_of_mem;
-
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 424fafb..b28e863 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -85,3 +85,28 @@ out:
return err;
}
EXPORT_SYMBOL(ip4_datagram_connect);
+
+void ip4_datagram_release_cb(struct sock *sk)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct ip_options_rcu *inet_opt;
+ __be32 daddr = inet->inet_daddr;
+ struct flowi4 fl4;
+ struct rtable *rt;
+
+ if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
+ return;
+
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt && inet_opt->opt.srr)
+ daddr = inet_opt->opt.faddr;
+ rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
+ inet->inet_saddr, inet->inet_dport,
+ inet->inet_sport, sk->sk_protocol,
+ RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ if (!IS_ERR(rt))
+ __sk_dst_set(sk, &rt->dst);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b61e9de..3b4f0cd 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -346,7 +346,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
pskb_trim(skb, skb->len - alen - padlen - 2);
__skb_pull(skb, hlen);
- skb_set_transport_header(skb, -ihl);
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ skb_reset_transport_header(skb);
+ else
+ skb_set_transport_header(skb, -ihl);
err = nexthdr[1];
@@ -499,9 +502,12 @@ static void esp4_err(struct sk_buff *skb, u32 info)
if (!x)
return;
- if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+ atomic_inc(&flow_cache_genid);
+ rt_genid_bump(net);
+
ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
- else
+ } else
ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 303012a..e81b1ca 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -963,8 +963,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
ptr--;
}
if (tunnel->parms.o_flags&GRE_CSUM) {
+ int offset = skb_transport_offset(skb);
+
*ptr = 0;
- *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
+ *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
+ skb->len - offset,
+ 0));
}
}
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d3ab47e..9a46dae 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -47,9 +47,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
if (!x)
return;
- if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+ atomic_inc(&flow_cache_genid);
+ rt_genid_bump(net);
+
ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
- else
+ } else
ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8f3d054..6f9c072 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -738,6 +738,7 @@ struct proto ping_prot = {
.recvmsg = ping_recvmsg,
.bind = ping_bind,
.backlog_rcv = ping_queue_rcv_skb,
+ .release_cb = ip4_datagram_release_cb,
.hash = ping_v4_hash,
.unhash = ping_v4_unhash,
.get_port = ping_v4_get_port,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 73d1e4d..6f08991 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -894,6 +894,7 @@ struct proto raw_prot = {
.recvmsg = raw_recvmsg,
.bind = raw_bind,
.backlog_rcv = raw_rcv_skb,
+ .release_cb = ip4_datagram_release_cb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw_sock),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 844a9ef..a0fcc47 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -912,6 +912,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
struct dst_entry *dst = &rt->dst;
struct fib_result res;
+ if (dst_metric_locked(dst, RTAX_MTU))
+ return;
+
if (dst->dev->mtu < mtu)
return;
@@ -962,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
-void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
@@ -975,6 +978,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
ip_rt_put(rt);
}
}
+
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+{
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct dst_entry *dst;
+ bool new = false;
+
+ bh_lock_sock(sk);
+ rt = (struct rtable *) __sk_dst_get(sk);
+
+ if (sock_owned_by_user(sk) || !rt) {
+ __ipv4_sk_update_pmtu(skb, sk, mtu);
+ goto out;
+ }
+
+ __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+ if (!__sk_dst_check(sk, 0)) {
+ rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+ if (IS_ERR(rt))
+ goto out;
+
+ new = true;
+ }
+
+ __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+
+ dst = dst_check(&rt->dst, 0);
+ if (!dst) {
+ if (new)
+ dst_release(&rt->dst);
+
+ rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+ if (IS_ERR(rt))
+ goto out;
+
+ new = true;
+ }
+
+ if (new)
+ __sk_dst_set(sk, &rt->dst);
+
+out:
+ bh_unlock_sock(sk);
+}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
@@ -1120,7 +1170,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
if (!mtu || time_after_eq(jiffies, rt->dst.expires))
mtu = dst_metric_raw(dst, RTAX_MTU);
- if (mtu && rt_is_output_route(rt))
+ if (mtu)
return mtu;
mtu = dst->dev->mtu;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 291f2ed..cdf2e70 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
{
int cnt; /* increase in packets */
unsigned int delta = 0;
+ u32 snd_cwnd = tp->snd_cwnd;
+
+ if (unlikely(!snd_cwnd)) {
+ pr_err_once("snd_cwnd is nul, please report this bug.\n");
+ snd_cwnd = 1U;
+ }
/* RFC3465: ABC Slow start
* Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
else
- cnt = tp->snd_cwnd; /* exponential increase */
+ cnt = snd_cwnd; /* exponential increase */
/* RFC3465: ABC
* We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
tp->bytes_acked = 0;
tp->snd_cwnd_cnt += cnt;
- while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- tp->snd_cwnd_cnt -= tp->snd_cwnd;
+ while (tp->snd_cwnd_cnt >= snd_cwnd) {
+ tp->snd_cwnd_cnt -= snd_cwnd;
delta++;
}
- tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+ tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18f97ca..ad70a96 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
}
} else {
if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+ if (!tcp_packets_in_flight(tp)) {
+ tcp_enter_frto_loss(sk, 2, flag);
+ return true;
+ }
+
/* Prevent sending of new data. */
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp));
@@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
* the remote receives only the retransmitted (regular) SYNs: either
* the original SYN-data or the corresponding SYN-ACK is lost.
*/
- syn_drop = (cookie->len <= 0 && data &&
- inet_csk(sk)->icsk_retransmits);
+ syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 54139fa..eadb693 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
* We do take care of PMTU discovery (RFC1191) special case :
* we can receive locally generated ICMP messages while socket is held.
*/
- if (sock_owned_by_user(sk) &&
- type != ICMP_DEST_UNREACH &&
- code != ICMP_FRAG_NEEDED)
- NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
-
+ if (sock_owned_by_user(sk)) {
+ if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+ NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+ }
if (sk->sk_state == TCP_CLOSE)
goto out;
@@ -497,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
* errors returned from accept().
*/
inet_csk_reqsk_queue_drop(sk, req, prev);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
goto out;
case TCP_SYN_SENT:
@@ -1501,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* clogging syn queue with openreqs with exponentially increasing
* timeout.
*/
- if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
goto drop;
+ }
req = inet_reqsk_alloc(&tcp_request_sock_ops);
if (!req)
@@ -1667,6 +1669,7 @@ drop_and_release:
drop_and_free:
reqsk_free(req);
drop:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 79c8dbe..1f4d405 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1952,6 +1952,7 @@ struct proto udp_prot = {
.recvmsg = udp_recvmsg,
.sendpage = udp_sendpage,
.backlog_rcv = __udp_queue_rcv_skb,
+ .release_cb = ip4_datagram_release_cb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 420e563..1b5d8cb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
if (dev->addr_len != IEEE802154_ADDR_LEN)
return -1;
memcpy(eui, dev->dev_addr, 8);
+ eui[0] ^= 2;
return 0;
}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ecc35b9..3842331 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -472,7 +472,10 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
skb->network_header += ah_hlen;
memcpy(skb_network_header(skb), work_iph, hdr_len);
__skb_pull(skb, ah_hlen + hdr_len);
- skb_set_transport_header(skb, -hdr_len);
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ skb_reset_transport_header(skb);
+ else
+ skb_set_transport_header(skb, -hdr_len);
out:
kfree(AH_SKB_CB(skb)->tmp);
xfrm_input_resume(skb, err);
@@ -593,9 +596,13 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
skb->network_header += ah_hlen;
memcpy(skb_network_header(skb), work_iph, hdr_len);
- skb->transport_header = skb->network_header;
__skb_pull(skb, ah_hlen + hdr_len);
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ skb_reset_transport_header(skb);
+ else
+ skb_set_transport_header(skb, -hdr_len);
+
err = nexthdr;
out_free:
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 8edf260..7a778b9 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
if (skb->protocol == htons(ETH_P_IPV6)) {
sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
- datagram_recv_ctl(sk, msg, skb);
+ ip6_datagram_recv_ctl(sk, msg, skb);
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin->sin6_scope_id = IP6CB(skb)->iif;
} else {
@@ -468,7 +468,8 @@ out:
}
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet6_skb_parm *opt = IP6CB(skb);
@@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
}
return 0;
}
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
-int datagram_send_ctl(struct net *net, struct sock *sk,
- struct msghdr *msg, struct flowi6 *fl6,
- struct ipv6_txoptions *opt,
- int *hlimit, int *tclass, int *dontfrag)
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+ struct msghdr *msg, struct flowi6 *fl6,
+ struct ipv6_txoptions *opt,
+ int *hlimit, int *tclass, int *dontfrag)
{
struct in6_pktinfo *src_info;
struct cmsghdr *cmsg;
@@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
exit_f:
return err;
}
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 282f372..40ffd72 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -300,7 +300,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
pskb_trim(skb, skb->len - alen - padlen - 2);
__skb_pull(skb, hlen);
- skb_set_transport_header(skb, -hdr_len);
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ skb_reset_transport_header(skb);
+ else
+ skb_set_transport_header(skb, -hdr_len);
err = nexthdr[1];
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b4a9fd5..fff5bdd 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -81,10 +81,22 @@ static inline struct sock *icmpv6_sk(struct net *net)
return net->ipv6.icmp_sk[smp_processor_id()];
}
+static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info)
+{
+ struct net *net = dev_net(skb->dev);
+
+ if (type == ICMPV6_PKT_TOOBIG)
+ ip6_update_pmtu(skb, net, info, 0, 0);
+ else if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, 0, 0);
+}
+
static int icmpv6_rcv(struct sk_buff *skb);
static const struct inet6_protocol icmpv6_protocol = {
.handler = icmpv6_rcv,
+ .err_handler = icmpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 29124b7..d6de4b4 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
msg.msg_control = (void*)(fl->opt+1);
memset(&flowi6, 0, sizeof(flowi6));
- err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
- &junk, &junk);
+ err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+ &junk, &junk, &junk);
if (err)
goto done;
err = -EINVAL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c727e47..131dd09 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
int ret;
if (!ip6_tnl_xmit_ctl(t))
- return -1;
+ goto tx_err;
switch (skb->protocol) {
case htons(ETH_P_IP):
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5552d13..0c7c03d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1213,10 +1213,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
if (dst_allfrag(rt->dst.path))
cork->flags |= IPCORK_ALLFRAG;
cork->length = 0;
- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
+ exthdrlen = (opt ? opt->opt_flen : 0);
length += exthdrlen;
transhdrlen += exthdrlen;
- dst_exthdrlen = rt->dst.header_len;
+ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
} else {
rt = (struct rt6_info *)cork->dst;
fl6 = &inet->cork.fl.u.ip6;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 26dcdec..8fd154e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1710,6 +1710,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
return -EINVAL;
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
+ /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ if (v != RT_TABLE_DEFAULT && v >= 100000000)
+ return -EINVAL;
if (sk == mrt->mroute6_sk)
return -EBUSY;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ee94d31..d1e2e8e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -476,8 +476,8 @@ sticky_done:
msg.msg_controllen = optlen;
msg.msg_control = (void*)(opt+1);
- retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
- &junk);
+ retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
+ &junk, &junk);
if (retv)
goto done;
update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
release_sock(sk);
if (skb) {
- int err = datagram_recv_ctl(sk, &msg, skb);
+ int err = ip6_datagram_recv_ctl(sk, &msg, skb);
kfree_skb(skb);
if (err)
return err;
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 7302b0b..83acc14 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
+#include <net/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_ipv6/ip6t_NPT.h>
@@ -18,11 +19,20 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
{
struct ip6t_npt_tginfo *npt = par->targinfo;
__wsum src_sum = 0, dst_sum = 0;
+ struct in6_addr pfx;
unsigned int i;
if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
return -EINVAL;
+ /* Ensure that LSB of prefix is zero */
+ ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len);
+ if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6))
+ return -EINVAL;
+ ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len);
+ if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6))
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
src_sum = csum_add(src_sum,
(__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
@@ -30,7 +40,7 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
(__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
}
- npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum);
+ npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));
return 0;
}
@@ -51,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
idx = i / 32;
addr->s6_addr32[idx] &= mask;
- addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx];
+ addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx];
}
if (pfx_len <= 48)
@@ -66,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
return false;
}
- sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx],
- npt->adjustment);
+ sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]),
+ csum_unfold(npt->adjustment)));
if (sum == CSUM_MANGLED_0)
sum = 0;
*(__force __sum16 *)&addr->s6_addr16[idx] = sum;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6cd29b1..70fa814 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
sock_recv_ts_and_drops(msg, sk, skb);
if (np->rxopt.all)
- datagram_recv_ctl(sk, msg, skb);
+ ip6_datagram_recv_ctl(sk, msg, skb);
err = copied;
if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
- err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
- &hlimit, &tclass, &dontfrag);
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+ &hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e229a3b..363d8b7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -928,7 +928,7 @@ restart:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
- if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 93825dd..4f435371 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
inet_csk_reqsk_queue_drop(sk, req, prev);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
goto out;
case TCP_SYN_SENT:
@@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
goto drop;
+ }
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
if (req == NULL)
@@ -1108,6 +1111,7 @@ drop_and_release:
drop_and_free:
reqsk_free(req);
drop:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return 0; /* don't send reset */
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dfaa29b..fb08329 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -443,7 +443,7 @@ try_again:
ip_cmsg_recv(msg, skb);
} else {
if (np->rxopt.all)
- datagram_recv_ctl(sk, msg, skb);
+ ip6_datagram_recv_ctl(sk, msg, skb);
}
err = copied;
@@ -1153,8 +1153,8 @@ do_udp_sendmsg:
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
- err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
- &hlimit, &tclass, &dontfrag);
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+ &hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1a9f372..2ac884d 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
}
+/* Lookup the tunnel socket, possibly involving the fs code if the socket is
+ * owned by userspace. A struct sock returned from this function must be
+ * released using l2tp_tunnel_sock_put once you're done with it.
+ */
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
+{
+ int err = 0;
+ struct socket *sock = NULL;
+ struct sock *sk = NULL;
+
+ if (!tunnel)
+ goto out;
+
+ if (tunnel->fd >= 0) {
+ /* Socket is owned by userspace, who might be in the process
+ * of closing it. Look the socket up using the fd to ensure
+ * consistency.
+ */
+ sock = sockfd_lookup(tunnel->fd, &err);
+ if (sock)
+ sk = sock->sk;
+ } else {
+ /* Socket is owned by kernelspace */
+ sk = tunnel->sock;
+ }
+
+out:
+ return sk;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
+
+/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
+void l2tp_tunnel_sock_put(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+ if (tunnel) {
+ if (tunnel->fd >= 0) {
+ /* Socket is owned by userspace */
+ sockfd_put(sk->sk_socket);
+ }
+ sock_put(sk);
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
+
/* Lookup a session by id in the global session list
*/
static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
struct udphdr *uh;
struct inet_sock *inet;
__wsum csum;
- int old_headroom;
- int new_headroom;
int headroom;
int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
*/
headroom = NET_SKB_PAD + sizeof(struct iphdr) +
uhlen + hdr_len;
- old_headroom = skb_headroom(skb);
if (skb_cow_head(skb, headroom)) {
kfree_skb(skb);
return NET_XMIT_DROP;
}
- new_headroom = skb_headroom(skb);
skb_orphan(skb);
- skb->truesize += new_headroom - old_headroom;
-
/* Setup L2TP header */
session->build_header(session, __skb_push(skb, hdr_len));
@@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
tunnel->old_sk_destruct = sk->sk_destruct;
sk->sk_destruct = &l2tp_tunnel_destruct;
tunnel->sock = sk;
+ tunnel->fd = fd;
lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
sk->sk_allocation = GFP_ATOMIC;
@@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
*/
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
- int err = 0;
- struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
+ int err = -EBADF;
+ struct socket *sock = NULL;
+ struct sock *sk = NULL;
+
+ sk = l2tp_tunnel_sock_lookup(tunnel);
+ if (!sk)
+ goto out;
+
+ sock = sk->sk_socket;
+ BUG_ON(!sock);
/* Force the tunnel socket to close. This will eventually
* cause the tunnel to be deleted via the normal socket close
* mechanisms when userspace closes the tunnel socket.
*/
- if (sock != NULL) {
- err = inet_shutdown(sock, 2);
+ err = inet_shutdown(sock, 2);
- /* If the tunnel's socket was created by the kernel,
- * close the socket here since the socket was not
- * created by userspace.
- */
- if (sock->file == NULL)
- err = inet_release(sock);
- }
+ /* If the tunnel's socket was created by the kernel,
+ * close the socket here since the socket was not
+ * created by userspace.
+ */
+ if (sock->file == NULL)
+ err = inet_release(sock);
+ l2tp_tunnel_sock_put(sk);
+out:
return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 56d583e..e62204c 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,7 +188,8 @@ struct l2tp_tunnel {
int (*recv_payload_hook)(struct sk_buff *skb);
void (*old_sk_destruct)(struct sock *);
struct sock *sock; /* Parent socket */
- int fd;
+ int fd; /* Parent fd, if tunnel socket
+ * was created by userspace */
uint8_t priv[0]; /* private data */
};
@@ -228,6 +229,8 @@ out:
return tunnel;
}
+extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+extern void l2tp_tunnel_sock_put(struct sock *sk);
extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 9275471..8ee4a86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
- err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
- &hlimit, &tclass, &dontfrag);
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+ &hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
- struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
size_t copied = 0;
int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
lsa->l2tp_scope_id = IP6CB(skb)->iif;
}
- if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
+ if (np->rxopt.all)
+ ip6_datagram_recv_ctl(sk, msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 286366e..716605c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
struct l2tp_session *session;
struct l2tp_tunnel *tunnel;
struct pppol2tp_session *ps;
- int old_headroom;
- int new_headroom;
int uhlen, headroom;
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (tunnel == NULL)
goto abort_put_sess;
- old_headroom = skb_headroom(skb);
uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
headroom = NET_SKB_PAD +
sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (skb_cow_head(skb, headroom))
goto abort_put_sess_tun;
- new_headroom = skb_headroom(skb);
- skb->truesize += new_headroom - old_headroom;
-
/* Setup PPP header */
__skb_push(skb, sizeof(ppph));
skb->data[0] = ppph[0];
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 47e0aca..0479c64 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -164,7 +164,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
sta = sta_info_get(sdata, mac_addr);
else
sta = sta_info_get_bss(sdata, mac_addr);
- if (!sta) {
+ /*
+ * The ASSOC test makes sure the driver is ready to
+ * receive the key. When wpa_supplicant has roamed
+ * using FT, it attempts to set the key before
+ * association has completed, this rejects that attempt
+ * so it will set the key again after assocation.
+ *
+ * TODO: accept the key if we have a station entry and
+ * add it to the device after the station.
+ */
+ if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
ieee80211_key_free(sdata->local, key);
err = -ENOENT;
goto out_unlock;
@@ -1994,7 +2004,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate));
+ memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+ sizeof(int) * IEEE80211_NUM_BANDS);
return 0;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8563b9a..2ed065c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1358,10 +1358,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
void ieee80211_sched_scan_stopped_work(struct work_struct *work);
/* off-channel helpers */
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
- bool offchannel_ps_enable);
-void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool offchannel_ps_disable);
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+void ieee80211_offchannel_return(struct ieee80211_local *local);
void ieee80211_roc_setup(struct ieee80211_local *local);
void ieee80211_start_next_roc(struct ieee80211_local *local);
void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 47aeee2..2659e42 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -215,6 +215,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
skb->priority = 7;
info->control.vif = &sdata->vif;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
}
@@ -246,11 +247,13 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
return -EAGAIN;
skb = dev_alloc_skb(local->tx_headroom +
+ IEEE80211_ENCRYPT_HEADROOM +
+ IEEE80211_ENCRYPT_TAILROOM +
hdr_len +
2 + 15 /* PERR IE */);
if (!skb)
return -1;
- skb_reserve(skb, local->tx_headroom);
+ skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a355292..5107248 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3400,6 +3400,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ret = 0;
+out:
while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
IEEE80211_CHAN_DISABLED)) {
if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
@@ -3408,14 +3409,13 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
goto out;
}
- ret = chandef_downgrade(chandef);
+ ret |= chandef_downgrade(chandef);
}
if (chandef->width != vht_chandef.width)
sdata_info(sdata,
- "local regulatory prevented using AP HT/VHT configuration, downgraded\n");
+ "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
-out:
WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
return ret;
}
@@ -3529,8 +3529,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
*/
ret = ieee80211_vif_use_channel(sdata, &chandef,
IEEE80211_CHANCTX_SHARED);
- while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
ifmgd->flags |= chandef_downgrade(&chandef);
+ ret = ieee80211_vif_use_channel(sdata, &chandef,
+ IEEE80211_CHANCTX_SHARED);
+ }
return ret;
}
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index a5379ae..a3ad4c3 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_reset_conn_monitor(sdata);
}
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
- bool offchannel_ps_enable)
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
@@ -134,8 +133,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
netif_tx_stop_all_queues(sdata->dev);
- if (offchannel_ps_enable &&
- (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
sdata->u.mgd.associated)
ieee80211_offchannel_ps_enable(sdata);
}
@@ -143,8 +141,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
mutex_unlock(&local->iflist_mtx);
}
-void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool offchannel_ps_disable)
+void ieee80211_offchannel_return(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
@@ -163,11 +160,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
continue;
/* Tell AP we're back */
- if (offchannel_ps_disable &&
- sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.associated)
- ieee80211_offchannel_ps_disable(sdata);
- }
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_disable(sdata);
if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
/*
@@ -385,7 +380,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
local->tmp_channel = NULL;
ieee80211_hw_config(local, 0);
- ieee80211_offchannel_return(local, true);
+ ieee80211_offchannel_return(local);
}
ieee80211_recalc_idle(local);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d59fc68..bf82e69 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -292,7 +292,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
if (!was_hw_scan) {
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
- ieee80211_offchannel_return(local, true);
+ ieee80211_offchannel_return(local);
}
ieee80211_recalc_idle(local);
@@ -341,7 +341,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
- ieee80211_offchannel_stop_vifs(local, true);
+ ieee80211_offchannel_stop_vifs(local);
ieee80211_configure_filter(local);
@@ -678,12 +678,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
local->scan_channel = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- /*
- * Re-enable vifs and beaconing. Leave PS
- * in off-channel state..will put that back
- * on-channel at the end of scanning.
- */
- ieee80211_offchannel_return(local, false);
+ /* disable PS */
+ ieee80211_offchannel_return(local);
*next_delay = HZ / 5;
/* afterwards, resume scan & go to next channel */
@@ -693,8 +689,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
static void ieee80211_scan_state_resume(struct ieee80211_local *local,
unsigned long *next_delay)
{
- /* PS already is in off-channel mode */
- ieee80211_offchannel_stop_vifs(local, false);
+ ieee80211_offchannel_stop_vifs(local);
if (local->ops->flush) {
drv_flush(local, false);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e9eadc4..467c1d1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1673,10 +1673,13 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
chanctx_conf =
rcu_dereference(tmp_sdata->vif.chanctx_conf);
}
- if (!chanctx_conf)
- goto fail_rcu;
- chan = chanctx_conf->def.chan;
+ if (chanctx_conf)
+ chan = chanctx_conf->def.chan;
+ else if (!local->use_chanctx)
+ chan = local->_oper_channel;
+ else
+ goto fail_rcu;
/*
* Frame injection is not allowed if beaconing is not allowed
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 746048b..ae8ec6f 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
return 1;
}
+static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
+ unsigned int sctphoff)
+{
+ __u32 crc32;
+ struct sk_buff *iter;
+
+ crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
+ skb_walk_frags(skb, iter)
+ crc32 = sctp_update_cksum((u8 *) iter->data,
+ skb_headlen(iter), crc32);
+ sctph->checksum = sctp_end_cksum(crc32);
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
static int
sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
sctp_sctphdr_t *sctph;
unsigned int sctphoff = iph->len;
- struct sk_buff *iter;
- __be32 crc32;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
@@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
sctph = (void *) skb_network_header(skb) + sctphoff;
sctph->source = cp->vport;
- /* Calculate the checksum */
- crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
- skb_walk_frags(skb, iter)
- crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
- crc32);
- crc32 = sctp_end_cksum(crc32);
- sctph->checksum = crc32;
+ sctp_nat_csum(skb, sctph, sctphoff);
return 1;
}
@@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
{
sctp_sctphdr_t *sctph;
unsigned int sctphoff = iph->len;
- struct sk_buff *iter;
- __be32 crc32;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
@@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
sctph = (void *) skb_network_header(skb) + sctphoff;
sctph->dest = cp->dport;
- /* Calculate the checksum */
- crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
- skb_walk_frags(skb, iter)
- crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
- crc32);
- crc32 = sctp_end_cksum(crc32);
- sctph->checksum = crc32;
+ sctp_nat_csum(skb, sctph, sctphoff);
return 1;
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index effa10c..44fd10c 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
GFP_KERNEL);
if (!tinfo->buf)
goto outtinfo;
+ } else {
+ tinfo->buf = NULL;
}
tinfo->id = id;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 016d95e..e4a0c4f 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1376,11 +1376,12 @@ void nf_conntrack_cleanup(struct net *net)
synchronize_net();
nf_conntrack_proto_fini(net);
nf_conntrack_cleanup_net(net);
+}
- if (net_eq(net, &init_net)) {
- RCU_INIT_POINTER(nf_ct_destroy, NULL);
- nf_conntrack_cleanup_init_net();
- }
+void nf_conntrack_cleanup_end(void)
+{
+ RCU_INIT_POINTER(nf_ct_destroy, NULL);
+ nf_conntrack_cleanup_init_net();
}
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 363285d..e7185c68 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -575,6 +575,7 @@ static int __init nf_conntrack_standalone_init(void)
static void __exit nf_conntrack_standalone_fini(void)
{
unregister_pernet_subsys(&nf_conntrack_net_ops);
+ nf_conntrack_cleanup_end();
}
module_init(nf_conntrack_standalone_init);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8d987c3..7b3a9e5 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -345,19 +345,27 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
}
EXPORT_SYMBOL_GPL(xt_find_revision);
-static char *textify_hooks(char *buf, size_t size, unsigned int mask)
+static char *
+textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
- static const char *const names[] = {
+ static const char *const inetbr_names[] = {
"PREROUTING", "INPUT", "FORWARD",
"OUTPUT", "POSTROUTING", "BROUTING",
};
- unsigned int i;
+ static const char *const arp_names[] = {
+ "INPUT", "FORWARD", "OUTPUT",
+ };
+ const char *const *names;
+ unsigned int i, max;
char *p = buf;
bool np = false;
int res;
+ names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
+ max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
+ ARRAY_SIZE(inetbr_names);
*p = '\0';
- for (i = 0; i < ARRAY_SIZE(names); ++i) {
+ for (i = 0; i < max; ++i) {
if (!(mask & (1 << i)))
continue;
res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
@@ -402,8 +410,10 @@ int xt_check_match(struct xt_mtchk_param *par,
pr_err("%s_tables: %s match: used from hooks %s, but only "
"valid from %s\n",
xt_prefix[par->family], par->match->name,
- textify_hooks(used, sizeof(used), par->hook_mask),
- textify_hooks(allow, sizeof(allow), par->match->hooks));
+ textify_hooks(used, sizeof(used), par->hook_mask,
+ par->family),
+ textify_hooks(allow, sizeof(allow), par->match->hooks,
+ par->family));
return -EINVAL;
}
if (par->match->proto && (par->match->proto != proto || inv_proto)) {
@@ -575,8 +585,10 @@ int xt_check_target(struct xt_tgchk_param *par,
pr_err("%s_tables: %s target: used from hooks %s, but only "
"usable from %s\n",
xt_prefix[par->family], par->target->name,
- textify_hooks(used, sizeof(used), par->hook_mask),
- textify_hooks(allow, sizeof(allow), par->target->hooks));
+ textify_hooks(used, sizeof(used), par->hook_mask,
+ par->family),
+ textify_hooks(allow, sizeof(allow), par->target->hooks,
+ par->family));
return -EINVAL;
}
if (par->target->proto && (par->target->proto != proto || inv_proto)) {
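
Editor's note: threading the protocol family into textify_hooks() matters because arptables defines only three hooks, so decoding an ARP hook mask against the six-entry inet/bridge name table would print the wrong names. A rough userspace sketch of that table selection follows; NFPROTO_ARP is hard-coded from the kernel uapi headers and strncat() stands in for the kernel's snprintf loop, so treat it as an illustration only.

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define NFPROTO_ARP 3   /* value as defined in the kernel uapi headers */

static const char *const inetbr_names[] = {
    "PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING", "BROUTING",
};
static const char *const arp_names[] = { "INPUT", "FORWARD", "OUTPUT" };

static void textify_hooks(char *buf, size_t size, unsigned int mask,
                          unsigned int nfproto)
{
    const char *const *names =
        (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
    unsigned int max =
        (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names)
                                 : ARRAY_SIZE(inetbr_names);
    unsigned int i;

    buf[0] = '\0';
    for (i = 0; i < max; i++) {
        if (!(mask & (1u << i)))
            continue;
        if (buf[0] != '\0')
            strncat(buf, "/", size - strlen(buf) - 1);
        strncat(buf, names[i], size - strlen(buf) - 1);
    }
}

int main(void)
{
    char buf[64];

    textify_hooks(buf, sizeof(buf), 0x5, NFPROTO_ARP);  /* bits 0 and 2 set */
    printf("%s\n", buf);                                /* prints INPUT/OUTPUT */
    return 0;
}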
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 2a08430..bde009e 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -109,7 +109,7 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
struct xt_ct_target_info *info = par->targinfo;
struct nf_conntrack_tuple t;
struct nf_conn *ct;
- int ret;
+ int ret = -EOPNOTSUPP;
if (info->flags & ~XT_CT_NOTRACK)
return -EINVAL;
@@ -247,7 +247,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
struct xt_ct_target_info_v1 *info = par->targinfo;
struct nf_conntrack_tuple t;
struct nf_conn *ct;
- int ret;
+ int ret = -EOPNOTSUPP;
if (info->flags & ~XT_CT_NOTRACK)
return -EINVAL;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index a9327e2..670cbc3 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -35,10 +35,11 @@
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
- if (unlikely(!vport)) {
- kfree_skb(skb);
- return;
- }
+ if (unlikely(!vport))
+ goto error;
+
+ if (unlikely(skb_warn_if_lro(skb)))
+ goto error;
/* Make our own copy of the packet. Otherwise we will mangle the
* packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
skb_push(skb, ETH_HLEN);
ovs_vport_receive(vport, skb);
+ return;
+
+error:
+ kfree_skb(skb);
}
/* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
goto error;
}
- if (unlikely(skb_warn_if_lro(skb)))
- goto error;
-
skb->dev = netdev_vport->dev;
len = skb->len;
dev_queue_xmit(skb);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e639645..c111bd0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
- memset(&req_u, 0, sizeof(req_u));
-
- if (po->rx_ring.pg_vec)
+ if (po->rx_ring.pg_vec) {
+ memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
+ }
- if (po->tx_ring.pg_vec)
+ if (po->tx_ring.pg_vec) {
+ memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
+ }
fanout_release(sk);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 51561ea..79e8ed4 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1135,9 +1135,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
memset(&opt, 0, sizeof(opt));
opt.rate.rate = cl->rate.rate_bps >> 3;
- opt.buffer = cl->buffer;
+ opt.buffer = PSCHED_NS2TICKS(cl->buffer);
opt.ceil.rate = cl->ceil.rate_bps >> 3;
- opt.cbuffer = cl->cbuffer;
+ opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
opt.quantum = cl->quantum;
opt.prio = cl->prio;
opt.level = cl->level;
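
Editor's note: the htb_dump_class() change is a unit fix. HTB now keeps buffer and cbuffer internally in nanoseconds, while the netlink dump format still carries psched ticks, so the values must be converted back on the way out. A small sketch of that round trip; the PSCHED_SHIFT value of 6 (64 ns per tick) matches the kernel definition to the best of my knowledge but is only an assumption here for illustration.

#include <stdint.h>
#include <stdio.h>

#define PSCHED_SHIFT        6
#define PSCHED_TICKS2NS(x)  ((uint64_t)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)  ((uint64_t)(x) >> PSCHED_SHIFT)

int main(void)
{
    uint64_t buffer_ticks = 1563;                          /* as configured via tc */
    uint64_t buffer_ns    = PSCHED_TICKS2NS(buffer_ticks); /* stored internally */

    /* Dumping the raw ns value would look 64x too large to userspace. */
    printf("stored ns=%llu, dumped ticks=%llu\n",
           (unsigned long long)buffer_ns,
           (unsigned long long)PSCHED_NS2TICKS(buffer_ns));
    return 0;
}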
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 298c0dd..3d2acc7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (q->rate) {
struct sk_buff_head *list = &sch->q;
- delay += packet_len_2_sched_time(skb->len, q);
-
if (!skb_queue_empty(list)) {
/*
- * Last packet in queue is reference point (now).
- * First packet in queue is already in flight,
- * calculate this time bonus and substract
+ * Last packet in queue is reference point (now),
+ * calculate this time bonus and subtract
* from delay.
*/
- delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+ delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
+ delay = max_t(psched_tdiff_t, 0, delay);
now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
}
+
+ delay += packet_len_2_sched_time(skb->len, q);
}
cb->time_to_send = now + delay;
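
Editor's note: the reordered netem code rebases the new packet's delay on the tail of the queue (the last packet to leave), clamps it at zero so a large backlog cannot produce a negative delay, and only then adds this packet's own serialisation time. A simplified userspace sketch of that ordering, assuming integer "ticks" and approximating packet_len_2_sched_time() as len/rate:

#include <stdio.h>

typedef long long tdiff_t;

static tdiff_t packet_len_2_sched_time(long long len, long long rate)
{
    return len / rate;               /* crude stand-in for the kernel helper */
}

static long long enqueue_time_to_send(long long now, tdiff_t delay,
                                      long long tail_time_to_send,
                                      int queue_empty,
                                      long long len, long long rate)
{
    if (!queue_empty) {
        /* The tail packet is the reference point: subtract whatever part of
         * its delay has not yet elapsed, never going negative. */
        delay -= tail_time_to_send - now;
        if (delay < 0)
            delay = 0;
        now = tail_time_to_send;
    }
    /* This packet's own transmission time comes on top. */
    delay += packet_len_2_sched_time(len, rate);
    return now + delay;              /* becomes cb->time_to_send */
}

int main(void)
{
    /* 1500-byte packet at 100 bytes/tick, queued behind a tail leaving at t=105. */
    printf("time_to_send = %lld\n",
           enqueue_time_to_send(/*now*/100, /*delay*/0, /*tail*/105,
                                /*queue_empty*/0, /*len*/1500, /*rate*/100));
    return 0;
}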
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 7521d94..cf48528 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -3,8 +3,8 @@
#
menuconfig IP_SCTP
- tristate "The SCTP Protocol (EXPERIMENTAL)"
- depends on INET && EXPERIMENTAL
+ tristate "The SCTP Protocol"
+ depends on INET
depends on IPV6 || IPV6=n
select CRYPTO
select CRYPTO_HMAC
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 159b9bc..d8420ae 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
return;
if (atomic_dec_and_test(&key->refcnt)) {
- kfree(key);
+ kzfree(key);
SCTP_DBG_OBJCNT_DEC(keys);
}
}
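
Editor's note: kzfree() differs from kfree() only in that it wipes the buffer before returning it to the allocator, so freed slab memory never carries the authentication key. Below is a userspace analogue, with glibc's explicit_bzero() standing in for the wipe and an invented struct layout used purely for the example.

#define _DEFAULT_SOURCE              /* for explicit_bzero() on glibc */
#include <stdlib.h>
#include <string.h>

struct auth_key {
    size_t len;
    unsigned char data[64];
};

static void auth_key_put(struct auth_key *key)
{
    if (!key)
        return;
    explicit_bzero(key, sizeof(*key));   /* wipe before handing back to the heap */
    free(key);
}

int main(void)
{
    struct auth_key *key = calloc(1, sizeof(*key));

    if (!key)
        return 1;
    key->len = 16;
    memcpy(key->data, "super-secret-key", 16);
    auth_key_put(key);
    return 0;
}

The same zero-before-release idea recurs below: sctp_endpoint_destroy() memsets the endpoint's secret keys and sctp_setsockopt_auth_key() switches to kzfree() for the user-supplied key copy.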
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 17a001b..1a9c5fb 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
/* Final destructor for endpoint. */
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
+ int i;
+
SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
/* Free up the HMAC transform. */
@@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);
+ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
+ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
+
/* Remove and free the port */
if (sctp_sk(ep->base.sk)->bind_hash)
sctp_put_port(ep->base.sk);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f3f0f4d..391a245 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
*/
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
- if (!laddr->valid && laddr->state != SCTP_ADDR_SRC)
+ if (!laddr->valid)
continue;
- if ((laddr->a.sa.sa_family == AF_INET6) &&
+ if ((laddr->state == SCTP_ADDR_SRC) &&
+ (laddr->a.sa.sa_family == AF_INET6) &&
(scope <= sctp_scope(&laddr->a))) {
bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 379c81d..9bcdbd0 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -224,7 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
/* Free the outqueue structure and any related pending chunks.
*/
-void sctp_outq_teardown(struct sctp_outq *q)
+static void __sctp_outq_teardown(struct sctp_outq *q)
{
struct sctp_transport *transport;
struct list_head *lchunk, *temp;
@@ -277,8 +277,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
sctp_chunk_free(chunk);
}
- q->error = 0;
-
/* Throw away any leftover control chunks. */
list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
list_del_init(&chunk->list);
@@ -286,11 +284,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
}
}
+void sctp_outq_teardown(struct sctp_outq *q)
+{
+ __sctp_outq_teardown(q);
+ sctp_outq_init(q->asoc, q);
+}
+
/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
/* Throw away leftover chunks. */
- sctp_outq_teardown(q);
+ __sctp_outq_teardown(q);
/* If we were kmalloc()'d, free the memory. */
if (q->malloced)
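
Editor's note: splitting the teardown lets an association restart (sctp_outq_teardown) empty the queue and immediately re-initialise it, while the destructor path (sctp_outq_free) releases resources without re-creating them. A compressed sketch of that split, with an invented one-field queue standing in for struct sctp_outq:

#include <stdlib.h>

struct outq {
    int *slots;          /* stand-in for the chunk lists */
    int error;
};

static void outq_init(struct outq *q)
{
    q->slots = calloc(16, sizeof(*q->slots));   /* allocation check elided */
    q->error = 0;
}

static void __outq_teardown(struct outq *q)     /* release only, no re-init */
{
    free(q->slots);
    q->slots = NULL;
}

static void outq_teardown(struct outq *q)       /* restart: tear down, then re-init */
{
    __outq_teardown(q);
    outq_init(q);
}

static void outq_free(struct outq *q)           /* final destruction */
{
    __outq_teardown(q);
}

int main(void)
{
    struct outq q;

    outq_init(&q);
    outq_teardown(&q);   /* after a restart the queue is still usable */
    outq_free(&q);
    return 0;
}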
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 618ec7e..5131fcf 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1779,8 +1779,10 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return SCTP_DISPOSITION_CONSUME;
nomem_ev:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e65758..cedd9bf 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
out:
- kfree(authkey);
+ kzfree(authkey);
return ret;
}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 043889a..bf3c6e8 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -366,7 +366,11 @@ int sctp_sysctl_net_register(struct net *net)
void sctp_sysctl_net_unregister(struct net *net)
{
+ struct ctl_table *table;
+
+ table = net->sctp.sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(net->sctp.sysctl_header);
+ kfree(table);
}
static struct ctl_table_header * sctp_sysctl_header;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index bfa3171..fb20f25 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
+static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
+{
+ struct list_head *q = &queue->tasks[queue->priority];
+ struct rpc_task *task;
+
+ if (!list_empty(q)) {
+ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+ if (task->tk_owner == queue->owner)
+ list_move_tail(&task->u.tk_wait.list, q);
+ }
+}
+
static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
- queue->priority = priority;
+ if (queue->priority != priority) {
+ /* Fairness: rotate the list when changing priority */
+ rpc_rotate_queue_owner(queue);
+ queue->priority = priority;
+ }
}
static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 0a148c9..0f679df 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
}
/*
- * See net/ipv6/datagram.c : datagram_recv_ctl
+ * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
*/
static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
struct cmsghdr *cmh)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 01592d7..45f1618 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
&iwe, IW_EV_UINT_LEN);
}
- buf = kmalloc(30, GFP_ATOMIC);
+ buf = kmalloc(31, GFP_ATOMIC);
if (buf) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 41eabc4..07c5857 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2656,7 +2656,7 @@ static void xfrm_policy_fini(struct net *net)
WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
htab = &net->xfrm.policy_bydst[dir];
- sz = (htab->hmask + 1);
+ sz = (htab->hmask + 1) * sizeof(struct hlist_head);
WARN_ON(!hlist_empty(htab->table));
xfrm_hash_free(htab->table, sz);
}
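
Editor's note: the one-line xfrm_policy_fini() change matters because xfrm_hash_free() expects a size in bytes. The by-destination table holds (hmask + 1) buckets, so the byte count is that figure multiplied by sizeof(struct hlist_head). A trivial illustration of the arithmetic; the bucket count and the single-pointer bucket struct are examples, not the kernel's actual sizing.

#include <stdio.h>

struct hlist_head { void *first; };

int main(void)
{
    unsigned int hmask = 1023;                  /* a 1024-bucket table */
    size_t buckets = hmask + 1;
    size_t bytes   = buckets * sizeof(struct hlist_head);

    printf("buckets=%zu, bytes owed to the allocator=%zu\n", buckets, bytes);
    /* Passing 'buckets' where 'bytes' is expected under-reports the size by a
     * factor of sizeof(struct hlist_head), i.e. 8 on LP64 systems. */
    return 0;
}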
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 765f6fe..35754cc 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -242,11 +242,13 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
u32 diff;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
u32 seq = ntohl(net_seq);
- u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+ u32 pos;
if (!replay_esn->replay_window)
return;
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
if (seq > replay_esn->seq) {
diff = seq - replay_esn->seq;
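
Editor's note: the xfrm_replay_advance_bmp() hunk is an ordering fix. The modulo that derives the bitmap position must not run until the replay_window == 0 early return has been taken, because a zero divisor is undefined behaviour (typically a SIGFPE). A userspace sketch of the safe ordering; the function body is invented, and only the check-before-modulo shape mirrors the patch.

#include <stdint.h>
#include <stdio.h>

static int advance_bmp(uint32_t seq, uint32_t last_seq, uint32_t window)
{
    uint32_t pos;

    if (!window)                     /* anti-replay disabled: nothing to update */
        return 0;

    pos = (last_seq - 1) % window;   /* safe only after the check above */
    printf("seq=%u lands in window slot %u\n", (unsigned)seq, (unsigned)pos);
    return 1;
}

int main(void)
{
    advance_bmp(42, 41, 0);     /* would fault if pos were computed first */
    advance_bmp(42, 41, 128);
    return 0;
}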