Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 4
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 27
-rw-r--r--  net/batman-adv/routing.c | 8
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/bluetooth/smp.c | 6
-rw-r--r--  net/ceph/messenger.c | 6
-rw-r--r--  net/core/skbuff.c | 6
-rw-r--r--  net/ipv4/route.c | 9
-rw-r--r--  net/ipv4/tcp.c | 10
-rw-r--r--  net/ipv4/tcp_input.c | 13
-rw-r--r--  net/ipv4/tcp_ipv4.c | 1
-rw-r--r--  net/ipv4/tcp_minisocks.c | 1
-rw-r--r--  net/ipv4/tcp_timer.c | 4
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 2
-rw-r--r--  net/mac80211/iface.c | 2
-rw-r--r--  net/mac80211/mlme.c | 35
-rw-r--r--  net/mac80211/sta_info.c | 4
-rw-r--r--  net/mac80211/util.c | 4
-rw-r--r--  net/mac80211/wpa.c | 14
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 3
-rw-r--r--  net/netfilter/xt_CT.c | 10
-rw-r--r--  net/netfilter/xt_TEE.c | 1
-rw-r--r--  net/netfilter/xt_nat.c | 8
-rw-r--r--  net/netlink/af_netlink.c | 19
-rw-r--r--  net/sunrpc/cache.c | 4
-rw-r--r--  net/sunrpc/xprtsock.c | 41
-rw-r--r--  net/wireless/mlme.c | 12
28 files changed, 153 insertions, 107 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 9096bcb..ee07072 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -463,7 +463,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
- return NOTIFY_BAD;
+ if (vlan_uses_dev(dev))
+ return NOTIFY_BAD;
+ break;
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 0a9084a..fd8d5af 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1167,6 +1167,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
uint16_t crc;
unsigned long entrytime;
+ spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
+
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
/* setting claim destination address */
@@ -1210,8 +1212,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
/**
* batadv_bla_check_bcast_duplist
* @bat_priv: the bat priv with all the soft interface information
- * @bcast_packet: originator mac address
- * @hdr_size: maximum length of the frame
+ * @bcast_packet: encapsulated broadcast frame plus batman header
+ * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
*
* check if it is on our broadcast list. Another gateway might
* have sent the same packet because it is connected to the same backbone,
@@ -1224,20 +1226,22 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
*/
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
struct batadv_bcast_packet *bcast_packet,
- int hdr_size)
+ int bcast_packet_len)
{
- int i, length, curr;
+ int i, length, curr, ret = 0;
uint8_t *content;
uint16_t crc;
struct batadv_bcast_duplist_entry *entry;
- length = hdr_size - sizeof(*bcast_packet);
+ length = bcast_packet_len - sizeof(*bcast_packet);
content = (uint8_t *)bcast_packet;
content += sizeof(*bcast_packet);
/* calculate the crc ... */
crc = crc16(0, content, length);
+ spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
+
for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
curr = (bat_priv->bla.bcast_duplist_curr + i);
curr %= BATADV_DUPLIST_SIZE;
@@ -1259,9 +1263,12 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
/* this entry seems to match: same crc, not too old,
* and from another gw. therefore return 1 to forbid it.
*/
- return 1;
+ ret = 1;
+ goto out;
}
- /* not found, add a new entry (overwrite the oldest entry) */
+ /* not found, add a new entry (overwrite the oldest entry)
+ * and allow it, its the first occurence.
+ */
curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
curr %= BATADV_DUPLIST_SIZE;
entry = &bat_priv->bla.bcast_duplist[curr];
@@ -1270,8 +1277,10 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
bat_priv->bla.bcast_duplist_curr = curr;
- /* allow it, its the first occurence. */
- return 0;
+out:
+ spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
+
+ return ret;
}
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 939fc01..376b4cc 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1124,8 +1124,14 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
spin_unlock_bh(&orig_node->bcast_seqno_lock);
+ /* keep skb linear for crc calculation */
+ if (skb_linearize(skb) < 0)
+ goto out;
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+
/* check whether this has been sent by another originator before */
- if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+ if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len))
goto out;
/* rebroadcast packet */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 2ed82ca..ac1e07a 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -205,6 +205,8 @@ struct batadv_priv_bla {
struct batadv_hashtable *backbone_hash;
struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
int bcast_duplist_curr;
+ /* protects bcast_duplist and bcast_duplist_curr */
+ spinlock_t bcast_duplist_lock;
struct batadv_bla_claim_dst claim_dest;
struct delayed_work work;
};
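
The three batman-adv hunks above add a spinlock so concurrent receivers cannot race on the broadcast duplicate list. A minimal sketch of the locking pattern follows (generic kernel C with hypothetical names, not the actual batadv_priv_bla structures):

#include <linux/spinlock.h>

#define DUPLIST_SIZE 16

/* Hypothetical example structure; the real fields live in batadv_priv_bla. */
struct duplist {
	spinlock_t lock;		/* protects crc[] and curr, as in the patch */
	u16 crc[DUPLIST_SIZE];
	int curr;
};

/* spin_lock_init(&d->lock) must run once before use, as batadv_bla_init() now does. */

/* Return 1 if @crc was seen recently (duplicate), 0 if it is new. */
static int duplist_check(struct duplist *d, u16 crc)
{
	int i, ret = 0;

	spin_lock_bh(&d->lock);
	for (i = 0; i < DUPLIST_SIZE; i++) {
		if (d->crc[(d->curr + i) % DUPLIST_SIZE] == crc) {
			ret = 1;	/* duplicate: forbid it */
			goto out;
		}
	}
	/* first occurrence: overwrite the oldest slot and allow it */
	d->curr = (d->curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
	d->crc[d->curr] = crc;
out:
	spin_unlock_bh(&d->lock);
	return ret;
}
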
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 8c225ef..2ac8d50 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -32,6 +32,8 @@
#define SMP_TIMEOUT msecs_to_jiffies(30000)
+#define AUTH_REQ_MASK 0x07
+
static inline void swap128(u8 src[16], u8 dst[16])
{
int i;
@@ -230,7 +232,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
req->init_key_dist = 0;
req->resp_key_dist = dist_keys;
- req->auth_req = authreq;
+ req->auth_req = (authreq & AUTH_REQ_MASK);
return;
}
@@ -239,7 +241,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
rsp->init_key_dist = 0;
rsp->resp_key_dist = req->resp_key_dist & dist_keys;
- rsp->auth_req = authreq;
+ rsp->auth_req = (authreq & AUTH_REQ_MASK);
}
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 159aa8b..3ef1759 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2300,10 +2300,11 @@ restart:
mutex_unlock(&con->mutex);
return;
} else {
- con->ops->put(con);
dout("con_work %p FAILED to back off %lu\n", con,
con->delay);
+ set_bit(CON_FLAG_BACKOFF, &con->flags);
}
+ goto done;
}
if (con->state == CON_STATE_STANDBY) {
@@ -2749,7 +2750,8 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
msg = con->ops->alloc_msg(con, hdr, skip);
mutex_lock(&con->mutex);
if (con->state != CON_STATE_OPEN) {
- ceph_msg_put(msg);
+ if (msg)
+ ceph_msg_put(msg);
return -EAGAIN;
}
con->in_msg = msg;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6e04b1f..4007c14 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3379,10 +3379,12 @@ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
- if (head_stolen)
+ if (head_stolen) {
+ skb_release_head_state(skb);
kmem_cache_free(skbuff_head_cache, skb);
- else
+ } else {
__kfree_skb(skb);
+ }
}
EXPORT_SYMBOL(kfree_skb_partial);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 432f4bb..a8c65121 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1163,8 +1163,12 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
spin_lock_bh(&fnhe_lock);
if (daddr == fnhe->fnhe_daddr) {
- struct rtable *orig;
-
+ struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);
+ if (orig && rt_is_expired(orig)) {
+ fnhe->fnhe_gw = 0;
+ fnhe->fnhe_pmtu = 0;
+ fnhe->fnhe_expires = 0;
+ }
if (fnhe->fnhe_pmtu) {
unsigned long expires = fnhe->fnhe_expires;
unsigned long diff = expires - jiffies;
@@ -1181,7 +1185,6 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
} else if (!rt->rt_gateway)
rt->rt_gateway = daddr;
- orig = rcu_dereference(fnhe->fnhe_rth);
rcu_assign_pointer(fnhe->fnhe_rth, rt);
if (orig)
rt_free(orig);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f32c02e..197c000 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -549,14 +549,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
!tp->urg_data ||
before(tp->urg_seq, tp->copied_seq) ||
!before(tp->urg_seq, tp->rcv_nxt)) {
- struct sk_buff *skb;
answ = tp->rcv_nxt - tp->copied_seq;
- /* Subtract 1, if FIN is in queue. */
- skb = skb_peek_tail(&sk->sk_receive_queue);
- if (answ && skb)
- answ -= tcp_hdr(skb)->fin;
+ /* Subtract 1, if FIN was received */
+ if (answ && sock_flag(sk, SOCK_DONE))
+ answ--;
} else
answ = tp->urg_seq - tp->copied_seq;
release_sock(sk);
@@ -2766,6 +2764,8 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
info->tcpi_options |= TCPI_OPT_ECN;
if (tp->ecn_flags & TCP_ECN_SEEN)
info->tcpi_options |= TCPI_OPT_ECN_SEEN;
+ if (tp->syn_data_acked)
+ info->tcpi_options |= TCPI_OPT_SYN_DATA;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
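
The tcp_get_info() hunk exports whether data carried in a TCP Fast Open SYN was acknowledged. A hedged user-space sketch of reading it; the TCPI_OPT_SYN_DATA value of 32 is assumed from the matching uapi change in this series and defined as a fallback for older headers:

#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCPI_OPT_SYN_DATA
#define TCPI_OPT_SYN_DATA 32	/* assumed value; not present in older libc headers */
#endif

/* Print whether the kernel reports the Fast Open SYN data as acked. */
static void report_syn_data(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("SYN data acked: %s\n",
		       (ti.tcpi_options & TCPI_OPT_SYN_DATA) ? "yes" : "no");
}
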
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 432c366..1db6639 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5646,6 +5646,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
tcp_rearm_rto(sk);
return true;
}
+ tp->syn_data_acked = tp->syn_data;
return false;
}
@@ -5963,7 +5964,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
req = tp->fastopen_rsk;
if (req != NULL) {
- BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+ WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
@@ -6052,7 +6053,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* ACK we have received, this would have acknowledged
* our SYNACK so stop the SYNACK timer.
*/
- if (acceptable && req != NULL) {
+ if (req != NULL) {
+ /* Return RST if ack_seq is invalid.
+ * Note that RFC793 only says to generate a
+ * DUPACK for it but for TCP Fast Open it seems
+ * better to treat this case like TCP_SYN_RECV
+ * above.
+ */
+ if (!acceptable)
+ return 1;
/* We no longer need the request sock. */
reqsk_fastopen_remove(sk, req, false);
tcp_rearm_rto(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ef998b0..0c4a643 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1461,6 +1461,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
skb_set_owner_r(skb, child);
__skb_queue_tail(&child->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tp->syn_data_acked = 1;
}
sk->sk_data_ready(sk, 0);
bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 27536ba..a7302d9 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -510,6 +510,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
+ newtp->syn_data_acked = 0;
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index fc04711..d47c1b4 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -347,8 +347,8 @@ void tcp_retransmit_timer(struct sock *sk)
return;
}
if (tp->fastopen_rsk) {
- BUG_ON(sk->sk_state != TCP_SYN_RECV &&
- sk->sk_state != TCP_FIN_WAIT1);
+ WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
+ sk->sk_state != TCP_FIN_WAIT1);
tcp_fastopen_synack_timer(sk);
/* Before we receive ACK to our SYN-ACK don't retransmit
* anything else (e.g., data or FIN segments).
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7c7e963..b1e6cf0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -219,7 +219,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
- [RTAX_HOPLIMIT - 1] = 255,
+ [RTAX_HOPLIMIT - 1] = 0,
};
static const struct rt6_info ip6_null_entry_template = {
@@ -1232,7 +1232,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
+ dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 95a3a7a..496ce2c 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -421,6 +421,8 @@ static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL);
}
+ tty->driver_data = self;
+
return tty_port_install(&self->port, driver, tty);
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 6f8a73c..7de7717 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -853,7 +853,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->control.vif == &sdata->vif) {
__skb_unlink(skb, &local->pending[i]);
- dev_kfree_skb_irq(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
}
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e714ed8..1b7eed2 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3099,22 +3099,32 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ht_cfreq, ht_oper->primary_chan,
cbss->channel->band);
ht_oper = NULL;
+ } else {
+ channel_type = NL80211_CHAN_HT20;
}
}
- if (ht_oper) {
- channel_type = NL80211_CHAN_HT20;
+ if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ /*
+ * cfg80211 already verified that the channel itself can
+ * be used, but it didn't check that we can do the right
+ * HT type, so do that here as well. If HT40 isn't allowed
+ * on this channel, disable 40 MHz operation.
+ */
- if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- switch (ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+ else
channel_type = NL80211_CHAN_HT40PLUS;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+ else
channel_type = NL80211_CHAN_HT40MINUS;
- break;
- }
+ break;
}
}
@@ -3549,6 +3559,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+ bool tx = !req->local_state_change;
mutex_lock(&ifmgd->mtx);
@@ -3565,12 +3576,12 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated &&
ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
- req->reason_code, true, frame_buf);
+ req->reason_code, tx, frame_buf);
} else {
drv_mgd_prepare_tx(sdata->local, sdata);
ieee80211_send_deauth_disassoc(sdata, req->bssid,
IEEE80211_STYPE_DEAUTH,
- req->reason_code, true,
+ req->reason_code, tx,
frame_buf);
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 797dd36..0a4e4c0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -650,7 +650,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
*/
if (!skb)
break;
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
/*
@@ -679,7 +679,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
local->total_ps_buffered--;
ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
sta->sta.addr);
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
/*
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22ca350..94e5868 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -406,7 +406,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
int queue = info->hw_queue;
if (WARN_ON(!info->control.vif)) {
- kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
return;
}
@@ -431,7 +431,7 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (WARN_ON(!info->control.vif)) {
- kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
continue;
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index bdb53ab..8bd2f5c 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -106,7 +106,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
if (status->flag & RX_FLAG_MMIC_ERROR)
goto mic_fail;
- if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
+ if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
goto update_iv;
return RX_CONTINUE;
@@ -545,14 +546,19 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
static void bip_aad(struct sk_buff *skb, u8 *aad)
{
+ __le16 mask_fc;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
/* BIP AAD: FC(masked) || A1 || A2 || A3 */
/* FC type/subtype */
- aad[0] = skb->data[0];
/* Mask FC Retry, PwrMgt, MoreData flags to zero */
- aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6));
+ mask_fc = hdr->frame_control;
+ mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM |
+ IEEE80211_FCTL_MOREDATA);
+ put_unaligned(mask_fc, (__le16 *) &aad[0]);
/* A1 || A2 || A3 */
- memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN);
+ memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN);
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 7e7198b..c4ee437 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2589,6 +2589,8 @@ __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
struct ip_vs_proto_data *pd;
#endif
+ memset(u, 0, sizeof (*u));
+
#ifdef CONFIG_IP_VS_PROTO_TCP
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
@@ -2766,7 +2768,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_timeout_user t;
- memset(&t, 0, sizeof(t));
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 16c7125..ae7f5da 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -180,9 +180,9 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
struct ctnl_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
- const struct ipt_entry *e = par->entryinfo;
struct nf_conntrack_l4proto *l4proto;
int ret = 0;
+ u8 proto;
rcu_read_lock();
timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
@@ -192,9 +192,11 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
goto out;
}
- if (e->ip.invflags & IPT_INV_PROTO) {
+ proto = xt_ct_find_proto(par);
+ if (!proto) {
ret = -EINVAL;
- pr_info("You cannot use inversion on L4 protocol\n");
+ pr_info("You must specify a L4 protocol, and not use "
+ "inversions on it.\n");
goto out;
}
@@ -214,7 +216,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
/* Make sure the timeout policy matches any existing protocol tracker,
* otherwise default to generic.
*/
- l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto);
+ l4proto = __nf_ct_l4proto_find(par->family, proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
pr_info("Timeout policy `%s' can only be used by L4 protocol "
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index ee2e5bc..bd93e51 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -70,6 +70,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
fl4.daddr = info->gw.ip;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+ fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return false;
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index 81aafa8..bea7464c 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -111,7 +111,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
- (1 << NF_INET_LOCAL_OUT),
+ (1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
@@ -123,7 +123,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_LOCAL_IN),
+ (1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
{
@@ -133,7 +133,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
- (1 << NF_INET_LOCAL_OUT),
+ (1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
@@ -143,7 +143,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_LOCAL_IN),
+ (1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
};
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 01e944a..4da797f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -138,6 +138,8 @@ static int netlink_dump(struct sock *sk);
static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
+#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
+
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
@@ -345,6 +347,11 @@ netlink_update_listeners(struct sock *sk)
struct hlist_node *node;
unsigned long mask;
unsigned int i;
+ struct listeners *listeners;
+
+ listeners = nl_deref_protected(tbl->listeners);
+ if (!listeners)
+ return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
@@ -352,7 +359,7 @@ netlink_update_listeners(struct sock *sk)
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
- tbl->listeners->masks[i] = mask;
+ listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
@@ -536,7 +543,11 @@ static int netlink_release(struct socket *sock)
if (netlink_is_kernel(sk)) {
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
- kfree(nl_table[sk->sk_protocol].listeners);
+ struct listeners *old;
+
+ old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
+ RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
+ kfree_rcu(old, rcu);
nl_table[sk->sk_protocol].module = NULL;
nl_table[sk->sk_protocol].bind = NULL;
nl_table[sk->sk_protocol].flags = 0;
@@ -982,7 +993,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
rcu_read_lock();
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
- if (group - 1 < nl_table[sk->sk_protocol].groups)
+ if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
@@ -1625,7 +1636,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
- old = rcu_dereference_protected(tbl->listeners, 1);
+ old = nl_deref_protected(tbl->listeners);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2a68bb3..fc2f7aa 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1409,11 +1409,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
size_t count, loff_t *ppos,
struct cache_detail *cd)
{
- char tbuf[20];
+ char tbuf[22];
unsigned long p = *ppos;
size_t len;
- sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
+ snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
len = strlen(tbuf);
if (p >= len)
return 0;
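
The new buffer size covers the worst case: ULONG_MAX on 64-bit has 20 decimal digits, and the newline plus terminating NUL bring the total to 22 bytes. An illustrative check of that arithmetic (standalone example, not sunrpc code):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char tbuf[22];
	/* "18446744073709551615\n" is 21 characters, so 22 bytes including the NUL. */
	int n = snprintf(tbuf, sizeof(tbuf), "%lu\n", ULONG_MAX);

	printf("%d characters needed (excluding NUL)\n", n);	/* 21 on 64-bit systems */
	return 0;
}
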
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index aaaadfb..75853ca 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,6 @@ struct sock_xprt {
void (*old_data_ready)(struct sock *, int);
void (*old_state_change)(struct sock *);
void (*old_write_space)(struct sock *);
- void (*old_error_report)(struct sock *);
};
/*
@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
dprintk("RPC: sendmsg returned unrecognized error %d\n",
-status);
case -ECONNRESET:
- case -EPIPE:
xs_tcp_shutdown(xprt);
case -ECONNREFUSED:
case -ENOTCONN:
+ case -EPIPE:
clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
transport->old_data_ready = sk->sk_data_ready;
transport->old_state_change = sk->sk_state_change;
transport->old_write_space = sk->sk_write_space;
- transport->old_error_report = sk->sk_error_report;
}
static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
sk->sk_data_ready = transport->old_data_ready;
sk->sk_state_change = transport->old_state_change;
sk->sk_write_space = transport->old_write_space;
- sk->sk_error_report = transport->old_error_report;
}
static void xs_reset_transport(struct sock_xprt *transport)
@@ -1453,7 +1450,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
xprt_clear_connecting(xprt);
}
-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
smp_mb__before_clear_bit();
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1461,6 +1458,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
smp_mb__after_clear_bit();
+}
+
+static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+{
+ xs_sock_reset_connection_flags(xprt);
/* Mark transport as closed and wake up all pending tasks */
xprt_disconnect_done(xprt);
}
@@ -1516,6 +1518,7 @@ static void xs_tcp_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
/* The server initiated a shutdown of the socket */
xprt->connect_cookie++;
+ clear_bit(XPRT_CONNECTED, &xprt->state);
xs_tcp_force_close(xprt);
case TCP_CLOSING:
/*
@@ -1540,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
read_unlock_bh(&sk->sk_callback_lock);
}
-/**
- * xs_error_report - callback mainly for catching socket errors
- * @sk: socket
- */
-static void xs_error_report(struct sock *sk)
-{
- struct rpc_xprt *xprt;
-
- read_lock_bh(&sk->sk_callback_lock);
- if (!(xprt = xprt_from_sock(sk)))
- goto out;
- dprintk("RPC: %s client %p...\n"
- "RPC: error %d\n",
- __func__, xprt, sk->sk_err);
- xprt_wake_pending_tasks(xprt, -EAGAIN);
-out:
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
static void xs_write_space(struct sock *sk)
{
struct socket *sock;
@@ -1858,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_local_data_ready;
sk->sk_write_space = xs_udp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_allocation = GFP_ATOMIC;
xprt_clear_connected(xprt);
@@ -1983,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_udp_data_ready;
sk->sk_write_space = xs_udp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_no_check = UDP_CSUM_NORCV;
sk->sk_allocation = GFP_ATOMIC;
@@ -2050,10 +2032,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
any.sa_family = AF_UNSPEC;
result = kernel_connect(transport->sock, &any, sizeof(any), 0);
if (!result)
- xs_sock_mark_closed(&transport->xprt);
- else
- dprintk("RPC: AF_UNSPEC connect return code %d\n",
- result);
+ xs_sock_reset_connection_flags(&transport->xprt);
+ dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
}
static void xs_tcp_reuse_connection(struct sock_xprt *transport)
@@ -2098,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_data_ready = xs_tcp_data_ready;
sk->sk_state_change = xs_tcp_state_change;
sk->sk_write_space = xs_tcp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_allocation = GFP_ATOMIC;
/* socket options */
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 8016fee..904a7f3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -457,20 +457,14 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
.reason_code = reason,
.ie = ie,
.ie_len = ie_len,
+ .local_state_change = local_state_change,
};
ASSERT_WDEV_LOCK(wdev);
- if (local_state_change) {
- if (wdev->current_bss &&
- ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(&wdev->current_bss->pub);
- wdev->current_bss = NULL;
- }
-
+ if (local_state_change && (!wdev->current_bss ||
+ !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
return 0;
- }
return rdev->ops->deauth(&rdev->wiphy, dev, &req);
}