Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netlink.c                         1
-rw-r--r--  net/bridge/br_vlan.c                           17
-rw-r--r--  net/core/fib_rules.c                           10
-rw-r--r--  net/decnet/dn_rules.c                           1
-rw-r--r--  net/ipv4/fib_rules.c                            1
-rw-r--r--  net/ipv4/ipmr.c                                 1
-rw-r--r--  net/ipv4/tcp_cubic.c                           16
-rw-r--r--  net/ipv4/tcp_output.c                           6
-rw-r--r--  net/ipv6/fib6_rules.c                           6
-rw-r--r--  net/ipv6/ip6mr.c                                3
-rw-r--r--  net/ipv6/route.c                              200
-rw-r--r--  net/mac80211/cfg.c                              4
-rw-r--r--  net/mac80211/mlme.c                            16
-rw-r--r--  net/mac80211/rate.c                             2
-rw-r--r--  net/mac80211/tdls.c                            70
-rw-r--r--  net/mac80211/vht.c                             15
-rw-r--r--  net/netfilter/ipset/ip_set_hash_gen.h          12
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netnet.c       20
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netportnet.c   20
-rw-r--r--  net/netfilter/nf_conntrack_core.c               3
-rw-r--r--  net/netfilter/nf_synproxy_core.c                2
-rw-r--r--  net/netfilter/nfnetlink.c                       8
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c            5
-rw-r--r--  net/netfilter/xt_CT.c                           2
-rw-r--r--  net/netlink/af_netlink.c                       35
-rw-r--r--  net/openvswitch/Kconfig                        12
-rw-r--r--  net/openvswitch/Makefile                        4
-rw-r--r--  net/openvswitch/conntrack.h                     4
-rw-r--r--  net/rds/connection.c                           15
-rw-r--r--  net/rfkill/core.c                              10
-rw-r--r--  net/sctp/protocol.c                            10
-rw-r--r--  net/switchdev/switchdev.c                       6
-rw-r--r--  net/tipc/bcast.c                               30
-rw-r--r--  net/wireless/reg.c                              2
34 files changed, 461 insertions(+), 108 deletions(-)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index af5e187..ea748c9 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -16,7 +16,6 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
-#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 3cd8cc9..5f5a02b 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -117,10 +117,11 @@ out_filt:
return err;
}
-static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
- u16 vid)
+static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
+ u16 vid)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ int err = 0;
/* If driver uses VLAN ndo ops, use 8021q to delete vid
* on device, otherwise try switchdev ops to delete vid.
@@ -137,8 +138,12 @@ static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
},
};
- switchdev_port_obj_del(dev, &vlan_obj);
+ err = switchdev_port_obj_del(dev, &vlan_obj);
+ if (err == -EOPNOTSUPP)
+ err = 0;
}
+
+ return err;
}
static int __vlan_del(struct net_port_vlans *v, u16 vid)
@@ -151,7 +156,11 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
if (v->port_idx) {
struct net_bridge_port *p = v->parent.port;
- __vlan_vid_del(p->dev, p->br, vid);
+ int err;
+
+ err = __vlan_vid_del(p->dev, p->br, vid);
+ if (err)
+ return err;
}
clear_bit(vid, v->vlan_bitmap);
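The br_vlan.c change above makes __vlan_vid_del() return an error to __vlan_del(), while still treating -EOPNOTSUPP from switchdev as success so software-only bridges keep working. A minimal user-space sketch of that error-filtering pattern, where try_hw_vid_del() is a hypothetical stand-in for switchdev_port_obj_del():

/* A hardware offload hook may legitimately return -EOPNOTSUPP; the caller
 * treats that as success so setups without offload keep working.
 */
#include <errno.h>
#include <stdio.h>

static int try_hw_vid_del(unsigned short vid)
{
	(void)vid;
	return -EOPNOTSUPP;	/* pretend the device has no offload support */
}

static int vid_del(unsigned short vid)
{
	int err = try_hw_vid_del(vid);

	if (err == -EOPNOTSUPP)	/* no offload: not an error */
		err = 0;
	return err;
}

int main(void)
{
	printf("vid_del(10) -> %d\n", vid_del(10));	/* prints 0 */
	return 0;
}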
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index ae8306e..bf77e36 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -44,7 +44,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
}
EXPORT_SYMBOL(fib_default_rule_add);
-u32 fib_default_rule_pref(struct fib_rules_ops *ops)
+static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
struct list_head *pos;
struct fib_rule *rule;
@@ -60,7 +60,6 @@ u32 fib_default_rule_pref(struct fib_rules_ops *ops)
return 0;
}
-EXPORT_SYMBOL(fib_default_rule_pref);
static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
@@ -299,8 +298,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
}
rule->fr_net = net;
- if (tb[FRA_PRIORITY])
- rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
+ rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
+ : fib_default_rule_pref(ops);
if (tb[FRA_IIFNAME]) {
struct net_device *dev;
@@ -350,9 +349,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
else
rule->suppress_ifgroup = -1;
- if (!tb[FRA_PRIORITY] && ops->default_pref)
- rule->pref = ops->default_pref(ops);
-
err = -EINVAL;
if (tb[FRA_GOTO]) {
if (rule->action != FR_ACT_GOTO)
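With the fib_rules.c change above, the per-family ->default_pref callbacks go away and fib_nl_newrule() assigns the preference once: the FRA_PRIORITY attribute if given, otherwise the value computed by the now-static fib_default_rule_pref(). A rough user-space sketch of that "attribute or computed default" assignment; the negative-means-absent encoding and the "one below the first rule" default are illustrative, not the exact kernel policy:

#include <stdio.h>

static unsigned int default_rule_pref(const unsigned int *prefs, int n)
{
	/* Pick a slot just above the current head rule (illustrative). */
	if (n > 0 && prefs[0] > 0)
		return prefs[0] - 1;
	return 0;
}

int main(void)
{
	unsigned int existing[] = { 100, 32766, 32767 };
	long attr_pref = -1;	/* stands in for a missing FRA_PRIORITY */
	unsigned int pref;

	pref = attr_pref >= 0 ? (unsigned int)attr_pref
			      : default_rule_pref(existing, 3);
	printf("new rule pref = %u\n", pref);	/* 99 */
	return 0;
}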
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 9d66a0f..295bbd6 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -229,7 +229,6 @@ static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = {
.configure = dn_fib_rule_configure,
.compare = dn_fib_rule_compare,
.fill = dn_fib_rule_fill,
- .default_pref = fib_default_rule_pref,
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
.policy = dn_fib_rule_policy,
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 18123d5..f2bda9e 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -318,7 +318,6 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
.delete = fib4_rule_delete,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
- .default_pref = fib_default_rule_pref,
.nlmsg_payload = fib4_rule_nlmsg_payload,
.flush_cache = fib4_rule_flush_cache,
.nlgroup = RTNLGRP_IPV4_RULE,
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3a2c016..866ee89 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -233,7 +233,6 @@ static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
.match = ipmr_rule_match,
.configure = ipmr_rule_configure,
.compare = ipmr_rule_compare,
- .default_pref = fib_default_rule_pref,
.fill = ipmr_rule_fill,
.nlgroup = RTNLGRP_IPV4_RULE,
.policy = ipmr_rule_policy,
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 28011fb..c6ded6b 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -151,6 +151,21 @@ static void bictcp_init(struct sock *sk)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
+static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+{
+ if (event == CA_EVENT_TX_START) {
+ s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
+ struct bictcp *ca = inet_csk_ca(sk);
+
+ /* We were application limited (idle) for a while.
+ * Shift epoch_start to keep cwnd growth to cubic curve.
+ */
+ if (ca->epoch_start && delta > 0)
+ ca->epoch_start += delta;
+ return;
+ }
+}
+
/* calculate the cubic root of x using a table lookup followed by one
* Newton-Raphson iteration.
* Avg err ~= 0.195%
@@ -450,6 +465,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.cong_avoid = bictcp_cong_avoid,
.set_state = bictcp_state,
.undo_cwnd = bictcp_undo_cwnd,
+ .cwnd_event = bictcp_cwnd_event,
.pkts_acked = bictcp_acked,
.owner = THIS_MODULE,
.name = "cubic",
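bictcp_cwnd_event() above shifts epoch_start forward by the idle time, so that when transmission restarts the elapsed time on the cubic curve is the same as it was at the last send instead of including the idle gap. A standalone sketch of that arithmetic with made-up millisecond timestamps:

#include <stdio.h>

int main(void)
{
	unsigned int epoch_start = 1000;	/* ms, start of cubic epoch */
	unsigned int last_send   = 3000;	/* ms, time of last transmit */
	unsigned int now         = 8000;	/* ms, i.e. idle for 5000 ms */
	int delta = (int)(now - last_send);

	printf("curve time before shift: %u ms\n", now - epoch_start); /* 7000 */
	if (epoch_start && delta > 0)
		epoch_start += delta;		/* as in bictcp_cwnd_event() */
	printf("curve time after shift:  %u ms\n", now - epoch_start); /* 2000 */
	return 0;
}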
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1188e4f..f9a8a12 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -164,6 +164,9 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
+ if (tcp_packets_in_flight(tp) == 0)
+ tcp_ca_event(sk, CA_EVENT_TX_START);
+
tp->lsndtime = now;
/* If it is a reply for ato after last received
@@ -940,9 +943,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0)
- tcp_ca_event(sk, CA_EVENT_TX_START);
-
/* if no packet is in qdisc/device queue, then allow XPS to select
* another queue. We can be called from tcp_tsq_handler()
* which holds one reference to sk_wmem_alloc.
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 2367a16..9f777ec 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -258,11 +258,6 @@ nla_put_failure:
return -ENOBUFS;
}
-static u32 fib6_rule_default_pref(struct fib_rules_ops *ops)
-{
- return 0x3FFF;
-}
-
static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(16) /* dst */
@@ -279,7 +274,6 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.configure = fib6_rule_configure,
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
- .default_pref = fib6_rule_default_pref,
.nlmsg_payload = fib6_rule_nlmsg_payload,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = fib6_rule_policy,
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 74ceb73..0e004cc 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -217,7 +217,6 @@ static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
.match = ip6mr_rule_match,
.configure = ip6mr_rule_configure,
.compare = ip6mr_rule_compare,
- .default_pref = fib_default_rule_pref,
.fill = ip6mr_rule_fill,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = ip6mr_rule_policy,
@@ -550,7 +549,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
if (it->cache == &mrt->mfc6_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == mrt->mfc6_cache_array)
+ else if (it->cache == &mrt->mfc6_cache_array[it->ct])
read_unlock(&mrt_lock);
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index f45cac6..53617d7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1748,7 +1748,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
return -EINVAL;
}
-int ip6_route_add(struct fib6_config *cfg)
+int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
{
int err;
struct net *net = cfg->fc_nlinfo.nl_net;
@@ -1756,7 +1756,6 @@ int ip6_route_add(struct fib6_config *cfg)
struct net_device *dev = NULL;
struct inet6_dev *idev = NULL;
struct fib6_table *table;
- struct mx6_config mxc = { .mx = NULL, };
int addr_type;
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
@@ -1981,6 +1980,32 @@ install_route:
cfg->fc_nlinfo.nl_net = dev_net(dev);
+ *rt_ret = rt;
+
+ return 0;
+out:
+ if (dev)
+ dev_put(dev);
+ if (idev)
+ in6_dev_put(idev);
+ if (rt)
+ dst_free(&rt->dst);
+
+ *rt_ret = NULL;
+
+ return err;
+}
+
+int ip6_route_add(struct fib6_config *cfg)
+{
+ struct mx6_config mxc = { .mx = NULL, };
+ struct rt6_info *rt = NULL;
+ int err;
+
+ err = ip6_route_info_create(cfg, &rt);
+ if (err)
+ goto out;
+
err = ip6_convert_metrics(&mxc, cfg);
if (err)
goto out;
@@ -1988,14 +2013,12 @@ install_route:
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
kfree(mxc.mx);
+
return err;
out:
- if (dev)
- dev_put(dev);
- if (idev)
- in6_dev_put(idev);
if (rt)
dst_free(&rt->dst);
+
return err;
}
@@ -2776,19 +2799,78 @@ errout:
return err;
}
-static int ip6_route_multipath(struct fib6_config *cfg, int add)
+struct rt6_nh {
+ struct rt6_info *rt6_info;
+ struct fib6_config r_cfg;
+ struct mx6_config mxc;
+ struct list_head next;
+};
+
+static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
+{
+ struct rt6_nh *nh;
+
+ list_for_each_entry(nh, rt6_nh_list, next) {
+ pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
+ &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
+ nh->r_cfg.fc_ifindex);
+ }
+}
+
+static int ip6_route_info_append(struct list_head *rt6_nh_list,
+ struct rt6_info *rt, struct fib6_config *r_cfg)
+{
+ struct rt6_nh *nh;
+ struct rt6_info *rtnh;
+ int err = -EEXIST;
+
+ list_for_each_entry(nh, rt6_nh_list, next) {
+ /* check if rt6_info already exists */
+ rtnh = nh->rt6_info;
+
+ if (rtnh->dst.dev == rt->dst.dev &&
+ rtnh->rt6i_idev == rt->rt6i_idev &&
+ ipv6_addr_equal(&rtnh->rt6i_gateway,
+ &rt->rt6i_gateway))
+ return err;
+ }
+
+ nh = kzalloc(sizeof(*nh), GFP_KERNEL);
+ if (!nh)
+ return -ENOMEM;
+ nh->rt6_info = rt;
+ err = ip6_convert_metrics(&nh->mxc, r_cfg);
+ if (err) {
+ kfree(nh);
+ return err;
+ }
+ memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
+ list_add_tail(&nh->next, rt6_nh_list);
+
+ return 0;
+}
+
+static int ip6_route_multipath_add(struct fib6_config *cfg)
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
+ struct rt6_info *rt;
+ struct rt6_nh *err_nh;
+ struct rt6_nh *nh, *nh_safe;
int remaining;
int attrlen;
- int err = 0, last_err = 0;
+ int err = 1;
+ int nhn = 0;
+ int replace = (cfg->fc_nlinfo.nlh &&
+ (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
+ LIST_HEAD(rt6_nh_list);
remaining = cfg->fc_mp_len;
-beginning:
rtnh = (struct rtnexthop *)cfg->fc_mp;
- /* Parse a Multipath Entry */
+ /* Parse a Multipath Entry and build a list (rt6_nh_list) of
+ * rt6_info structs per nexthop
+ */
while (rtnh_ok(rtnh, remaining)) {
memcpy(&r_cfg, cfg, sizeof(*cfg));
if (rtnh->rtnh_ifindex)
@@ -2808,22 +2890,32 @@ beginning:
if (nla)
r_cfg.fc_encap_type = nla_get_u16(nla);
}
- err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
+
+ err = ip6_route_info_create(&r_cfg, &rt);
+ if (err)
+ goto cleanup;
+
+ err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
if (err) {
- last_err = err;
- /* If we are trying to remove a route, do not stop the
- * loop when ip6_route_del() fails (because next hop is
- * already gone), we should try to remove all next hops.
- */
- if (add) {
- /* If add fails, we should try to delete all
- * next hops that have been already added.
- */
- add = 0;
- remaining = cfg->fc_mp_len - remaining;
- goto beginning;
- }
+ dst_free(&rt->dst);
+ goto cleanup;
+ }
+
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+ err_nh = NULL;
+ list_for_each_entry(nh, &rt6_nh_list, next) {
+ err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
+ /* nh->rt6_info is used or freed at this point, reset to NULL*/
+ nh->rt6_info = NULL;
+ if (err) {
+ if (replace && nhn)
+ ip6_print_replace_route_err(&rt6_nh_list);
+ err_nh = nh;
+ goto add_errout;
}
+
/* Because each route is added like a single route we remove
* these flags after the first nexthop: if there is a collision,
* we have already failed to add the first nexthop:
@@ -2833,6 +2925,62 @@ beginning:
*/
cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
NLM_F_REPLACE);
+ nhn++;
+ }
+
+ goto cleanup;
+
+add_errout:
+ /* Delete routes that were already added */
+ list_for_each_entry(nh, &rt6_nh_list, next) {
+ if (err_nh == nh)
+ break;
+ ip6_route_del(&nh->r_cfg);
+ }
+
+cleanup:
+ list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
+ if (nh->rt6_info)
+ dst_free(&nh->rt6_info->dst);
+ kfree(nh->mxc.mx);
+ list_del(&nh->next);
+ kfree(nh);
+ }
+
+ return err;
+}
+
+static int ip6_route_multipath_del(struct fib6_config *cfg)
+{
+ struct fib6_config r_cfg;
+ struct rtnexthop *rtnh;
+ int remaining;
+ int attrlen;
+ int err = 1, last_err = 0;
+
+ remaining = cfg->fc_mp_len;
+ rtnh = (struct rtnexthop *)cfg->fc_mp;
+
+ /* Parse a Multipath Entry */
+ while (rtnh_ok(rtnh, remaining)) {
+ memcpy(&r_cfg, cfg, sizeof(*cfg));
+ if (rtnh->rtnh_ifindex)
+ r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
+
+ attrlen = rtnh_attrlen(rtnh);
+ if (attrlen > 0) {
+ struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+
+ nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+ if (nla) {
+ nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+ r_cfg.fc_flags |= RTF_GATEWAY;
+ }
+ }
+ err = ip6_route_del(&r_cfg);
+ if (err)
+ last_err = err;
+
rtnh = rtnh_next(rtnh, &remaining);
}
@@ -2849,7 +2997,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
if (cfg.fc_mp)
- return ip6_route_multipath(&cfg, 0);
+ return ip6_route_multipath_del(&cfg);
else
return ip6_route_del(&cfg);
}
@@ -2864,7 +3012,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
if (cfg.fc_mp)
- return ip6_route_multipath(&cfg, 1);
+ return ip6_route_multipath_add(&cfg);
else
return ip6_route_add(&cfg);
}
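The route.c rework above splits multipath handling into two phases: ip6_route_info_create() builds a list of fully constructed nexthops first, and only then are they inserted one by one, with already-inserted entries deleted if a later insert fails. A small user-space sketch of that build-then-commit-with-rollback pattern (plain ints instead of rt6_info, all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct nh {
	int route;
	int committed;
	struct nh *next;
};

static int commit(struct nh *nh)
{
	if (nh->route == 3)	/* simulate a table insert failure */
		return -1;
	nh->committed = 1;
	return 0;
}

int main(void)
{
	struct nh *head = NULL, **tail = &head, *n, *tmp;
	int routes[] = { 1, 2, 3 }, err = 0;

	/* Phase 1: build the whole list before touching the "table". */
	for (int i = 0; i < 3; i++) {
		n = calloc(1, sizeof(*n));
		n->route = routes[i];
		*tail = n;
		tail = &n->next;
	}

	/* Phase 2: commit in order; on failure undo what was committed. */
	for (n = head; n; n = n->next) {
		err = commit(n);
		if (err)
			break;
	}
	if (err)
		for (n = head; n; n = n->next)
			if (n->committed)
				printf("rollback route %d\n", n->route);

	for (n = head; n; n = tmp) {	/* final cleanup of the list */
		tmp = n->next;
		free(n);
	}
	return err ? 1 : 0;
}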
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 685ec13..17b1fe9 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2468,6 +2468,10 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
rssi_hyst == bss_conf->cqm_rssi_hyst)
return 0;
+ if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return -EOPNOTSUPP;
+
bss_conf->cqm_rssi_thold = rssi_thold;
bss_conf->cqm_rssi_hyst = rssi_hyst;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 705ef1d..cd7e55e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4267,6 +4267,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
int ret;
+ u32 i;
+ bool have_80mhz;
sband = local->hw.wiphy->bands[cbss->channel->band];
@@ -4317,6 +4319,20 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
}
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ have_80mhz = false;
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
+
+ have_80mhz = true;
+ break;
+ }
+
+ if (!have_80mhz)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
ht_cap, ht_oper, vht_oper,
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9857693..9ce8883 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -716,7 +716,7 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
/* Filter out rates that the STA does not support */
*mask &= sta->supp_rates[sband->band];
- for (i = 0; i < sizeof(mcs_mask); i++)
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index aee701a..4e202d0 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -1249,6 +1249,58 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->chanctx_mtx);
}
+static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ bool result = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+ !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
+ !sta->sta.ht_cap.ht_supported)
+ continue;
+ result = true;
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void
+iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ bool tdls_ht;
+ u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
+ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
+ u16 opmode;
+
+ /* Nothing to do if the BSS connection uses HT */
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
+ return;
+
+ tdls_ht = (sta && sta->sta.ht_cap.ht_supported) ||
+ iee80211_tdls_have_ht_peers(sdata);
+
+ opmode = sdata->vif.bss_conf.ht_operation_mode;
+
+ if (tdls_ht)
+ opmode |= protection;
+ else
+ opmode &= ~protection;
+
+ if (opmode == sdata->vif.bss_conf.ht_operation_mode)
+ return;
+
+ sdata->vif.bss_conf.ht_operation_mode = opmode;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+}
+
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper)
{
@@ -1274,6 +1326,10 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
return -ENOTSUPP;
}
+ /* protect possible bss_conf changes and avoid concurrency in
+ * ieee80211_bss_info_change_notify()
+ */
+ sdata_lock(sdata);
mutex_lock(&local->mtx);
tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
@@ -1287,16 +1343,18 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
iee80211_tdls_recalc_chanctx(sdata);
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
if (!sta) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
ret = -ENOLINK;
break;
}
+ iee80211_tdls_recalc_ht_protection(sdata, sta);
+
set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
!ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
@@ -1318,6 +1376,11 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
ieee80211_flush_queues(local, sdata, false);
ret = sta_info_destroy_addr(sdata, peer);
+
+ mutex_lock(&local->sta_mtx);
+ iee80211_tdls_recalc_ht_protection(sdata, NULL);
+ mutex_unlock(&local->sta_mtx);
+
iee80211_tdls_recalc_chanctx(sdata);
break;
default:
@@ -1335,6 +1398,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
&sdata->u.mgd.request_smps_work);
mutex_unlock(&local->mtx);
+ sdata_unlock(sdata);
return ret;
}
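iee80211_tdls_recalc_ht_protection() above only matters when the BSS link itself is non-HT: it sets the HT protection bits in ht_operation_mode while at least one authorized HT TDLS peer exists, clears them otherwise, and skips the BSS_CHANGED_HT notification when nothing changed. A compact sketch of that update; the flag values are local stand-ins for the IEEE80211_HT_OP_MODE_* constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PROT_NONHT_MIXED	0x0003
#define PROT_NON_GF_PRSNT	0x0004
#define PROT_NON_HT_PRSNT	0x0010
#define PROTECTION (PROT_NONHT_MIXED | PROT_NON_GF_PRSNT | PROT_NON_HT_PRSNT)

static bool recalc(uint16_t *opmode, bool have_ht_tdls_peer)
{
	uint16_t updated = have_ht_tdls_peer ? (*opmode | PROTECTION)
					     : (*opmode & ~PROTECTION);

	if (updated == *opmode)
		return false;		/* no BSS_CHANGED_HT notification */
	*opmode = updated;
	return true;
}

int main(void)
{
	uint16_t opmode = 0;

	printf("%d opmode=%#x\n", recalc(&opmode, true), (unsigned)opmode);
	printf("%d opmode=%#x\n", recalc(&opmode, true), (unsigned)opmode);
	printf("%d opmode=%#x\n", recalc(&opmode, false), (unsigned)opmode);
	return 0;
}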
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 834ccdb..ff1c798 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -120,6 +120,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
+ bool have_80mhz;
memset(vht_cap, 0, sizeof(*vht_cap));
@@ -129,6 +130,20 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
return;
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ have_80mhz = false;
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
+
+ have_80mhz = true;
+ break;
+ }
+
+ if (!have_80mhz)
+ return;
+
/*
* A VHT STA must support 40 MHz, but if we verify that here
* then we break a few things - some APs (e.g. Netgear R6300v2
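The same 80 MHz check is added in both mlme.c and vht.c above: VHT is only kept enabled if at least one channel in the band is neither disabled nor flagged no-80MHz. A tiny sketch of that predicate; the flag bits are made-up stand-ins for the IEEE80211_CHAN_* values:

#include <stdbool.h>
#include <stdio.h>

#define CHAN_DISABLED	0x1
#define CHAN_NO_80MHZ	0x2

static bool band_has_80mhz(const unsigned int *flags, int n_channels)
{
	for (int i = 0; i < n_channels; i++) {
		if (flags[i] & (CHAN_DISABLED | CHAN_NO_80MHZ))
			continue;
		return true;	/* one usable 80 MHz channel is enough */
	}
	return false;
}

int main(void)
{
	unsigned int chans[] = { CHAN_DISABLED, CHAN_NO_80MHZ, 0 };

	printf("VHT allowed: %s\n", band_has_80mhz(chans, 3) ? "yes" : "no");
	return 0;
}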
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index afe905c..691b54f 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -152,9 +152,13 @@ htable_bits(u32 hashsize)
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
#ifdef IP_SET_HASH_WITH_NET0
+/* cidr from 0 to SET_HOST_MASK() value and c = cidr + 1 */
#define NLEN(family) (SET_HOST_MASK(family) + 1)
+#define CIDR_POS(c) ((c) - 1)
#else
+/* cidr from 1 to SET_HOST_MASK() value and c = cidr + 1 */
#define NLEN(family) SET_HOST_MASK(family)
+#define CIDR_POS(c) ((c) - 2)
#endif
#else
@@ -305,7 +309,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
} else if (h->nets[i].cidr[n] < cidr) {
j = i;
} else if (h->nets[i].cidr[n] == cidr) {
- h->nets[cidr - 1].nets[n]++;
+ h->nets[CIDR_POS(cidr)].nets[n]++;
return;
}
}
@@ -314,7 +318,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
}
h->nets[i].cidr[n] = cidr;
- h->nets[cidr - 1].nets[n] = 1;
+ h->nets[CIDR_POS(cidr)].nets[n] = 1;
}
static void
@@ -325,8 +329,8 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
for (i = 0; i < nets_length; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
- h->nets[cidr - 1].nets[n]--;
- if (h->nets[cidr - 1].nets[n] > 0)
+ h->nets[CIDR_POS(cidr)].nets[n]--;
+ if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
return;
for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
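CIDR_POS() above maps the stored cidr value back to the right slot of the nets[] counters: these sets store cidr + 1, and the slot offset depends on whether a /0 prefix is legal (IP_SET_HASH_WITH_NET0). A plain-integer illustration; WITH_NET0 is a local stand-in for that config symbol:

#include <stdio.h>

#define WITH_NET0 1

#if WITH_NET0
# define CIDR_POS(c)	((c) - 1)	/* cidr 0..MASK -> slots 0..MASK */
#else
# define CIDR_POS(c)	((c) - 2)	/* cidr 1..MASK -> slots 0..MASK-1 */
#endif

int main(void)
{
	for (int cidr = 0; cidr <= 2; cidr++) {
		int c = cidr + 1;	/* value as stored by the set */
		printf("cidr %d -> stored %d -> slot %d\n",
		       cidr, c, CIDR_POS(c));
	}
	return 0;
}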
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 3c862c0..a93dfeb 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -131,6 +131,13 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
+static void
+hash_netnet4_init(struct hash_netnet4_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@@ -160,7 +167,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_netnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, last;
u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
@@ -169,6 +176,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
@@ -357,6 +365,13 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
+static void
+hash_netnet6_init(struct hash_netnet6_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@@ -385,13 +400,14 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index 0c68734..9a14c23 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -142,6 +142,13 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
+static void
+hash_netportnet4_init(struct hash_netportnet4_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@@ -175,7 +182,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
@@ -185,6 +192,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netportnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -412,6 +420,13 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
+static void
+hash_netportnet6_init(struct hash_netportnet6_elem *e)
+{
+ e->cidr[0] = HOST_MASK;
+ e->cidr[1] = HOST_MASK;
+}
+
static int
hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@@ -445,7 +460,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+ struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
@@ -454,6 +469,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+ hash_netportnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index eedf049..c09d6c7 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -313,12 +313,13 @@ out_free:
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
-static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
nf_ct_ext_destroy(tmpl);
nf_ct_ext_free(tmpl);
kfree(tmpl);
}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
static void
destroy_conntrack(struct nf_conntrack *nfct)
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 888b955..c8a4a48 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -380,7 +380,7 @@ static int __net_init synproxy_net_init(struct net *net)
err3:
free_percpu(snet->stats);
err2:
- nf_conntrack_free(ct);
+ nf_ct_tmpl_free(ct);
err1:
return err;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 0c0e8ec..70277b1 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -444,6 +444,7 @@ done:
static void nfnetlink_rcv(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ u_int16_t res_id;
int msglen;
if (nlh->nlmsg_len < NLMSG_HDRLEN ||
@@ -468,7 +469,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
nfgenmsg = nlmsg_data(nlh);
skb_pull(skb, msglen);
- nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
+ /* Work around old nft using host byte order */
+ if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
+ res_id = NFNL_SUBSYS_NFTABLES;
+ else
+ res_id = ntohs(nfgenmsg->res_id);
+ nfnetlink_rcv_batch(skb, nlh, res_id);
} else {
netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
}
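The nfnetlink.c hunk above works around old nft binaries that put the nftables subsystem id into res_id in host byte order: that exact value is accepted as-is, everything else is converted with ntohs(). A user-space sketch of the same normalization; the subsystem id value is only illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define SUBSYS_NFTABLES 10	/* stand-in for NFNL_SUBSYS_NFTABLES */

static uint16_t normalize_res_id(uint16_t raw)
{
	if (raw == SUBSYS_NFTABLES)	/* old nft: already host order */
		return SUBSYS_NFTABLES;
	return ntohs(raw);		/* otherwise: network byte order */
}

int main(void)
{
	printf("%u\n", (unsigned)normalize_res_id(SUBSYS_NFTABLES));
	printf("%u\n", (unsigned)normalize_res_id(htons(SUBSYS_NFTABLES)));
	return 0;			/* both print 10 */
}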
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 685cc6a..a5cd6d9 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -301,7 +301,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
__be32 **packet_id_ptr)
{
size_t size;
- size_t data_len = 0, cap_len = 0;
+ size_t data_len = 0, cap_len = 0, rem_len = 0;
unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
@@ -360,6 +360,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
+ rem_len = data_len - hlen;
break;
}
@@ -377,7 +378,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
size += nla_total_size(seclen);
}
- skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
+ skb = __netlink_alloc_skb(net->nfnl, size, rem_len, queue->peer_portid,
GFP_ATOMIC);
if (!skb) {
skb_tx_error(entskb);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 8e52489..faf32d8 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -255,7 +255,7 @@ out:
return 0;
err3:
- nf_conntrack_free(ct);
+ nf_ct_tmpl_free(ct);
err2:
nf_ct_l3proto_module_put(par->family);
err1:
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 50889be..7f86d3b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -674,12 +674,19 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
mask = datagram_poll(file, sock, wait);
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (nlk->rx_ring.pg_vec) {
- if (netlink_has_valid_frame(&nlk->rx_ring))
- mask |= POLLIN | POLLRDNORM;
+ /* We could already have received frames in the normal receive
+ * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
+ * so if mask contains pollin/etc already, there's no point
+ * walking the ring.
+ */
+ if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ if (nlk->rx_ring.pg_vec) {
+ if (netlink_has_valid_frame(&nlk->rx_ring))
+ mask |= POLLIN | POLLRDNORM;
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
}
- spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (nlk->tx_ring.pg_vec) {
@@ -1837,15 +1844,16 @@ retry:
}
EXPORT_SYMBOL(netlink_unicast);
-struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask)
+struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ unsigned int ldiff, u32 dst_portid,
+ gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
+ unsigned int maxlen, linear_size;
struct sock *sk = NULL;
struct sk_buff *skb;
struct netlink_ring *ring;
struct nl_mmap_hdr *hdr;
- unsigned int maxlen;
sk = netlink_getsockbyportid(ssk, dst_portid);
if (IS_ERR(sk))
@@ -1856,7 +1864,11 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
if (ring->pg_vec == NULL)
goto out_put;
- if (ring->frame_size - NL_MMAP_HDRLEN < size)
+ /* We need to account the full linear size needed as a ring
+ * slot cannot have non-linear parts.
+ */
+ linear_size = size + ldiff;
+ if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
goto out_put;
skb = alloc_skb_head(gfp_mask);
@@ -1870,13 +1882,14 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
/* check again under lock */
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
- if (maxlen < size)
+ if (maxlen < linear_size)
goto out_free;
netlink_forward_ring(ring);
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
if (hdr == NULL)
goto err2;
+
netlink_ring_setup_skb(skb, sk, ring, hdr);
netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
atomic_inc(&ring->pending);
@@ -1902,7 +1915,7 @@ out:
#endif
return alloc_skb(size, gfp_mask);
}
-EXPORT_SYMBOL_GPL(netlink_alloc_skb);
+EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
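__netlink_alloc_skb() above gains an ldiff argument because a mmap ring frame cannot carry non-linear data: nfnetlink_queue passes the bytes it would otherwise leave paged (rem_len), and the frame-size checks compare against size + ldiff before falling back to a normal alloc_skb(). A rough sketch of that capacity check with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

static bool fits_in_ring_frame(unsigned int frame_size, unsigned int hdrlen,
			       unsigned int size, unsigned int ldiff)
{
	unsigned int linear_size = size + ldiff;	/* whole message, linear */

	return frame_size - hdrlen >= linear_size;
}

int main(void)
{
	/* 16 KiB frames: a 4000-byte message fits, but not once 14000 bytes
	 * of packet data must also be linearized into the same frame.
	 */
	printf("%d\n", fits_in_ring_frame(16384, 16, 4000, 0));	    /* 1 */
	printf("%d\n", fits_in_ring_frame(16384, 16, 4000, 14000)); /* 0 */
	return 0;
}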
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index af7cdef..2a071f4 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -5,6 +5,7 @@
config OPENVSWITCH
tristate "Open vSwitch"
depends on INET
+ depends on (!NF_CONNTRACK || NF_CONNTRACK)
select LIBCRC32C
select MPLS
select NET_MPLS_GSO
@@ -31,17 +32,6 @@ config OPENVSWITCH
If unsure, say N.
-config OPENVSWITCH_CONNTRACK
- bool "Open vSwitch conntrack action support"
- depends on OPENVSWITCH
- depends on NF_CONNTRACK
- default OPENVSWITCH
- ---help---
- If you say Y here, then Open vSwitch module will be able to pass
- packets through conntrack.
-
- Say N to exclude this support and reduce the binary size.
-
config OPENVSWITCH_GRE
tristate "Open vSwitch GRE tunneling support"
depends on OPENVSWITCH
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 5b5913b..60f8090 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,7 +15,9 @@ openvswitch-y := \
vport-internal_dev.o \
vport-netdev.o
-openvswitch-$(CONFIG_OPENVSWITCH_CONNTRACK) += conntrack.o
+ifneq ($(CONFIG_NF_CONNTRACK),)
+openvswitch-y += conntrack.o
+endif
obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
index 3cb3066..43f5dd7 100644
--- a/net/openvswitch/conntrack.h
+++ b/net/openvswitch/conntrack.h
@@ -19,7 +19,7 @@
struct ovs_conntrack_info;
enum ovs_key_attr;
-#if defined(CONFIG_OPENVSWITCH_CONNTRACK)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
void ovs_ct_init(struct net *);
void ovs_ct_exit(struct net *);
bool ovs_ct_verify(struct net *, enum ovs_key_attr attr);
@@ -82,5 +82,5 @@ static inline int ovs_ct_put_key(const struct sw_flow_key *key,
}
static inline void ovs_ct_free_action(const struct nlattr *a) { }
-#endif
+#endif /* CONFIG_NF_CONNTRACK */
#endif /* ovs_conntrack.h */
diff --git a/net/rds/connection.c b/net/rds/connection.c
index a50e652..49adeef 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -70,7 +70,8 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
} while (0)
/* rcu read lock must be held or the connection spinlock */
-static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
+static struct rds_connection *rds_conn_lookup(struct net *net,
+ struct hlist_head *head,
__be32 laddr, __be32 faddr,
struct rds_transport *trans)
{
@@ -78,7 +79,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
- conn->c_trans == trans) {
+ conn->c_trans == trans && net == rds_conn_net(conn)) {
ret = conn;
break;
}
@@ -132,7 +133,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
goto new_conn;
rcu_read_lock();
- conn = rds_conn_lookup(head, laddr, faddr, trans);
+ conn = rds_conn_lookup(net, head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
@@ -189,6 +190,12 @@ new_conn:
}
}
+ if (trans == NULL) {
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = ERR_PTR(-ENODEV);
+ goto out;
+ }
+
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
@@ -239,7 +246,7 @@ new_conn:
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
found = NULL;
else
- found = rds_conn_lookup(head, laddr, faddr, trans);
+ found = rds_conn_lookup(net, head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f12149a..b41e9ea 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -341,7 +341,15 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
struct rfkill *rfkill;
- rfkill_global_states[type].cur = blocked;
+ if (type == RFKILL_TYPE_ALL) {
+ int i;
+
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ rfkill_global_states[i].cur = blocked;
+ } else {
+ rfkill_global_states[type].cur = blocked;
+ }
+
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 4345790..b714333 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -506,14 +506,22 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (IS_ERR(rt))
continue;
+ if (!dst)
+ dst = &rt->dst;
+
/* Ensure the src address belongs to the output
* interface.
*/
odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
false);
- if (!odev || odev->ifindex != fl4->flowi4_oif)
+ if (!odev || odev->ifindex != fl4->flowi4_oif) {
+ if (&rt->dst != dst)
+ dst_release(&rt->dst);
continue;
+ }
+ if (dst != &rt->dst)
+ dst_release(dst);
dst = &rt->dst;
break;
}
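The sctp_v4_get_dst() hunk above plugs a route refcount leak: the first looked-up route is kept as a fallback, candidates that fail the source-interface check are released unless they are that fallback, and the fallback itself is released once a better route wins. A small sketch of that reference discipline, with get()/put() standing in for the route lookup and dst_release():

#include <stdio.h>

struct rt { int refs; int good; };

static struct rt *get(struct rt *r) { r->refs++; return r; }
static void put(struct rt *r)       { r->refs--; }

int main(void)
{
	struct rt a = { .good = 0 }, b = { .good = 1 }, *cand[] = { &a, &b };
	struct rt *dst = NULL;

	for (int i = 0; i < 2; i++) {
		struct rt *r = get(cand[i]);	/* lookup takes a reference */

		if (!dst)
			dst = r;		/* keep the first as fallback */
		if (!r->good) {
			if (r != dst)
				put(r);		/* drop a failed candidate */
			continue;
		}
		if (dst != r)
			put(dst);		/* a better route wins */
		dst = r;
		break;
	}
	printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);	/* 0 and 1 */
	return 0;
}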
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 16c1c43..fda38f8 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -853,12 +853,8 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
.cb = cb,
.idx = idx,
};
- int err;
-
- err = switchdev_port_obj_dump(dev, &dump.obj);
- if (err)
- return err;
+ switchdev_port_obj_dump(dev, &dump.obj);
return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 8b010c9..41042de 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -170,6 +170,30 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
}
/**
+ * bclink_prepare_wakeup - prepare users for wakeup after congestion
+ * @bcl: broadcast link
+ * @resultq: queue for users which can be woken up
+ * Move a number of waiting users, as permitted by available space in
+ * the send queue, from link wait queue to specified queue for wakeup
+ */
+static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+{
+ int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+ int imp, lim;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
+ imp = TIPC_SKB_CB(skb)->chain_imp;
+ lim = bcl->window + bcl->backlog[imp].limit;
+ pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+ if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
+ continue;
+ skb_unlink(skb, &bcl->wakeupq);
+ skb_queue_tail(resultq, skb);
+ }
+}
+
+/**
* tipc_bclink_wakeup_users - wake up pending users
*
* Called with no locks taken
@@ -177,8 +201,12 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
void tipc_bclink_wakeup_users(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+ struct sk_buff_head resultq;
- tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
+ skb_queue_head_init(&resultq);
+ bclink_prepare_wakeup(bcl, &resultq);
+ tipc_sk_rcv(net, &resultq);
}
/**
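bclink_prepare_wakeup() above wakes waiting users only while there is room on the broadcast link: per importance level it adds up the chain sizes selected so far and skips a waiter once that sum plus the existing backlog reaches the limit. A sketch of that admission check with made-up limits, backlogs and chain sizes:

#include <stdio.h>

#define LEVELS 4

int main(void)
{
	int limit[LEVELS]   = { 50, 100, 150, 200 };	/* window + backlog limit */
	int backlog[LEVELS] = { 45,  20,   0,   0 };	/* already queued on link */
	int pnd[LEVELS]     = { 0 };			/* selected this pass */
	struct { int imp; int chain_sz; } waiters[] = {
		{ 0, 10 }, { 1, 30 }, { 1, 60 }, { 3, 5 },
	};

	for (int i = 0; i < 4; i++) {
		int imp = waiters[i].imp;

		pnd[imp] += waiters[i].chain_sz;
		if (pnd[imp] + backlog[imp] >= limit[imp])
			continue;			/* keep this user waiting */
		printf("wake waiter %d (importance %d)\n", i, imp);
	}
	return 0;
}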
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index b144485..2510b231 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2625,7 +2625,7 @@ static void restore_regulatory_settings(bool reset_user)
* settings, user regulatory settings takes precedence.
*/
if (is_an_alpha2(alpha2))
- regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
+ regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
spin_lock(&reg_requests_lock);
list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);