Diffstat (limited to 'include/net')
-rw-r--r--  include/net/act_api.h | 37
-rw-r--r--  include/net/addrconf.h | 3
-rw-r--r--  include/net/af_rxrpc.h | 7
-rw-r--r--  include/net/af_vsock.h | 20
-rw-r--r--  include/net/bonding.h | 7
-rw-r--r--  include/net/cfg80211.h | 40
-rw-r--r--  include/net/dn.h | 7
-rw-r--r--  include/net/dn_nsp.h | 1
-rw-r--r--  include/net/dsa.h | 119
-rw-r--r--  include/net/dst.h | 17
-rw-r--r--  include/net/dst_metadata.h | 6
-rw-r--r--  include/net/fib_notifier.h | 3
-rw-r--r--  include/net/fq.h | 7
-rw-r--r--  include/net/fq_impl.h | 81
-rw-r--r--  include/net/inet_connection_sock.h | 6
-rw-r--r--  include/net/inet_ecn.h | 5
-rw-r--r--  include/net/inet_frag.h | 2
-rw-r--r--  include/net/inet_sock.h | 11
-rw-r--r--  include/net/ip6_fib.h | 79
-rw-r--r--  include/net/ip6_route.h | 5
-rw-r--r--  include/net/ip_fib.h | 3
-rw-r--r--  include/net/ipv6.h | 4
-rw-r--r--  include/net/llc_c_ac.h | 8
-rw-r--r--  include/net/mac80211.h | 8
-rw-r--r--  include/net/neighbour.h | 4
-rw-r--r--  include/net/netlink.h | 73
-rw-r--r--  include/net/netns/ipv4.h | 37
-rw-r--r--  include/net/phonet/phonet.h | 6
-rw-r--r--  include/net/pkt_cls.h | 195
-rw-r--r--  include/net/pkt_sched.h | 21
-rw-r--r--  include/net/protocol.h | 4
-rw-r--r--  include/net/request_sock.h | 2
-rw-r--r--  include/net/route.h | 4
-rw-r--r--  include/net/rtnetlink.h | 5
-rw-r--r--  include/net/sch_generic.h | 15
-rw-r--r--  include/net/sctp/sm.h | 12
-rw-r--r--  include/net/sctp/stream_sched.h | 72
-rw-r--r--  include/net/sctp/structs.h | 63
-rw-r--r--  include/net/sctp/ulpevent.h | 2
-rw-r--r--  include/net/sock.h | 11
-rw-r--r--  include/net/strparser.h | 3
-rw-r--r--  include/net/switchdev.h | 1
-rw-r--r--  include/net/tc_act/tc_gact.h | 5
-rw-r--r--  include/net/tc_act/tc_ife.h | 12
-rw-r--r--  include/net/tc_act/tc_mirred.h | 1
-rw-r--r--  include/net/tcp.h | 203
-rw-r--r--  include/net/udp.h | 2
47 files changed, 929 insertions(+), 310 deletions(-)
diff --git a/include/net/act_api.h b/include/net/act_api.h
index b944e0e..f5e8c90 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -93,8 +93,7 @@ struct tc_action_ops {
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int, const struct tc_action_ops *);
void (*stats_update)(struct tc_action *, u64, u32, u64);
- int (*get_dev)(const struct tc_action *a, struct net *net,
- struct net_device **mirred_dev);
+ struct net_device *(*get_dev)(const struct tc_action *a);
};
struct tc_action_net {
@@ -175,4 +174,38 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
#endif
}
+typedef int tc_setup_cb_t(enum tc_setup_type type,
+ void *type_data, void *cb_priv);
+
+#ifdef CONFIG_NET_CLS_ACT
+int tc_setup_cb_egdev_register(const struct net_device *dev,
+ tc_setup_cb_t *cb, void *cb_priv);
+void tc_setup_cb_egdev_unregister(const struct net_device *dev,
+ tc_setup_cb_t *cb, void *cb_priv);
+int tc_setup_cb_egdev_call(const struct net_device *dev,
+ enum tc_setup_type type, void *type_data,
+ bool err_stop);
+#else
+static inline
+int tc_setup_cb_egdev_register(const struct net_device *dev,
+ tc_setup_cb_t *cb, void *cb_priv)
+{
+ return 0;
+}
+
+static inline
+void tc_setup_cb_egdev_unregister(const struct net_device *dev,
+ tc_setup_cb_t *cb, void *cb_priv)
+{
+}
+
+static inline
+int tc_setup_cb_egdev_call(const struct net_device *dev,
+ enum tc_setup_type type, void *type_data,
+ bool err_stop)
+{
+ return 0;
+}
+#endif
+
#endif
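
The tc_setup_cb_egdev_* helpers added above let one driver attach a classifier-offload callback to another net_device's egress hook (used when offloading mirred-to-egress actions). A minimal sketch of the registration flow, assuming CONFIG_NET_CLS_ACT; struct foo_priv and the foo_* functions are illustrative names, not part of the API:

/* Sketch only: hypothetical driver glue for the new egdev callback API. */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/act_api.h>

struct foo_priv {
        struct net_device *dev;
};

static int foo_setup_tc_egdev(enum tc_setup_type type, void *type_data,
                              void *cb_priv)
{
        struct foo_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                netdev_dbg(priv->dev, "flower rule offload requested\n");
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int foo_bind_egdev(struct foo_priv *priv, struct net_device *egdev)
{
        return tc_setup_cb_egdev_register(egdev, foo_setup_tc_egdev, priv);
}

static void foo_unbind_egdev(struct foo_priv *priv, struct net_device *egdev)
{
        tc_setup_cb_egdev_unregister(egdev, foo_setup_tc_egdev, priv);
}

Rules destined for that egress device are then delivered to the registered callbacks via tc_setup_cb_egdev_call().
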
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 87981cd..15b5ffd 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -55,9 +55,10 @@ struct prefix_info {
struct in6_validator_info {
struct in6_addr i6vi_addr;
struct inet6_dev *i6vi_dev;
+ struct netlink_ext_ack *extack;
};
-#define IN6_ADDR_HSIZE_SHIFT 4
+#define IN6_ADDR_HSIZE_SHIFT 8
#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
int addrconf_init(void);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 3ac7915..2b3a6ee 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -49,17 +49,19 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
unsigned long,
s64,
gfp_t,
- rxrpc_notify_rx_t);
+ rxrpc_notify_rx_t,
+ bool);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
- void *, size_t, size_t *, bool, u32 *);
+ void *, size_t, size_t *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
+u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
@@ -67,5 +69,6 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *, struct key *);
int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
enum rxrpc_call_completion *, u32 *);
+u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
#endif /* _NET_RXRPC_H */
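
rxrpc_kernel_check_life() gives kernel users (such as AFS) a cheap liveness counter for a call; the assumption here, matching how its callers use it, is that the returned value changes whenever the call makes progress, so successive samples are compared while waiting. A hedged sketch; my_call_still_alive() is illustrative:

/* Sketch only: detect a stalled kernel-API rxrpc call by sampling its life
 * counter; the caller remembers the previous sample in *last_life.
 */
#include <net/af_rxrpc.h>

static bool my_call_still_alive(struct socket *sock, struct rxrpc_call *call,
                                u32 *last_life)
{
        u32 life = rxrpc_kernel_check_life(sock, call);
        bool progressed = (life != *last_life);

        *last_life = life;
        return progressed;
}
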
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f9fb566..9324ac2 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -22,11 +22,13 @@
#include "vsock_addr.h"
-/* vsock-specific sock->sk_state constants */
-#define VSOCK_SS_LISTEN 255
-
#define LAST_RESERVED_PORT 1023
+#define VSOCK_HASH_SIZE 251
+extern struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
+extern struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
+extern spinlock_t vsock_table_lock;
+
#define vsock_sk(__sk) ((struct vsock_sock *)__sk)
#define sk_vsock(__vsk) (&(__vsk)->sk)
@@ -175,6 +177,18 @@ const struct vsock_transport *vsock_core_get_transport(void);
/**** UTILS ****/
+/* vsock_table_lock must be held */
+static inline bool __vsock_in_bound_table(struct vsock_sock *vsk)
+{
+ return !list_empty(&vsk->bound_table);
+}
+
+/* vsock_table_lock must be held */
+static inline bool __vsock_in_connected_table(struct vsock_sock *vsk)
+{
+ return !list_empty(&vsk->connected_table);
+}
+
void vsock_release_pending(struct sock *pending);
void vsock_add_pending(struct sock *listener, struct sock *pending);
void vsock_remove_pending(struct sock *listener, struct sock *pending);
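
The newly exported tables and the __vsock_in_*_table() helpers may only be inspected with vsock_table_lock held. A minimal sketch of that locking pattern; my_vsock_is_linked() is illustrative:

/* Sketch only: the table helpers may only be called under vsock_table_lock. */
#include <linux/spinlock.h>
#include <net/af_vsock.h>

static bool my_vsock_is_linked(struct vsock_sock *vsk)
{
        bool linked;

        spin_lock_bh(&vsock_table_lock);
        linked = __vsock_in_bound_table(vsk) ||
                 __vsock_in_connected_table(vsk);
        spin_unlock_bh(&vsock_table_lock);

        return linked;
}
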
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b2e6865..f801fc9 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -330,7 +330,6 @@ static inline void bond_set_active_slave(struct slave *slave)
slave->backup = 0;
bond_queue_slave_event(slave);
bond_lower_state_changed(slave);
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -340,7 +339,6 @@ static inline void bond_set_backup_slave(struct slave *slave)
slave->backup = 1;
bond_queue_slave_event(slave);
bond_lower_state_changed(slave);
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -353,7 +351,6 @@ static inline void bond_set_slave_state(struct slave *slave,
slave->backup = slave_state;
if (notify) {
bond_lower_state_changed(slave);
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
bond_queue_slave_event(slave);
slave->should_notify = 0;
} else {
@@ -385,7 +382,6 @@ static inline void bond_slave_state_notify(struct bonding *bond)
bond_for_each_slave(bond, tmp, iter) {
if (tmp->should_notify) {
bond_lower_state_changed(tmp);
- rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
tmp->should_notify = 0;
}
}
@@ -596,7 +592,8 @@ void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
int bond_sysfs_slave_add(struct slave *slave);
void bond_sysfs_slave_del(struct slave *slave);
-int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
+int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ struct netlink_ext_ack *extack);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
int bond_set_carrier(struct bonding *bond);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f12fa52..8b8118a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4347,19 +4347,6 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
}
/**
- * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11
- * @skb: the 802.3 frame
- * @addr: the device MAC address
- * @iftype: the virtual interface type
- * @bssid: the network bssid (used only for iftype STATION and ADHOC)
- * @qos: build 802.11 QoS data frame
- * Return: 0 on success, or a negative error code.
- */
-int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype, const u8 *bssid,
- bool qos);
-
-/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
* Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
@@ -5441,9 +5428,6 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
- * @authorized: true if the 802.1X authentication was done by the driver or is
- * not needed (e.g., when Fast Transition protocol was used), false
- * otherwise. Ignored for networks that don't use 802.1X authentication.
*/
struct cfg80211_roam_info {
struct ieee80211_channel *channel;
@@ -5453,7 +5437,6 @@ struct cfg80211_roam_info {
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
- bool authorized;
};
/**
@@ -5478,6 +5461,23 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
gfp_t gfp);
/**
+ * cfg80211_port_authorized - notify cfg80211 of successful security association
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @gfp: allocation flags
+ *
+ * This function should be called by a driver that supports 4 way handshake
+ * offload after a security association was successfully established (i.e.,
+ * the 4 way handshake was completed successfully). The call to this function
+ * should be preceded with a call to cfg80211_connect_result(),
+ * cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to
+ * indicate the 802.11 association.
+ */
+void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
+ gfp_t gfp);
+
+/**
* cfg80211_disconnected - notify cfg80211 that connection was dropped
*
* @dev: network device
@@ -5934,7 +5934,8 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
* @ies: the IE buffer
* @ielen: the length of the IE buffer
* @ids: an array with element IDs that are allowed before
- * the split
+ * the split. A WLAN_EID_EXTENSION value means that the next
+ * EID in the list is a sub-element of the EXTENSION IE.
* @n_ids: the size of the element ID array
* @after_ric: array IE types that come after the RIC element
* @n_after_ric: size of the @after_ric array
@@ -5965,7 +5966,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
* @ies: the IE buffer
* @ielen: the length of the IE buffer
* @ids: an array with element IDs that are allowed before
- * the split
+ * the split. A WLAN_EID_EXTENSION value means that the next
+ * EID in the list is a sub-element of the EXTENSION IE.
* @n_ids: the size of the element ID array
* @offset: offset where to start splitting in the buffer
*
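
cfg80211_port_authorized() is meant for drivers that offload the 4-way handshake: they report the association as usual and only signal authorization once the handshake completes. A sketch under that assumption; the my_drv_* helpers are illustrative:

/* Sketch only: report the association first, the authorized port later. */
#include <net/cfg80211.h>

static void my_drv_report_connected(struct net_device *ndev, const u8 *bssid,
                                    const u8 *req_ie, size_t req_ie_len,
                                    const u8 *resp_ie, size_t resp_ie_len)
{
        cfg80211_connect_result(ndev, bssid, req_ie, req_ie_len,
                                resp_ie, resp_ie_len,
                                WLAN_STATUS_SUCCESS, GFP_KERNEL);
}

static void my_drv_report_handshake_done(struct net_device *ndev,
                                         const u8 *bssid)
{
        /* called once firmware reports the 4-way handshake completed */
        cfg80211_port_authorized(ndev, bssid, GFP_KERNEL);
}
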
diff --git a/include/net/dn.h b/include/net/dn.h
index 913b73d..4394f7d 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -122,13 +122,6 @@ struct dn_scp /* Session Control Port */
unsigned long keepalive;
void (*keepalive_fxn)(struct sock *sk);
- /*
- * This stuff is for the fast timer for delayed acks
- */
- struct timer_list delack_timer;
- int delack_pending;
- void (*delack_fxn)(struct sock *sk);
-
};
static inline struct dn_scp *DN_SK(struct sock *sk)
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 3a3e33d..413a15e 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -17,7 +17,6 @@
void dn_nsp_send_data_ack(struct sock *sk);
void dn_nsp_send_oth_ack(struct sock *sk);
-void dn_nsp_delayed_ack(struct sock *sk);
void dn_send_conn_ack(struct sock *sk);
void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
void dn_nsp_send_disc(struct sock *sk, unsigned char type,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 8dee216..50e276d 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -130,11 +130,6 @@ struct dsa_switch_tree {
*/
struct dsa_platform_data *pd;
- /* Copy of tag_ops->rcv for faster access in hot path */
- struct sk_buff * (*rcv)(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt);
-
/*
* The switch port to which the CPU is attached.
*/
@@ -144,12 +139,6 @@ struct dsa_switch_tree {
* Data for the individual switch chips.
*/
struct dsa_switch *ds[DSA_MAX_SWITCHES];
-
- /*
- * Tagging protocol operations for adding and removing an
- * encapsulation tag.
- */
- const struct dsa_device_ops *tag_ops;
};
/* TC matchall action types, only mirroring for now */
@@ -175,11 +164,33 @@ struct dsa_mall_tc_entry {
struct dsa_port {
+ /* A CPU port is physically connected to a master device.
+ * A user port exposed to userspace has a slave device.
+ */
+ union {
+ struct net_device *master;
+ struct net_device *slave;
+ };
+
+ /* CPU port tagging operations used by master or slave devices */
+ const struct dsa_device_ops *tag_ops;
+
+ /* Copies for faster access in master receive hot path */
+ struct dsa_switch_tree *dst;
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt);
+
+ enum {
+ DSA_PORT_TYPE_UNUSED = 0,
+ DSA_PORT_TYPE_CPU,
+ DSA_PORT_TYPE_DSA,
+ DSA_PORT_TYPE_USER,
+ } type;
+
struct dsa_switch *ds;
unsigned int index;
const char *name;
struct dsa_port *cpu_dp;
- struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
u8 stp_state;
@@ -229,9 +240,6 @@ struct dsa_switch {
/*
* Slave mii_bus and devices for the individual ports.
*/
- u32 dsa_port_mask;
- u32 cpu_port_mask;
- u32 enabled_port_mask;
u32 phys_mii_mask;
struct mii_bus *slave_mii_bus;
@@ -250,19 +258,41 @@ struct dsa_switch {
struct dsa_port ports[];
};
+static inline const struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
+{
+ return &ds->ports[p];
+}
+
+static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
+{
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
+}
+
static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
{
- return !!(ds->cpu_port_mask & (1 << p));
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
}
static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
{
- return !!((ds->dsa_port_mask) & (1 << p));
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
+}
+
+static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
+{
+ return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}
-static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p)
+static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
- return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p);
+ u32 mask = 0;
+ int p;
+
+ for (p = 0; p < ds->num_ports; p++)
+ if (dsa_is_user_port(ds, p))
+ mask |= BIT(p);
+
+ return mask;
}
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
@@ -294,7 +324,6 @@ struct dsa_switch_ops {
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds);
int (*setup)(struct dsa_switch *ds);
- int (*set_addr)(struct dsa_switch *ds, u8 *addr);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
@@ -474,4 +503,54 @@ static inline int dsa_switch_resume(struct dsa_switch *ds)
}
#endif /* CONFIG_PM_SLEEP */
+enum dsa_notifier_type {
+ DSA_PORT_REGISTER,
+ DSA_PORT_UNREGISTER,
+};
+
+struct dsa_notifier_info {
+ struct net_device *dev;
+};
+
+struct dsa_notifier_register_info {
+ struct dsa_notifier_info info; /* must be first */
+ struct net_device *master;
+ unsigned int port_number;
+ unsigned int switch_number;
+};
+
+static inline struct net_device *
+dsa_notifier_info_to_dev(const struct dsa_notifier_info *info)
+{
+ return info->dev;
+}
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+int register_dsa_notifier(struct notifier_block *nb);
+int unregister_dsa_notifier(struct notifier_block *nb);
+int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+ struct dsa_notifier_info *info);
+#else
+static inline int register_dsa_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_dsa_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+ struct dsa_notifier_info *info)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
#endif
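
The per-port type field replaces the old cpu/dsa/enabled port masks, with dsa_user_ports() rebuilding a user-port mask on demand, and the BRCM_TAG_* macros pack a port and queue number into one 16-bit value. A small sketch of both; the my_* helpers are illustrative:

/* Sketch only: the new port-type helpers and Broadcom tag accessors. */
#include <linux/bitops.h>
#include <linux/printk.h>
#include <net/dsa.h>

static void my_dump_user_ports(struct dsa_switch *ds)
{
        u32 mask = dsa_user_ports(ds);
        int p;

        for (p = 0; p < ds->num_ports; p++)
                if (mask & BIT(p))
                        pr_info("port %d is a user port\n", p);
}

static u16 my_brcm_tag_encode(int port, int queue)
{
        return BRCM_TAG_SET_PORT_QUEUE(port, queue);
}

static void my_brcm_tag_decode(u16 val, int *port, int *queue)
{
        *port = BRCM_TAG_GET_PORT(val);
        *queue = BRCM_TAG_GET_QUEUE(val);
}
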
diff --git a/include/net/dst.h b/include/net/dst.h
index 06a6765..2f53ecc 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -101,7 +101,7 @@ struct dst_entry {
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
- struct rt6_info *rt6_next;
+ struct rt6_info __rcu *rt6_next;
struct dn_route __rcu *dn_next;
};
};
@@ -255,17 +255,18 @@ static inline void dst_hold(struct dst_entry *dst)
WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}
-static inline void dst_use(struct dst_entry *dst, unsigned long time)
+static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
- dst_hold(dst);
- dst->__use++;
- dst->lastuse = time;
+ if (unlikely(time != dst->lastuse)) {
+ dst->__use++;
+ dst->lastuse = time;
+ }
}
-static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
+static inline void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
{
- dst->__use++;
- dst->lastuse = time;
+ dst_hold(dst);
+ dst_use_noref(dst, time);
}
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
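
dst_use() is split into two helpers: dst_use_noref() only refreshes the usage statistics (and now skips the write when lastuse is unchanged), while dst_hold_and_use() additionally takes a reference. A sketch of which helper fits which caller; the my_* wrappers are illustrative:

/* Sketch only: pick the helper matching the caller's reference semantics. */
#include <linux/jiffies.h>
#include <net/dst.h>

static void my_touch_cached_dst(struct dst_entry *dst)
{
        /* dst lives in an RCU/noref cache: just refresh the statistics */
        dst_use_noref(dst, jiffies);
}

static struct dst_entry *my_take_dst_ref(struct dst_entry *dst)
{
        /* caller keeps its own reference */
        dst_hold_and_use(dst, jiffies);
        return dst;
}
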
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index a803129..87a0bb8 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -24,7 +24,7 @@ struct metadata_dst {
} u;
};
-static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+static inline struct metadata_dst *skb_metadata_dst(const struct sk_buff *skb)
{
struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
@@ -34,7 +34,8 @@ static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
return NULL;
}
-static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
+static inline struct ip_tunnel_info *
+skb_tunnel_info(const struct sk_buff *skb)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dst_entry *dst;
@@ -86,6 +87,7 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
void metadata_dst_free(struct metadata_dst *);
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
gfp_t flags);
+void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst);
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags);
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index 669b971..c91ec73 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -9,6 +9,7 @@
struct fib_notifier_info {
struct net *net;
int family;
+ struct netlink_ext_ack *extack;
};
enum fib_event_type {
@@ -20,6 +21,8 @@ enum fib_event_type {
FIB_EVENT_RULE_DEL,
FIB_EVENT_NH_ADD,
FIB_EVENT_NH_DEL,
+ FIB_EVENT_VIF_ADD,
+ FIB_EVENT_VIF_DEL,
};
struct fib_notifier_ops {
diff --git a/include/net/fq.h b/include/net/fq.h
index 6d8521a..ac944a6 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -90,6 +90,13 @@ typedef void fq_skb_free_t(struct fq *,
struct fq_flow *,
struct sk_buff *);
+/* Return %true to filter (drop) the frame. */
+typedef bool fq_skb_filter_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *,
+ struct sk_buff *,
+ void *);
+
typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
struct fq_tin *,
int idx,
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 4e6131c..be7c0fa 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -12,24 +12,22 @@
/* functions that are embedded into includer */
-static struct sk_buff *fq_flow_dequeue(struct fq *fq,
- struct fq_flow *flow)
+static void fq_adjust_removal(struct fq *fq,
+ struct fq_flow *flow,
+ struct sk_buff *skb)
{
struct fq_tin *tin = flow->tin;
- struct fq_flow *i;
- struct sk_buff *skb;
-
- lockdep_assert_held(&fq->lock);
-
- skb = __skb_dequeue(&flow->queue);
- if (!skb)
- return NULL;
tin->backlog_bytes -= skb->len;
tin->backlog_packets--;
flow->backlog -= skb->len;
fq->backlog--;
fq->memory_usage -= skb->truesize;
+}
+
+static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
+{
+ struct fq_flow *i;
if (flow->backlog == 0) {
list_del_init(&flow->backlogchain);
@@ -43,6 +41,21 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
list_move_tail(&flow->backlogchain,
&i->backlogchain);
}
+}
+
+static struct sk_buff *fq_flow_dequeue(struct fq *fq,
+ struct fq_flow *flow)
+{
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&fq->lock);
+
+ skb = __skb_dequeue(&flow->queue);
+ if (!skb)
+ return NULL;
+
+ fq_adjust_removal(fq, flow, skb);
+ fq_rejigger_backlog(fq, flow);
return skb;
}
@@ -146,6 +159,7 @@ static void fq_tin_enqueue(struct fq *fq,
fq_flow_get_default_t get_default_func)
{
struct fq_flow *flow;
+ bool oom;
lockdep_assert_held(&fq->lock);
@@ -167,8 +181,8 @@ static void fq_tin_enqueue(struct fq *fq,
}
__skb_queue_tail(&flow->queue, skb);
-
- if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
+ oom = (fq->memory_usage > fq->memory_limit);
+ while (fq->backlog > fq->limit || oom) {
flow = list_first_entry_or_null(&fq->backlogs,
struct fq_flow,
backlogchain);
@@ -183,11 +197,52 @@ static void fq_tin_enqueue(struct fq *fq,
flow->tin->overlimit++;
fq->overlimit++;
- if (fq->memory_usage > fq->memory_limit)
+ if (oom) {
fq->overmemory++;
+ oom = (fq->memory_usage > fq->memory_limit);
+ }
}
}
+static void fq_flow_filter(struct fq *fq,
+ struct fq_flow *flow,
+ fq_skb_filter_t filter_func,
+ void *filter_data,
+ fq_skb_free_t free_func)
+{
+ struct fq_tin *tin = flow->tin;
+ struct sk_buff *skb, *tmp;
+
+ lockdep_assert_held(&fq->lock);
+
+ skb_queue_walk_safe(&flow->queue, skb, tmp) {
+ if (!filter_func(fq, tin, flow, skb, filter_data))
+ continue;
+
+ __skb_unlink(skb, &flow->queue);
+ fq_adjust_removal(fq, flow, skb);
+ free_func(fq, tin, flow, skb);
+ }
+
+ fq_rejigger_backlog(fq, flow);
+}
+
+static void fq_tin_filter(struct fq *fq,
+ struct fq_tin *tin,
+ fq_skb_filter_t filter_func,
+ void *filter_data,
+ fq_skb_free_t free_func)
+{
+ struct fq_flow *flow;
+
+ lockdep_assert_held(&fq->lock);
+
+ list_for_each_entry(flow, &tin->new_flows, flowchain)
+ fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
+ list_for_each_entry(flow, &tin->old_flows, flowchain)
+ fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
+}
+
static void fq_flow_reset(struct fq *fq,
struct fq_flow *flow,
fq_skb_free_t free_func)
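
fq_tin_filter() walks every flow of a tin and unlinks the skbs for which the filter returns true, handing them to the free callback. Since fq_impl.h is designed to be included by its single user, a filter like the sketch below would live in that includer; the my_* names are illustrative and frames are matched by skb->priority purely as an example:

/* Sketch only: this would live in the file that includes net/fq_impl.h. */
static void my_free_skb(struct fq *fq, struct fq_tin *tin,
                        struct fq_flow *flow, struct sk_buff *skb)
{
        dev_kfree_skb(skb);
}

static bool my_match_prio(struct fq *fq, struct fq_tin *tin,
                          struct fq_flow *flow, struct sk_buff *skb,
                          void *data)
{
        u32 *prio = data;

        return skb->priority == *prio;  /* true => unlink and free */
}

static void my_drop_prio(struct fq *fq, struct fq_tin *tin, u32 prio)
{
        spin_lock_bh(&fq->lock);
        fq_tin_filter(fq, tin, my_match_prio, &prio, my_free_skb);
        spin_unlock_bh(&fq->lock);
}
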
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 13e4c89..0358745 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -169,9 +169,9 @@ enum inet_csk_ack_state_t {
};
void inet_csk_init_xmit_timers(struct sock *sk,
- void (*retransmit_handler)(unsigned long),
- void (*delack_handler)(unsigned long),
- void (*keepalive_handler)(unsigned long));
+ void (*retransmit_handler)(struct timer_list *),
+ void (*delack_handler)(struct timer_list *),
+ void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);
static inline void inet_csk_schedule_ack(struct sock *sk)
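
With the timer-API conversion, the xmit timer handlers receive a struct timer_list pointer and recover their socket via from_timer() instead of casting an unsigned long. A sketch of one converted handler, assuming the usual inet_connection_sock embedding; my_proto_retransmit_timer() is illustrative:

/* Sketch only: a timer_list-style retransmit handler for a protocol that
 * embeds its timers in inet_connection_sock.
 */
#include <linux/timer.h>
#include <net/inet_connection_sock.h>
#include <net/sock.h>

static void my_proto_retransmit_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        /* ... retransmission processing for sk ... */
        bh_unlock_sock(sk);
}

inet_csk_init_xmit_timers() is then handed these new-style handlers.
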
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index dce2d58..f5ff16d 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -133,11 +133,6 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
return 1;
}
-static inline void IP6_ECN_clear(struct ipv6hdr *iph)
-{
- *(__be32*)iph &= ~htonl(INET_ECN_MASK << 20);
-}
-
static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{
dscp &= ~INET_ECN_MASK;
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index fc59e07..c695807 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -95,7 +95,7 @@ struct inet_frags {
void (*constructor)(struct inet_frag_queue *q,
const void *arg);
void (*destructor)(struct inet_frag_queue *);
- void (*frag_expire)(unsigned long data);
+ void (*frag_expire)(struct timer_list *t);
struct kmem_cache *frags_cachep;
const char *frags_cache_name;
};
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index aa95053..2135c9b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -92,11 +92,12 @@ struct inet_request_sock {
wscale_ok : 1,
ecn_ok : 1,
acked : 1,
- no_srccheck: 1;
+ no_srccheck: 1,
+ smc_ok : 1;
kmemcheck_bitfield_end(flags);
u32 ir_mark;
union {
- struct ip_options_rcu *opt;
+ struct ip_options_rcu __rcu *ireq_opt;
#if IS_ENABLED(CONFIG_IPV6)
struct {
struct ipv6_txoptions *ipv6_opt;
@@ -132,6 +133,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+ return rcu_dereference_check(ireq->ireq_opt,
+ refcount_read(&ireq->req.rsk_refcnt) > 0);
+}
+
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index d060d71..10c9138 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -29,6 +29,14 @@
#define FIB6_TABLE_HASHSZ 1
#endif
+#define RT6_DEBUG 2
+
+#if RT6_DEBUG >= 3
+#define RT6_TRACE(x...) pr_debug(x)
+#else
+#define RT6_TRACE(x...) do { ; } while (0)
+#endif
+
struct rt6_info;
struct fib6_config {
@@ -60,25 +68,30 @@ struct fib6_config {
};
struct fib6_node {
- struct fib6_node *parent;
- struct fib6_node *left;
- struct fib6_node *right;
+ struct fib6_node __rcu *parent;
+ struct fib6_node __rcu *left;
+ struct fib6_node __rcu *right;
#ifdef CONFIG_IPV6_SUBTREES
- struct fib6_node *subtree;
+ struct fib6_node __rcu *subtree;
#endif
- struct rt6_info *leaf;
+ struct rt6_info __rcu *leaf;
__u16 fn_bit; /* bit key */
__u16 fn_flags;
int fn_sernum;
- struct rt6_info *rr_ptr;
+ struct rt6_info __rcu *rr_ptr;
struct rcu_head rcu;
};
+struct fib6_gc_args {
+ int timeout;
+ int more;
+};
+
#ifndef CONFIG_IPV6_SUBTREES
#define FIB6_SUBTREE(fn) NULL
#else
-#define FIB6_SUBTREE(fn) ((fn)->subtree)
+#define FIB6_SUBTREE(fn) (rcu_dereference_protected((fn)->subtree, 1))
#endif
struct mx6_config {
@@ -98,6 +111,22 @@ struct rt6key {
struct fib6_table;
+struct rt6_exception_bucket {
+ struct hlist_head chain;
+ int depth;
+};
+
+struct rt6_exception {
+ struct hlist_node hlist;
+ struct rt6_info *rt6i;
+ unsigned long stamp;
+ struct rcu_head rcu;
+};
+
+#define FIB6_EXCEPTION_BUCKET_SIZE_SHIFT 10
+#define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT)
+#define FIB6_MAX_DEPTH 5
+
struct rt6_info {
struct dst_entry dst;
@@ -134,14 +163,25 @@ struct rt6_info {
struct inet6_dev *rt6i_idev;
struct rt6_info * __percpu *rt6i_pcpu;
+ struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
u32 rt6i_metric;
u32 rt6i_pmtu;
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
+ u8 exception_bucket_flushed:1,
+ unused:7;
};
+#define for_each_fib6_node_rt_rcu(fn) \
+ for (rt = rcu_dereference((fn)->leaf); rt; \
+ rt = rcu_dereference(rt->dst.rt6_next))
+
+#define for_each_fib6_walker_rt(w) \
+ for (rt = (w)->leaf; rt; \
+ rt = rcu_dereference_protected(rt->dst.rt6_next, 1))
+
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
@@ -188,6 +228,8 @@ static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
if (fn) {
*cookie = fn->fn_sernum;
+ /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+ smp_rmb();
status = true;
}
@@ -248,7 +290,6 @@ struct fib6_walker {
struct fib6_node *root, *node;
struct rt6_info *leaf;
enum fib6_walk_state state;
- bool prune;
unsigned int skip;
unsigned int count;
int (*func)(struct fib6_walker *);
@@ -256,12 +297,15 @@ struct fib6_walker {
};
struct rt6_statistics {
- __u32 fib_nodes;
- __u32 fib_route_nodes;
- __u32 fib_rt_alloc; /* permanent routes */
- __u32 fib_rt_entries; /* rt entries in table */
- __u32 fib_rt_cache; /* cache routes */
- __u32 fib_discarded_routes;
+ __u32 fib_nodes; /* all fib6 nodes */
+ __u32 fib_route_nodes; /* intermediate nodes */
+ __u32 fib_rt_entries; /* rt entries in fib table */
+ __u32 fib_rt_cache; /* cached rt entries in exception table */
+ __u32 fib_discarded_routes; /* total number of routes deleted */

+
+ /* The following stats are not protected by any lock */
+ atomic_t fib_rt_alloc; /* total number of routes alloced */
+ atomic_t fib_rt_uncache; /* rt entries in uncached list */
};
#define RTN_TL_ROOT 0x0001
@@ -277,7 +321,7 @@ struct rt6_statistics {
struct fib6_table {
struct hlist_node tb6_hlist;
u32 tb6_id;
- rwlock_t tb6_lock;
+ spinlock_t tb6_lock;
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
unsigned int flags;
@@ -325,7 +369,8 @@ struct fib6_node *fib6_lookup(struct fib6_node *root,
struct fib6_node *fib6_locate(struct fib6_node *root,
const struct in6_addr *daddr, int dst_len,
- const struct in6_addr *saddr, int src_len);
+ const struct in6_addr *saddr, int src_len,
+ bool exact_match);
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
void *arg);
@@ -358,6 +403,8 @@ void __net_exit fib6_notifier_exit(struct net *net);
unsigned int fib6_tables_seq_read(struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb);
+void fib6_update_sernum(struct rt6_info *rt);
+
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
int fib6_rules_init(void);
void fib6_rules_cleanup(void);
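
With the fib6 tree now RCU-protected (rcu-annotated node pointers and a spinlock table lock), readers use the new iterators; for_each_fib6_node_rt_rcu() expects a local struct rt6_info *rt in scope and an RCU read-side section. A small sketch; my_count_node_routes() is illustrative:

/* Sketch only: walking a node's routes with the new RCU iterator. */
#include <linux/rcupdate.h>
#include <net/ip6_fib.h>

static int my_count_node_routes(struct fib6_node *fn)
{
        struct rt6_info *rt;    /* the iterator expects this exact name */
        int n = 0;

        rcu_read_lock();
        for_each_fib6_node_rt_rcu(fn)
                n++;
        rcu_read_unlock();

        return n;
}
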
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index ee96f40..a0087fb 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -95,6 +95,11 @@ int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack);
int ip6_ins_rt(struct rt6_info *);
int ip6_del_rt(struct rt6_info *);
+void rt6_flush_exceptions(struct rt6_info *rt);
+int rt6_remove_exception_rt(struct rt6_info *rt);
+void rt6_age_exceptions(struct rt6_info *rt, struct fib6_gc_args *gc_args,
+ unsigned long now);
+
static inline int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
const struct in6_addr *daddr,
unsigned int prefs,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 1a7f7e4..f805243 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -122,9 +122,6 @@ struct fib_info {
#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
int fib_nhs;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- int fib_weight;
-#endif
struct rcu_head rcu;
struct fib_nh fib_nh[0];
#define fib_dev fib_nh[0].nh_dev
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6eac5cf..3cda3b5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -300,8 +300,8 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
-int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
- struct icmp6hdr *thdr, int len);
+void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ struct icmp6hdr *thdr, int len);
int ip6_ra_control(struct sock *sk, int sel);
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
index f3be818..e766300 100644
--- a/include/net/llc_c_ac.h
+++ b/include/net/llc_c_ac.h
@@ -171,10 +171,10 @@ int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
-void llc_conn_busy_tmr_cb(unsigned long timeout_data);
-void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
-void llc_conn_ack_tmr_cb(unsigned long timeout_data);
-void llc_conn_rej_tmr_cb(unsigned long timeout_data);
+void llc_conn_busy_tmr_cb(struct timer_list *t);
+void llc_conn_pf_cycle_tmr_cb(struct timer_list *t);
+void llc_conn_ack_tmr_cb(struct timer_list *t);
+void llc_conn_rej_tmr_cb(struct timer_list *t);
void llc_conn_set_p_flag(struct sock *sk, u8 value);
#endif /* LLC_C_AC_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 885690f..cc9073e 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5441,8 +5441,14 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
*/
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
+/**
+ * ieee80211_manage_rx_ba_offl - helper to queue an RX BA work
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, const u8 *addr,
- unsigned int bit);
+ unsigned int tid);
/**
* ieee80211_start_rx_ba_session_offl - start a Rx BA session
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 9816df2..2492000 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -190,8 +190,8 @@ struct neigh_hash_table {
struct neigh_table {
int family;
- int entry_size;
- int key_len;
+ unsigned int entry_size;
+ unsigned int key_len;
__be16 protocol;
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e51cf5f..14c2893 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -773,7 +773,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
*/
static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{
- return nla_put(skb, attrtype, sizeof(u8), &value);
+ /* temporary variables to work around GCC PR81715 with asan-stack=1 */
+ u8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u8), &tmp);
}
/**
@@ -784,7 +787,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
*/
static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
- return nla_put(skb, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u16), &tmp);
}
/**
@@ -795,7 +800,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
*/
static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put(skb, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be16), &tmp);
}
/**
@@ -806,7 +813,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be16 tmp = value;
+
+ return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -817,7 +826,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
{
- return nla_put(skb, attrtype, sizeof(__le16), &value);
+ __le16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le16), &tmp);
}
/**
@@ -828,7 +839,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
*/
static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
- return nla_put(skb, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u32), &tmp);
}
/**
@@ -839,7 +852,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
*/
static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put(skb, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be32), &tmp);
}
/**
@@ -850,7 +865,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be32 tmp = value;
+
+ return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -861,7 +878,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
{
- return nla_put(skb, attrtype, sizeof(__le32), &value);
+ __le32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le32), &tmp);
}
/**
@@ -874,7 +893,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
u64 value, int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
+ u64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
/**
@@ -887,7 +908,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
+ __be64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
}
/**
@@ -900,7 +923,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
- return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+ __be64 tmp = value;
+
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
padattr);
}
@@ -914,7 +939,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
+ __le64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
}
/**
@@ -925,7 +952,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
*/
static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
{
- return nla_put(skb, attrtype, sizeof(s8), &value);
+ s8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s8), &tmp);
}
/**
@@ -936,7 +965,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
*/
static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
{
- return nla_put(skb, attrtype, sizeof(s16), &value);
+ s16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s16), &tmp);
}
/**
@@ -947,7 +978,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
*/
static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
{
- return nla_put(skb, attrtype, sizeof(s32), &value);
+ s32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s32), &tmp);
}
/**
@@ -960,7 +993,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
+ s64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
}
/**
@@ -1010,7 +1045,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
__be32 addr)
{
- return nla_put_be32(skb, attrtype, addr);
+ __be32 tmp = addr;
+
+ return nla_put_be32(skb, attrtype, tmp);
}
/**
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 8387f09..141ba82 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -36,6 +36,8 @@ struct inet_timewait_death_row {
int sysctl_max_tw_buckets;
};
+struct tcp_fastopen_context;
+
struct netns_ipv4 {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *forw_hdr;
@@ -126,8 +128,40 @@ struct netns_ipv4 {
int sysctl_tcp_sack;
int sysctl_tcp_window_scaling;
int sysctl_tcp_timestamps;
+ int sysctl_tcp_early_retrans;
+ int sysctl_tcp_recovery;
+ int sysctl_tcp_thin_linear_timeouts;
+ int sysctl_tcp_slow_start_after_idle;
+ int sysctl_tcp_retrans_collapse;
+ int sysctl_tcp_stdurg;
+ int sysctl_tcp_rfc1337;
+ int sysctl_tcp_abort_on_overflow;
+ int sysctl_tcp_fack;
+ int sysctl_tcp_max_reordering;
+ int sysctl_tcp_dsack;
+ int sysctl_tcp_app_win;
+ int sysctl_tcp_adv_win_scale;
+ int sysctl_tcp_frto;
+ int sysctl_tcp_nometrics_save;
+ int sysctl_tcp_moderate_rcvbuf;
+ int sysctl_tcp_tso_win_divisor;
+ int sysctl_tcp_workaround_signed_windows;
+ int sysctl_tcp_limit_output_bytes;
+ int sysctl_tcp_challenge_ack_limit;
+ int sysctl_tcp_min_tso_segs;
+ int sysctl_tcp_min_rtt_wlen;
+ int sysctl_tcp_autocorking;
+ int sysctl_tcp_invalid_ratelimit;
+ int sysctl_tcp_pacing_ss_ratio;
+ int sysctl_tcp_pacing_ca_ratio;
struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
+ int sysctl_tcp_fastopen;
+ struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+ spinlock_t tcp_fastopen_ctx_lock;
+ unsigned int sysctl_tcp_fastopen_blackhole_timeout;
+ atomic_t tfo_active_disable_times;
+ unsigned long tfo_active_disable_stamp;
#ifdef CONFIG_NET_L3_MASTER_DEV
int sysctl_udp_l3mdev_accept;
@@ -163,6 +197,9 @@ struct netns_ipv4 {
struct fib_notifier_ops *notifier_ops;
unsigned int fib_seq; /* protected by rtnl_mutex */
+ struct fib_notifier_ops *ipmr_notifier_ops;
+ unsigned int ipmr_seq; /* protected by rtnl_mutex */
+
atomic_t rt_genid;
};
#endif
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index 039cc29..51e1a2a 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -108,8 +108,10 @@ struct phonet_protocol {
int sock_type;
};
-int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
-void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
+int phonet_proto_register(unsigned int protocol,
+ const struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol,
+ const struct phonet_protocol *pp);
int phonet_sysctl_init(void);
void phonet_sysctl_exit(void);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e80edd8..37c5ef7 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -2,6 +2,7 @@
#define __NET_PKT_CLS_H
#include <linux/pkt_cls.h>
+#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
@@ -17,20 +18,73 @@ struct tcf_walker {
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+enum tcf_block_binder_type {
+ TCF_BLOCK_BINDER_TYPE_UNSPEC,
+ TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+ TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
+};
+
+struct tcf_block_ext_info {
+ enum tcf_block_binder_type binder_type;
+};
+
+struct tcf_block_cb;
+bool tcf_queue_work(struct work_struct *work);
+
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
- struct tcf_proto __rcu **p_filter_chain);
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
+int tcf_block_get_ext(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct tcf_block_ext_info *ei);
void tcf_block_put(struct tcf_block *block);
+void tcf_block_put_ext(struct tcf_block *block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct tcf_block_ext_info *ei);
+
+static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
+{
+ return block->q;
+}
+
+static inline struct net_device *tcf_block_dev(struct tcf_block *block)
+{
+ return tcf_block_q(block)->dev_queue->dev;
+}
+
+void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
+struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident);
+void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
+unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
+struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident,
+ void *cb_priv);
+int tcf_block_cb_register(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident,
+ void *cb_priv);
+void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
+void tcf_block_cb_unregister(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident);
+
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
#else
static inline
int tcf_block_get(struct tcf_block **p_block,
- struct tcf_proto __rcu **p_filter_chain)
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+{
+ return 0;
+}
+
+static inline
+int tcf_block_get_ext(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct tcf_block_ext_info *ei)
{
return 0;
}
@@ -39,6 +93,87 @@ static inline void tcf_block_put(struct tcf_block *block)
{
}
+static inline
+void tcf_block_put_ext(struct tcf_block *block,
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct tcf_block_ext_info *ei)
+{
+}
+
+static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
+{
+ return NULL;
+}
+
+static inline struct net_device *tcf_block_dev(struct tcf_block *block)
+{
+ return NULL;
+}
+
+static inline
+int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
+ void *cb_priv)
+{
+ return 0;
+}
+
+static inline
+void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
+ void *cb_priv)
+{
+}
+
+static inline
+void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
+{
+ return NULL;
+}
+
+static inline
+struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident)
+{
+ return NULL;
+}
+
+static inline
+void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
+{
+}
+
+static inline
+unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
+{
+ return 0;
+}
+
+static inline
+struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident,
+ void *cb_priv)
+{
+ return NULL;
+}
+
+static inline
+int tcf_block_cb_register(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident,
+ void *cb_priv)
+{
+ return 0;
+}
+
+static inline
+void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+{
+}
+
+static inline
+void tcf_block_cb_unregister(struct tcf_block *block,
+ tc_setup_cb_t *cb, void *cb_ident)
+{
+}
+
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
@@ -53,36 +188,43 @@ __cls_set_class(unsigned long *clp, unsigned long cl)
}
static inline unsigned long
-cls_set_class(struct tcf_proto *tp, unsigned long *clp,
- unsigned long cl)
+cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
unsigned long old_cl;
-
- tcf_tree_lock(tp);
+
+ sch_tree_lock(q);
old_cl = __cls_set_class(clp, cl);
- tcf_tree_unlock(tp);
-
+ sch_tree_unlock(q);
return old_cl;
}
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
+ struct Qdisc *q = tp->chain->block->q;
unsigned long cl;
- cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
- cl = cls_set_class(tp, &r->class, cl);
+ /* Check q as it is not set for shared blocks. In that case,
+ * setting class is not supported.
+ */
+ if (!q)
+ return;
+ cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
+ cl = cls_set_class(q, &r->class, cl);
if (cl)
- tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+ q->ops->cl_ops->unbind_tcf(q, cl);
}
static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
+ struct Qdisc *q = tp->chain->block->q;
unsigned long cl;
+ if (!q)
+ return;
if ((cl = __cls_set_class(&r->class, 0)) != 0)
- tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+ q->ops->cl_ops->unbind_tcf(q, cl);
}
struct tcf_exts {
@@ -204,8 +346,6 @@ void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
-int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
- struct net_device **hw_dev);
/**
* struct tcf_pkt_info - packet information
@@ -405,11 +545,24 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
+int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
+ enum tc_setup_type type, void *type_data, bool err_stop);
+
+enum tc_block_command {
+ TC_BLOCK_BIND,
+ TC_BLOCK_UNBIND,
+};
+
+struct tc_block_offload {
+ enum tc_block_command command;
+ enum tcf_block_binder_type binder_type;
+ struct tcf_block *block;
+};
+
struct tc_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
- u32 classid;
};
static inline void
@@ -419,7 +572,6 @@ tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
cls_common->chain_index = tp->chain->index;
cls_common->protocol = tp->protocol;
cls_common->prio = tp->prio;
- cls_common->classid = tp->classid;
}
struct tc_cls_u32_knode {
@@ -514,7 +666,7 @@ struct tc_cls_flower_offload {
struct fl_flow_key *mask;
struct fl_flow_key *key;
struct tcf_exts *exts;
- bool egress_dev;
+ u32 classid;
};
enum tc_matchall_command {
@@ -546,6 +698,15 @@ struct tc_cls_bpf_offload {
u32 gen_flags;
};
+struct tc_mqprio_qopt_offload {
+ /* struct tc_mqprio_qopt must always be the first element */
+ struct tc_mqprio_qopt qopt;
+ u16 mode;
+ u16 shaper;
+ u32 flags;
+ u64 min_rate[TC_QOPT_MAX_QUEUE];
+ u64 max_rate[TC_QOPT_MAX_QUEUE];
+};
/* This structure holds cookie structure that is passed from user
* to the kernel for actions and classifiers
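
Classifier offload moves from per-qdisc to per-block callbacks: the qdisc binds its tcf_block to the device, the driver registers a callback against that block, and tc_setup_cb_call() later dispatches filter offloads to every registered callback. A sketch of the driver side, assuming the bind/unbind request arrives through ndo_setup_tc() as a struct tc_block_offload; struct my_priv and the my_* functions are illustrative:

/* Sketch only: driver side of block callback bind/unbind. */
#include <linux/errno.h>
#include <net/pkt_cls.h>

struct my_priv {
        struct net_device *dev;
};

static int my_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                void *cb_priv)
{
        /* dispatch TC_SETUP_CLS* requests for this block to the hardware */
        return -EOPNOTSUPP;
}

static int my_setup_tc_block(struct my_priv *priv, struct tc_block_offload *f)
{
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, my_setup_tc_block_cb,
                                             priv, priv);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, my_setup_tc_block_cb, priv);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
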
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 259bc19..02f2db2 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -4,7 +4,9 @@
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
#include <net/sch_generic.h>
+#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>
#define DEFAULT_TX_QUEUE_LEN 1000
@@ -133,17 +135,18 @@ static inline unsigned int psched_mtu(const struct net_device *dev)
return dev->mtu + dev->hard_header_len;
}
-static inline bool is_classid_clsact_ingress(u32 classid)
+static inline struct net *qdisc_net(struct Qdisc *q)
{
- /* This also returns true for ingress qdisc */
- return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
- TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS);
+ return dev_net(q->dev_queue->dev);
}
-static inline bool is_classid_clsact_egress(u32 classid)
-{
- return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
- TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS);
-}
+struct tc_cbs_qopt_offload {
+ u8 enable;
+ s32 queue;
+ s32 hicredit;
+ s32 locredit;
+ s32 idleslope;
+ s32 sendslope;
+};
#endif
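
struct tc_cbs_qopt_offload carries the credit-based-shaper parameters that the CBS qdisc can hand to hardware. A sketch of a driver consuming it; my_hw_set_cbs() and the register programming it stands for are purely illustrative:

/* Sketch only: my_hw_set_cbs() stands in for real register programming. */
#include <linux/netdevice.h>
#include <net/pkt_sched.h>

static int my_hw_set_cbs(struct net_device *dev,
                         const struct tc_cbs_qopt_offload *cbs)
{
        if (!cbs->enable) {
                /* disable the credit-based shaper on cbs->queue */
                return 0;
        }

        /* program hicredit/locredit and idleslope/sendslope for cbs->queue */
        netdev_dbg(dev, "queue %d idleslope %d sendslope %d\n",
                   cbs->queue, cbs->idleslope, cbs->sendslope);
        return 0;
}
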
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 65ba335..4fc75f7 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -39,8 +39,8 @@
/* This is used to register protocols. */
struct net_protocol {
- void (*early_demux)(struct sk_buff *skb);
- void (*early_demux_handler)(struct sk_buff *skb);
+ int (*early_demux)(struct sk_buff *skb);
+ int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
unsigned int no_policy:1,
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 23e2205..3470155 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -150,6 +150,8 @@ struct fastopen_queue {
spinlock_t lock;
int qlen; /* # of pending (TCP_SYN_RECV) reqs */
int max_qlen; /* != 0 iff TFO is currently enabled */
+
+ struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
/** struct request_sock_queue - queue of request_socks
diff --git a/include/net/route.h b/include/net/route.h
index 57dfc68..d538e6d 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
fl4->fl4_gre_key = gre_key;
return ip_route_output_key(net, fl4);
}
-
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag);
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin);
int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 21837ca..e3ca8e2 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -93,9 +93,6 @@ struct rtnl_link_ops {
int slave_maxtype;
const struct nla_policy *slave_policy;
- int (*slave_validate)(struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack);
int (*slave_changelink)(struct net_device *dev,
struct net_device *slave_dev,
struct nlattr *tb[],
@@ -154,8 +151,6 @@ struct rtnl_af_ops {
size_t (*get_stats_af_size)(const struct net_device *dev);
};
-void __rtnl_af_unregister(struct rtnl_af_ops *ops);
-
void rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 684d8ed..c23e938 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -270,6 +271,10 @@ struct tcf_chain {
struct tcf_block {
struct list_head chain_list;
+ struct net *net;
+ struct Qdisc *q;
+ struct list_head cb_list;
+ struct work_struct work;
};
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
@@ -357,9 +362,6 @@ static inline void sch_tree_unlock(const struct Qdisc *q)
spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
-#define tcf_tree_lock(tp) sch_tree_lock((tp)->q)
-#define tcf_tree_unlock(tp) sch_tree_unlock((tp)->q)
-
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
@@ -409,6 +411,13 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
return NULL;
}
+static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
+{
+ u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;
+
+ return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
+}
+
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
struct Qdisc_class_common *);
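
tc_classid_to_hwtc() maps a TC_H_MIN_PRIORITY-based class id (such as the classid now carried in flower offloads) onto a hardware traffic class, returning -EINVAL when it is out of range. A one-function sketch; my_classid_to_tc() is illustrative:

/* Sketch only: translate an offloaded filter's class id to a hardware TC. */
#include <linux/errno.h>
#include <net/sch_generic.h>

static int my_classid_to_tc(struct net_device *dev, u32 classid)
{
        int hwtc = tc_classid_to_hwtc(dev, classid);

        if (hwtc < 0)
                return -EOPNOTSUPP;     /* classid is not a hw traffic class */

        return hwtc;
}
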
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2db3d3a..70fb397 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -72,7 +72,7 @@ typedef enum sctp_disposition (sctp_state_fn_t) (
const union sctp_subtype type,
void *arg,
struct sctp_cmd_seq *commands);
-typedef void (sctp_timer_event_t) (unsigned long);
+typedef void (sctp_timer_event_t) (struct timer_list *);
struct sctp_sm_table_entry {
sctp_state_fn_t *fn;
const char *name;
@@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
struct sctp_fwdtsn_skip *skiplist);
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
- __u16 stream_num, __u16 *stream_list,
+ __u16 stream_num, __be16 *stream_list,
bool out, bool in);
struct sctp_chunk *sctp_make_strreset_tsnreq(
const struct sctp_association *asoc);
@@ -314,10 +314,10 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type,
void *event_arg, gfp_t gfp);
/* 2nd level prototypes */
-void sctp_generate_t3_rtx_event(unsigned long peer);
-void sctp_generate_heartbeat_event(unsigned long peer);
-void sctp_generate_reconf_event(unsigned long peer);
-void sctp_generate_proto_unreach_event(unsigned long peer);
+void sctp_generate_t3_rtx_event(struct timer_list *t);
+void sctp_generate_heartbeat_event(struct timer_list *t);
+void sctp_generate_reconf_event(struct timer_list *t);
+void sctp_generate_proto_unreach_event(struct timer_list *t);
void sctp_ootb_pkt_free(struct sctp_packet *packet);
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
new file mode 100644
index 0000000..c676550
--- /dev/null
+++ b/include/net/sctp/stream_sched.h
@@ -0,0 +1,72 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by the stream schedulers, defined in RFC
+ * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ */
+
+#ifndef __sctp_stream_sched_h__
+#define __sctp_stream_sched_h__
+
+struct sctp_sched_ops {
+ /* Property handling for a given stream */
+ int (*set)(struct sctp_stream *stream, __u16 sid, __u16 value,
+ gfp_t gfp);
+ int (*get)(struct sctp_stream *stream, __u16 sid, __u16 *value);
+
+ /* Init the specific scheduler */
+ int (*init)(struct sctp_stream *stream);
+ /* Init a stream */
+ int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+ /* Free all scheduler state for the stream */
+ void (*free)(struct sctp_stream *stream);
+
+ /* Enqueue a chunk */
+ void (*enqueue)(struct sctp_outq *q, struct sctp_datamsg *msg);
+ /* Dequeue a chunk */
+ struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
+ /* Called only if the chunk fits the packet */

+ void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
+ /* Sched all chunks already enqueued */
+ void (*sched_all)(struct sctp_stream *stream);
+ /* Unsched all chunks already enqueued */
+ void (*unsched_all)(struct sctp_stream *stream);
+};
+
+int sctp_sched_set_sched(struct sctp_association *asoc,
+ enum sctp_sched_type sched);
+int sctp_sched_get_sched(struct sctp_association *asoc);
+int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
+ __u16 value, gfp_t gfp);
+int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
+ __u16 *value);
+void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch);
+
+void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
+int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
+
+#endif /* __sctp_stream_sched_h__ */
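To illustrate the contract of struct sctp_sched_ops defined above, a minimal skeleton of a scheduler implementation; all names and bodies are placeholders and only a few ops are shown:

static int example_sched_init(struct sctp_stream *stream)
{
        /* Set up scheduler-wide state for this stream set; nothing here. */
        return 0;
}

static void example_sched_enqueue(struct sctp_outq *q, struct sctp_datamsg *msg)
{
        /* Link each chunk of the message into the chosen stream's queue. */
}

static struct sctp_chunk *example_sched_dequeue(struct sctp_outq *q)
{
        /* Pick the next chunk according to the scheduling policy. */
        return NULL;
}

static struct sctp_sched_ops example_sched_ops = {
        .init           = example_sched_init,
        .enqueue        = example_sched_enqueue,
        .dequeue        = example_sched_dequeue,
        /* .set/.get/.init_sid/.free/.dequeue_done/.sched_all/.unsched_all
         * would be filled in by a real scheduler.
         */
};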
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 0477945..16f949e 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -380,6 +380,7 @@ struct sctp_sender_hb_info {
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
gfp_t gfp);
+int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid);
void sctp_stream_free(struct sctp_stream *stream);
void sctp_stream_clear(struct sctp_stream *stream);
void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
@@ -529,8 +530,12 @@ struct sctp_chunk {
/* How many times this chunk has been sent, for prsctp RTX policy */
int sent_count;
- /* This is our link to the per-transport transmitted list. */
- struct list_head transmitted_list;
+ union {
+ /* This is our link to the per-transport transmitted list. */
+ struct list_head transmitted_list;
+ /* List in specific stream outq */
+ struct list_head stream_list;
+ };
/* This field is used by chunks that hold fragmented data.
* For the first fragment this is the list that holds the rest of
@@ -640,6 +645,11 @@ void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
union sctp_addr *);
const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
+static inline __u16 sctp_chunk_stream_no(struct sctp_chunk *ch)
+{
+ return ntohs(ch->subh.data_hdr->stream);
+}
+
enum {
SCTP_ADDR_NEW, /* new address added to assoc/ep */
SCTP_ADDR_SRC, /* address can be used as source */
@@ -1012,6 +1022,9 @@ struct sctp_outq {
/* Data pending that has never been transmitted. */
struct list_head out_chunk_list;
+ /* Stream scheduler being used */
+ struct sctp_sched_ops *sched;
+
unsigned int out_qlen; /* Total length of queued data chunks. */
/* Error of send failed, may be used in SCTP_SEND_FAILED event. */
@@ -1315,11 +1328,37 @@ struct sctp_inithdr_host {
__u32 initial_tsn;
};
+struct sctp_stream_priorities {
+ /* List of priorities scheduled */
+ struct list_head prio_sched;
+ /* List of streams scheduled */
+ struct list_head active;
+ /* The next stream in line */
+ struct sctp_stream_out_ext *next;
+ __u16 prio;
+};
+
+struct sctp_stream_out_ext {
+ __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+ __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ struct list_head outq; /* chunks enqueued by this stream */
+ union {
+ struct {
+ /* Scheduled streams list */
+ struct list_head prio_list;
+ struct sctp_stream_priorities *prio_head;
+ };
+ /* Fields used by RR scheduler */
+ struct {
+ struct list_head rr_list;
+ };
+ };
+};
+
struct sctp_stream_out {
__u16 ssn;
__u8 state;
- __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
- __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ struct sctp_stream_out_ext *ext;
};
struct sctp_stream_in {
@@ -1331,6 +1370,22 @@ struct sctp_stream {
struct sctp_stream_in *in;
__u16 outcnt;
__u16 incnt;
+ /* Current stream being sent, if any */
+ struct sctp_stream_out *out_curr;
+ union {
+ /* Fields used by priority scheduler */
+ struct {
+ /* List of priorities scheduled */
+ struct list_head prio_list;
+ };
+ /* Fields used by RR scheduler */
+ struct {
+ /* List of streams scheduled */
+ struct list_head rr_list;
+ /* The next stream in line */
+ struct sctp_stream_out_ext *rr_next;
+ };
+ };
};
#define SCTP_STREAM_CLOSED 0x00
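A hedged sketch of how the new ->ext pointer in struct sctp_stream_out is expected to be populated, using the sctp_stream_init_ext() prototype added earlier; the stream->out[sid] access assumes the existing per-stream array layout of struct sctp_stream, which is not shown in this diff:

static int example_ensure_stream_ext(struct sctp_stream *stream, __u16 sid)
{
        /* Allocate the per-stream extension lazily, only for streams
         * that actually carry data.
         */
        if (!stream->out[sid].ext)
                return sctp_stream_init_ext(stream, sid);

        return 0;
}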
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index b8c86ec..231dc42 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags,
- __u16 stream_num, __u16 *stream_list, gfp_t gfp);
+ __u16 stream_num, __be16 *stream_list, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
const struct sctp_association *asoc, __u16 flags,
diff --git a/include/net/sock.h b/include/net/sock.h
index a6b9a8d..6f1be97 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -60,7 +60,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
-
+#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
@@ -397,7 +397,10 @@ struct sock {
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
unsigned long sk_tsq_flags;
- struct sk_buff *sk_send_head;
+ union {
+ struct sk_buff *sk_send_head;
+ struct rb_root tcp_rtx_queue;
+ };
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@ -734,10 +737,10 @@ static inline void sk_add_bind_node(struct sock *sk,
*
*/
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \
- for (pos = rcu_dereference((head)->first); \
+ for (pos = rcu_dereference(hlist_first_rcu(head)); \
pos != NULL && \
({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
- pos = rcu_dereference(pos->next))
+ pos = rcu_dereference(hlist_next_rcu(pos)))
static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 7dc131d..d96b59f 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -74,10 +74,9 @@ struct strparser {
u32 unrecov_intr : 1;
struct sk_buff **skb_nextp;
- struct timer_list msg_timer;
struct sk_buff *skb_head;
unsigned int need_bytes;
- struct delayed_work delayed_work;
+ struct delayed_work msg_timer_work;
struct work_struct work;
struct strp_stats stats;
struct strp_callbacks cb;
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d767b79..d756fbe 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -51,6 +51,7 @@ enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
+ SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
};
struct switchdev_attr {
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 41afe1c..d979a0d 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -33,6 +33,11 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act,
return false;
}
+static inline bool is_tcf_gact_ok(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_OK, false);
+}
+
static inline bool is_tcf_gact_shot(const struct tc_action *a)
{
return __is_tcf_gact_act(a, TC_ACT_SHOT, false);
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 30ba459..c7fb99c 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -6,12 +6,18 @@
#include <linux/rtnetlink.h>
#include <linux/module.h>
-struct tcf_ife_info {
- struct tc_action common;
+struct tcf_ife_params {
u8 eth_dst[ETH_ALEN];
u8 eth_src[ETH_ALEN];
u16 eth_type;
u16 flags;
+
+ struct rcu_head rcu;
+};
+
+struct tcf_ife_info {
+ struct tc_action common;
+ struct tcf_ife_params __rcu *params;
/* list of metaids allowed */
struct list_head metalist;
};
@@ -40,7 +46,7 @@ struct tcf_meta_ops {
struct module *owner;
};
-#define MODULE_ALIAS_IFE_META(metan) MODULE_ALIAS("ifemeta" __stringify_1(metan))
+#define MODULE_ALIAS_IFE_META(metan) MODULE_ALIAS("ife-meta-" metan)
int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 604bc31..21a6565 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -10,6 +10,7 @@ struct tcf_mirred {
int tcfm_ifindex;
bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
+ struct net *net;
struct list_head tcfm_list;
};
#define to_mirred(a) ((struct tcf_mirred *)a)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 770b608..a2510cd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -45,9 +45,6 @@
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
-
-#include <linux/bpf.h>
-#include <linux/filter.h>
#include <linux/bpf-cgroup.h>
extern struct inet_hashinfo tcp_hashinfo;
@@ -191,6 +188,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
* experimental options. See draft-ietf-tcpm-experimental-options-00.txt
*/
#define TCPOPT_FASTOPEN_MAGIC 0xF989
+#define TCPOPT_SMC_MAGIC 0xE2D4C3D9
/*
* TCP option lengths
@@ -203,6 +201,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_FASTOPEN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
+#define TCPOLEN_EXP_SMC_BASE 6
/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
@@ -213,6 +212,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4
+#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
@@ -240,41 +240,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* sysctl variables for tcp */
-extern int sysctl_tcp_fastopen;
-extern int sysctl_tcp_retrans_collapse;
-extern int sysctl_tcp_stdurg;
-extern int sysctl_tcp_rfc1337;
-extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
-extern int sysctl_tcp_fack;
-extern int sysctl_tcp_reordering;
-extern int sysctl_tcp_max_reordering;
-extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
-extern int sysctl_tcp_app_win;
-extern int sysctl_tcp_adv_win_scale;
-extern int sysctl_tcp_frto;
-extern int sysctl_tcp_nometrics_save;
-extern int sysctl_tcp_moderate_rcvbuf;
-extern int sysctl_tcp_tso_win_divisor;
-extern int sysctl_tcp_workaround_signed_windows;
-extern int sysctl_tcp_slow_start_after_idle;
-extern int sysctl_tcp_thin_linear_timeouts;
-extern int sysctl_tcp_thin_dupack;
-extern int sysctl_tcp_early_retrans;
-extern int sysctl_tcp_recovery;
-#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
-extern int sysctl_tcp_limit_output_bytes;
-extern int sysctl_tcp_challenge_ack_limit;
-extern int sysctl_tcp_min_tso_segs;
-extern int sysctl_tcp_min_rtt_wlen;
-extern int sysctl_tcp_autocorking;
-extern int sysctl_tcp_invalid_ratelimit;
-extern int sysctl_tcp_pacing_ss_ratio;
-extern int sysctl_tcp_pacing_ca_ratio;
+#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -345,7 +316,7 @@ void tcp_v4_err(struct sk_buff *skb, u32);
void tcp_shutdown(struct sock *sk, int how);
-void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
@@ -417,6 +388,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
+void tcp_init_transfer(struct sock *sk, int bpf_op);
unsigned int tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -551,7 +523,13 @@ void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
+enum tcp_queue {
+ TCP_FRAG_IN_WRITE_QUEUE,
+ TCP_FRAG_IN_RTX_QUEUE,
+};
+int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+ struct sk_buff *skb, u32 len,
+ unsigned int mss_now, gfp_t gfp);
void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
@@ -834,6 +812,12 @@ struct tcp_skb_cb {
struct inet6_skb_parm h6;
#endif
} header; /* For incoming skbs */
+ struct {
+ __u32 key;
+ __u32 flags;
+ struct bpf_map *map;
+ void *data_end;
+ } bpf;
};
};
@@ -1297,7 +1281,7 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
s32 delta;
- if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
+ if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
ca_ops->cong_control)
return;
delta = tcp_jiffies32 - tp->lsndtime;
@@ -1306,13 +1290,14 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
}
/* Determine a window scaling and initial window to offer. */
-void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+void tcp_select_initial_window(const struct sock *sk, int __space,
+ __u32 mss, __u32 *rcv_wnd,
__u32 *window_clamp, int wscale_ok,
__u8 *rcv_wscale, __u32 init_rcv_wnd);
-static inline int tcp_win_from_space(int space)
+static inline int tcp_win_from_space(const struct sock *sk, int space)
{
- int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
+ int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
return tcp_adv_win_scale <= 0 ?
(space>>(-tcp_adv_win_scale)) :
@@ -1322,13 +1307,13 @@ static inline int tcp_win_from_space(int space)
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
- return tcp_win_from_space(sk->sk_rcvbuf -
+ return tcp_win_from_space(sk, sk->sk_rcvbuf -
atomic_read(&sk->sk_rmem_alloc));
}
static inline int tcp_full_space(const struct sock *sk)
{
- return tcp_win_from_space(sk->sk_rcvbuf);
+ return tcp_win_from_space(sk, sk->sk_rcvbuf);
}
extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1549,14 +1534,16 @@ struct tcp_fastopen_request {
int copied; /* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
-
-extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
-int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+void tcp_fastopen_destroy_cipher(struct sock *sk);
+void tcp_fastopen_ctx_destroy(struct net *net);
+int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
+ void *key, unsigned int len);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct tcp_fastopen_cookie *foc);
-void tcp_fastopen_init_key_once(bool publish);
+ struct tcp_fastopen_cookie *foc,
+ const struct dst_entry *dst);
+void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
@@ -1589,52 +1576,46 @@ enum tcp_chrono {
void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
-/* write queue abstraction */
-static inline void tcp_write_queue_purge(struct sock *sk)
+/* This helper is needed because skb->tcp_tsorted_anchor uses
+ * the same memory storage as skb->destructor/_skb_refdst
+ */
+static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
- struct sk_buff *skb;
-
- tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
- while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
- sk_wmem_free_skb(sk, skb);
- sk_mem_reclaim(sk);
- tcp_clear_all_retrans_hints(tcp_sk(sk));
+ skb->destructor = NULL;
+ skb->_skb_refdst = 0UL;
}
-static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
-{
- return skb_peek(&sk->sk_write_queue);
+#define tcp_skb_tsorted_save(skb) { \
+ unsigned long _save = skb->_skb_refdst; \
+ skb->_skb_refdst = 0UL;
+
+#define tcp_skb_tsorted_restore(skb) \
+ skb->_skb_refdst = _save; \
}
-static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
+void tcp_write_queue_purge(struct sock *sk);
+
+static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
- return skb_peek_tail(&sk->sk_write_queue);
+ return skb_rb_first(&sk->tcp_rtx_queue);
}
-static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
- const struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
- return skb_queue_next(&sk->sk_write_queue, skb);
+ return skb_peek(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
- const struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
- return skb_queue_prev(&sk->sk_write_queue, skb);
+ return skb_peek_tail(&sk->sk_write_queue);
}
-#define tcp_for_write_queue(skb, sk) \
- skb_queue_walk(&(sk)->sk_write_queue, skb)
-
-#define tcp_for_write_queue_from(skb, sk) \
- skb_queue_walk_from(&(sk)->sk_write_queue, skb)
-
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
- return sk->sk_send_head;
+ return skb_peek(&sk->sk_write_queue);
}
static inline bool tcp_skb_is_last(const struct sock *sk,
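The open/close macro pair above is meant to bracket code that may clobber skb->_skb_refdst (which now shares storage with tcp_tsorted_anchor). A usage sketch, with queue_xmit_placeholder() standing in for the real transmit call:

static int example_transmit(struct sock *sk, struct sk_buff *skb)
{
        int err;

        tcp_skb_tsorted_save(skb) {
                /* _skb_refdst is zeroed here and may be rewritten by the
                 * lower layers; the closing macro restores the saved value.
                 */
                err = queue_xmit_placeholder(sk, skb);
        } tcp_skb_tsorted_restore(skb);

        return err;
}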
@@ -1643,29 +1624,30 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
return skb_queue_is_last(&sk->sk_write_queue, skb);
}
-static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
+static inline bool tcp_write_queue_empty(const struct sock *sk)
{
- if (tcp_skb_is_last(sk, skb))
- sk->sk_send_head = NULL;
- else
- sk->sk_send_head = tcp_write_queue_next(sk, skb);
+ return skb_queue_empty(&sk->sk_write_queue);
+}
+
+static inline bool tcp_rtx_queue_empty(const struct sock *sk)
+{
+ return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
+}
+
+static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
+{
+ return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}
static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
- if (sk->sk_send_head == skb_unlinked) {
- sk->sk_send_head = NULL;
+ if (tcp_write_queue_empty(sk))
tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
- }
+
if (tcp_sk(sk)->highest_sack == skb_unlinked)
tcp_sk(sk)->highest_sack = NULL;
}
-static inline void tcp_init_send_head(struct sock *sk)
-{
- sk->sk_send_head = NULL;
-}
-
static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -1676,8 +1658,7 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb
__tcp_add_write_queue_tail(sk, skb);
/* Queue it, remembering where we must start sending. */
- if (sk->sk_send_head == NULL) {
- sk->sk_send_head = skb;
+ if (sk->sk_write_queue.next == skb) {
tcp_chrono_start(sk, TCP_CHRONO_BUSY);
if (tcp_sk(sk)->highest_sack == NULL)
@@ -1685,38 +1666,33 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb
}
}
-static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
-{
- __skb_queue_head(&sk->sk_write_queue, skb);
-}
-
-/* Insert buff after skb on the write queue of sk. */
-static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
- struct sk_buff *buff,
- struct sock *sk)
-{
- __skb_queue_after(&sk->sk_write_queue, skb, buff);
-}
-
/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
struct sk_buff *skb,
struct sock *sk)
{
__skb_queue_before(&sk->sk_write_queue, skb, new);
-
- if (sk->sk_send_head == skb)
- sk->sk_send_head = new;
}
static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
+ tcp_skb_tsorted_anchor_cleanup(skb);
__skb_unlink(skb, &sk->sk_write_queue);
}
-static inline bool tcp_write_queue_empty(struct sock *sk)
+void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
+
+static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
- return skb_queue_empty(&sk->sk_write_queue);
+ tcp_skb_tsorted_anchor_cleanup(skb);
+ rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
+}
+
+static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
+{
+ list_del(&skb->tcp_tsorted_anchor);
+ tcp_rtx_queue_unlink(skb, sk);
+ sk_wmem_free_skb(sk, skb);
}
static inline void tcp_push_pending_frames(struct sock *sk)
@@ -1745,8 +1721,9 @@ static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
- tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
- tcp_write_queue_next(sk, skb);
+ struct sk_buff *next = skb_rb_next(skb);
+
+ tcp_sk(sk)->highest_sack = next ?: tcp_send_head(sk);
}
static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
@@ -1756,7 +1733,9 @@ static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
static inline void tcp_highest_sack_reset(struct sock *sk)
{
- tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
+ struct sk_buff *skb = tcp_rtx_queue_head(sk);
+
+ tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
}
/* Called when old skb is about to be deleted (to be combined with new skb) */
@@ -1926,7 +1905,7 @@ extern void tcp_rack_reo_timeout(struct sock *sk);
/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
- const struct sk_buff *skb = tcp_write_queue_head(sk);
+ const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
@@ -2103,4 +2082,8 @@ static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1);
}
+
+#if IS_ENABLED(CONFIG_SMC)
+extern struct static_key_false tcp_have_smc;
+#endif
#endif /* _TCP_H */
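An illustrative walk of the new rbtree-based retransmit queue using the helpers declared above; skb_rb_next() is the rbtree counterpart already used by tcp_advance_highest_sack(), and the loop body is a placeholder:

static void example_walk_rtx_queue(const struct sock *sk)
{
        struct sk_buff *skb;

        for (skb = tcp_rtx_queue_head(sk); skb; skb = skb_rb_next(skb)) {
                /* e.g. inspect skb for loss marking or timestamp checks */
        }
}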
diff --git a/include/net/udp.h b/include/net/udp.h
index 12dfbfe..6c759c8 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
}
-void udp_v4_early_demux(struct sk_buff *skb);
+int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,