path: root/include/net
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 21:40:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 21:40:54 -0400
commit    35a9ad8af0bb0fa3525e6d0d20e32551d226f38e
tree      15b4b33206818886d9cff371fd2163e073b70568  /include/net
parent    d5935b07da53f74726e2a65dd4281d0f2c70e5d4
parent    64b1f00a0830e1c53874067273a096b228d83d36
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Most notable changes in here:

  1) By far the biggest accomplishment, thanks to a large range of
     contributors, is the addition of multi-send for transmit.  This is
     the result of discussions back in Chicago, and the hard work of
     several individuals.

     Now, when the ->ndo_start_xmit() method of a driver sees
     skb->xmit_more as true, it can choose to defer the doorbell
     telling the driver to start processing the new TX queue entries.

     skb->xmit_more means that the generic networking is guaranteed to
     call the driver immediately with another SKB to send.

     There is logic added to the qdisc layer to dequeue multiple
     packets at a time, and the handling of mis-predicted offloads in
     software is now done with no locks held.

     Finally, pktgen is extended to have a "burst" parameter that can
     be used to test a multi-send implementation.

     Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4,
     virtio_net.  Adding support is almost trivial, so expect more
     drivers to support this optimization soon.

     I want to thank, in no particular or implied order, Jesper
     Dangaard Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert,
     Jamal Hadi Salim, John Fastabend, Florian Westphal, Daniel
     Borkmann, David Tat, Hannes Frederic Sowa, and Rusty Russell.

  2) PTP and timestamping support in bnx2x, from Michal Kalderon.

  3) Allow adjusting the rx_copybreak threshold for a driver via
     ethtool, and add rx_copybreak support to the enic driver.  From
     Govindarajulu Varadarajan.

  4) Significant enhancements to the generic PHY layer and the bcm7xxx
     driver in particular (EEE support, auto power down, etc.) from
     Florian Fainelli.

  5) Allow raw buffers to be used for flow dissection, allowing drivers
     to determine the optimal "linear pull" size for devices that DMA
     into pools of pages.  The objective is to get exactly the
     necessary amount of headers into the linear SKB area pre-pulled,
     but no more.  The new interface drivers use is eth_get_headlen().
     From WANG Cong, with driver conversions (several had their own
     by-hand duplicated implementations) by Alexander Duyck and Eric
     Dumazet.

  6) Support checksumming more smoothly and efficiently for
     encapsulations, and add the "foo over UDP" facility.  From Tom
     Herbert.

  7) Add the Broadcom SF2 switch driver to the DSA layer, from Florian
     Fainelli.

  8) eBPF can now load programs via a system call and has an extensive
     testsuite.  From Alexei Starovoitov and Daniel Borkmann.

  9) Major overhaul of the packet scheduler to use RCU in several major
     areas such as the classifiers and rate estimators.  From John
     Fastabend.

 10) Add a driver for the Intel FM10000 Ethernet Switch, from Alexander
     Duyck.

 11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric
     Dumazet.

 12) Add Datacenter TCP congestion control algorithm support, from
     Florian Westphal.

 13) Reorganize sk_buff so that __copy_skb_header() is significantly
     faster.  From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits)
  netlabel: directly return netlbl_unlabel_genl_init()
  net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers
  net: description of dma_cookie cause make xmldocs warning
  cxgb4: clean up a type issue
  cxgb4: potential shift wrapping bug
  i40e: skb->xmit_more support
  net: fs_enet: Add NAPI TX
  net: fs_enet: Remove non NAPI RX
  r8169:add support for RTL8168EP
  net_sched: copy exts->type in tcf_exts_change()
  wimax: convert printk to pr_foo()
  af_unix: remove 0 assignment on static
  ipv6: Do not warn for informational ICMP messages, regardless of type.
  Update Intel Ethernet Driver maintainers list
  bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING
  tipc: fix bug in multicast congestion handling
  net: better IFF_XMIT_DST_RELEASE support
  net/mlx4_en: remove NETDEV_TX_BUSY
  3c59x: fix bad split of cpu_to_le32(pci_map_single())
  net: bcmgenet: fix Tx ring priority programming
  ...
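
As a concrete illustration of item 1 above, here is a minimal, hypothetical sketch of how a driver's ->ndo_start_xmit() routine might honour skb->xmit_more to batch doorbell writes; the mydrv_* names and the ring/doorbell layout are invented for illustration and are not taken from any in-tree driver.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

/* Hypothetical private state: a TX ring with a memory-mapped doorbell. */
struct mydrv_priv {
	void __iomem *doorbell;
	u16 tail;
	u16 free_descs;
};

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	/* ... post skb to the hardware ring, advancing priv->tail ... */

	/* Defer the doorbell while the stack guarantees another SKB follows
	 * immediately (skb->xmit_more), unless the ring is nearly full.
	 */
	if (!skb->xmit_more || priv->free_descs < 2)
		writel(priv->tail, priv->doorbell);

	return NETDEV_TX_OK;
}

pktgen's new "burst" parameter exercises exactly this path by handing the driver several SKBs with xmit_more set before the final one.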
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/addrconf.h | 2
-rw-r--r--  include/net/ah.h | 3
-rw-r--r--  include/net/bluetooth/bluetooth.h | 5
-rw-r--r--  include/net/bluetooth/hci.h | 1
-rw-r--r--  include/net/bluetooth/hci_core.h | 23
-rw-r--r--  include/net/bluetooth/l2cap.h | 35
-rw-r--r--  include/net/cfg80211.h | 69
-rw-r--r--  include/net/checksum.h | 4
-rw-r--r--  include/net/codel.h | 2
-rw-r--r--  include/net/dsa.h | 95
-rw-r--r--  include/net/flow_keys.h | 16
-rw-r--r--  include/net/gen_stats.h | 15
-rw-r--r--  include/net/geneve.h | 97
-rw-r--r--  include/net/gue.h | 23
-rw-r--r--  include/net/if_inet6.h | 1
-rw-r--r--  include/net/inet_connection_sock.h | 9
-rw-r--r--  include/net/inetpeer.h | 1
-rw-r--r--  include/net/ip.h | 29
-rw-r--r--  include/net/ip6_checksum.h | 8
-rw-r--r--  include/net/ip6_fib.h | 20
-rw-r--r--  include/net/ip_fib.h | 5
-rw-r--r--  include/net/ip_tunnels.h | 38
-rw-r--r--  include/net/ip_vs.h | 223
-rw-r--r--  include/net/ipv6.h | 4
-rw-r--r--  include/net/mac80211.h | 34
-rw-r--r--  include/net/mld.h | 5
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netfilter/br_netfilter.h | 6
-rw-r--r--  include/net/netfilter/ipv4/nf_nat_masquerade.h | 14
-rw-r--r--  include/net/netfilter/ipv4/nf_reject.h | 119
-rw-r--r--  include/net/netfilter/ipv6/nf_nat_masquerade.h | 10
-rw-r--r--  include/net/netfilter/ipv6/nf_reject.h | 2
-rw-r--r--  include/net/netfilter/nf_nat.h | 10
-rw-r--r--  include/net/netfilter/nf_nat_l3proto.h | 75
-rw-r--r--  include/net/netfilter/nf_tables.h | 2
-rw-r--r--  include/net/netfilter/nft_masq.h | 16
-rw-r--r--  include/net/netfilter/nft_reject.h | 9
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/netns/ipv6.h | 2
-rw-r--r--  include/net/netns/xfrm.h | 14
-rw-r--r--  include/net/nfc/nci.h | 16
-rw-r--r--  include/net/nfc/nci_core.h | 9
-rw-r--r--  include/net/pkt_cls.h | 18
-rw-r--r--  include/net/pkt_sched.h | 8
-rw-r--r--  include/net/sch_generic.h | 119
-rw-r--r--  include/net/sctp/command.h | 2
-rw-r--r--  include/net/snmp.h | 8
-rw-r--r--  include/net/sock.h | 16
-rw-r--r--  include/net/tcp.h | 85
-rw-r--r--  include/net/udp.h | 21
-rw-r--r--  include/net/udp_tunnel.h | 85
-rw-r--r--  include/net/xfrm.h | 1
52 files changed, 984 insertions, 453 deletions
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index ec51e67..d13573b 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -202,7 +202,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
void ipv6_sock_ac_close(struct sock *sk);
-int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
void ipv6_ac_destroy_dev(struct inet6_dev *idev);
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
diff --git a/include/net/ah.h b/include/net/ah.h
index ca95b98..4e2dfa4 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -3,9 +3,6 @@
#include <linux/skbuff.h>
-/* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN 64
-
struct crypto_ahash;
struct ah_data {
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 373000d..58695ff 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -120,9 +120,9 @@ struct bt_voice {
#define BT_RCVMTU 13
__printf(1, 2)
-int bt_info(const char *fmt, ...);
+void bt_info(const char *fmt, ...);
__printf(1, 2)
-int bt_err(const char *fmt, ...);
+void bt_err(const char *fmt, ...);
#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
@@ -284,6 +284,7 @@ struct hci_req_ctrl {
struct bt_skb_cb {
__u8 pkt_type;
__u8 incoming;
+ __u16 opcode;
__u16 expect;
__u8 force_active;
struct l2cap_chan *chan;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 3f8547f..6e8f249 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -385,6 +385,7 @@ enum {
#define HCI_ERROR_AUTH_FAILURE 0x05
#define HCI_ERROR_MEMORY_EXCEEDED 0x07
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
#define HCI_ERROR_REMOTE_USER_TERM 0x13
#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6f884e6..37ff1ae 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -302,7 +302,7 @@ struct hci_dev {
__u32 req_status;
__u32 req_result;
- struct crypto_blkcipher *tfm_aes;
+ void *smp_data;
struct discovery_state discovery;
struct hci_conn_hash conn_hash;
@@ -539,7 +539,6 @@ enum {
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
HCI_CONN_SCO_SETUP_PEND,
- HCI_CONN_LE_SMP_PEND,
HCI_CONN_MGMT_CONNECTED,
HCI_CONN_SSP_ENABLED,
HCI_CONN_SC_ENABLED,
@@ -553,6 +552,7 @@ enum {
HCI_CONN_FIPS,
HCI_CONN_STK_ENCRYPT,
HCI_CONN_AUTH_INITIATOR,
+ HCI_CONN_DROP,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -702,7 +702,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
return NULL;
}
-void hci_disconnect(struct hci_conn *conn, __u8 reason);
+int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -756,9 +756,10 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status);
* _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
*/
-static inline void hci_conn_get(struct hci_conn *conn)
+static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
{
get_device(&conn->dev);
+ return conn;
}
static inline void hci_conn_put(struct hci_conn *conn)
@@ -790,7 +791,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
if (!conn->out)
timeo *= 2;
} else {
- timeo = msecs_to_jiffies(10);
+ timeo = 0;
}
break;
@@ -799,7 +800,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
break;
default:
- timeo = msecs_to_jiffies(10);
+ timeo = 0;
break;
}
@@ -925,7 +926,6 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
void hci_init_sysfs(struct hci_dev *hdev);
@@ -970,6 +970,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
+#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
+ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1258,6 +1261,8 @@ bool hci_req_pending(struct hci_dev *hdev);
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_passive_scan(struct hci_request *req);
+void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req);
+
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1336,8 +1341,7 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u32 passkey,
u8 entered);
-void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
+void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
@@ -1353,6 +1357,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 8df15ad..ead99f0 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -633,10 +633,11 @@ struct l2cap_conn {
struct sk_buff_head pending_rx;
struct work_struct pending_rx_work;
+ struct work_struct id_addr_update_work;
+
__u8 disc_reason;
- struct delayed_work security_timer;
- struct smp_chan *smp_chan;
+ struct l2cap_chan *smp;
struct list_head chan_l;
struct mutex chan_lock;
@@ -708,6 +709,8 @@ enum {
FLAG_EFS_ENABLE,
FLAG_DEFER_SETUP,
FLAG_LE_CONN_REQ_SENT,
+ FLAG_PENDING_SECURITY,
+ FLAG_HOLD_HCI_CONN,
};
enum {
@@ -837,18 +840,43 @@ static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan
return NULL;
}
+static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ return -ENOSYS;
+}
+
+static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan,
+ unsigned long hdr_len,
+ unsigned long len, int nb)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
{
}
+static inline void l2cap_chan_no_close(struct l2cap_chan *chan)
+{
+}
+
static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
{
}
+static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan,
+ int state, int err)
+{
+}
+
static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
{
}
+static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan)
+{
+}
+
static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
{
}
@@ -911,14 +939,13 @@ int l2cap_ertm_init(struct l2cap_chan *chan);
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
-void l2cap_conn_update_id_addr(struct hci_conn *hcon);
void l2cap_send_conn_req(struct l2cap_chan *chan);
void l2cap_move_start(struct l2cap_chan *chan);
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
u8 status);
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
-void l2cap_conn_get(struct l2cap_conn *conn);
+struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0a080c4..a2ddcf2 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4,6 +4,7 @@
* 802.11 device and configuration interface
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -663,6 +664,7 @@ struct cfg80211_acl_data {
* @crypto: crypto settings
* @privacy: the BSS uses privacy
* @auth_type: Authentication type (algorithm)
+ * @smps_mode: SMPS mode
* @inactivity_timeout: time in seconds to determine station's inactivity.
* @p2p_ctwindow: P2P CT Window
* @p2p_opp_ps: P2P opportunistic PS
@@ -681,6 +683,7 @@ struct cfg80211_ap_settings {
struct cfg80211_crypto_settings crypto;
bool privacy;
enum nl80211_auth_type auth_type;
+ enum nl80211_smps_mode smps_mode;
int inactivity_timeout;
u8 p2p_ctwindow;
bool p2p_opp_ps;
@@ -1503,12 +1506,14 @@ enum cfg80211_signal_type {
* @tsf: TSF contained in the frame that carried these IEs
* @rcu_head: internal use, for freeing
* @len: length of the IEs
+ * @from_beacon: these IEs are known to come from a beacon
* @data: IE data
*/
struct cfg80211_bss_ies {
u64 tsf;
struct rcu_head rcu_head;
int len;
+ bool from_beacon;
u8 data[];
};
@@ -1605,10 +1610,12 @@ struct cfg80211_auth_request {
*
* @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
* @ASSOC_REQ_DISABLE_VHT: Disable VHT
+ * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
ASSOC_REQ_DISABLE_VHT = BIT(1),
+ ASSOC_REQ_USE_RRM = BIT(2),
};
/**
@@ -1800,6 +1807,7 @@ struct cfg80211_connect_params {
* @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
* @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
* @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
+ * @WIPHY_PARAM_DYN_ACK: dynack has been enabled
*/
enum wiphy_params_flags {
WIPHY_PARAM_RETRY_SHORT = 1 << 0,
@@ -1807,6 +1815,7 @@ enum wiphy_params_flags {
WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
+ WIPHY_PARAM_DYN_ACK = 1 << 5,
};
/*
@@ -1973,14 +1982,12 @@ struct cfg80211_wowlan_wakeup {
/**
* struct cfg80211_gtk_rekey_data - rekey data
- * @kek: key encryption key
- * @kck: key confirmation key
- * @replay_ctr: replay counter
+ * @kek: key encryption key (NL80211_KEK_LEN bytes)
+ * @kck: key confirmation key (NL80211_KCK_LEN bytes)
+ * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
*/
struct cfg80211_gtk_rekey_data {
- u8 kek[NL80211_KEK_LEN];
- u8 kck[NL80211_KCK_LEN];
- u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
+ const u8 *kek, *kck, *replay_ctr;
};
/**
@@ -2313,6 +2320,17 @@ struct cfg80211_qos_map {
* @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
* given interface This is used e.g. for dynamic HT 20/40 MHz channel width
* changes during the lifetime of the BSS.
+ *
+ * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device
+ * with the given parameters; action frame exchange has been handled by
+ * userspace so this just has to modify the TX path to take the TS into
+ * account.
+ * If the admitted time is 0 just validate the parameters to make sure
+ * the session can be created at all; it is valid to just always return
+ * success for that but that may result in inefficient behaviour (handshake
+ * with the peer followed by immediate teardown when the addition is later
+ * rejected)
+ * @del_tx_ts: remove an existing TX TS
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2553,6 +2571,12 @@ struct cfg80211_ops {
int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_chan_def *chandef);
+
+ int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer, u8 user_prio,
+ u16 admitted_time);
+ int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer);
};
/*
@@ -2599,9 +2623,13 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
+ * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM
+ * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS
+ * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it
+ * needs to be able to handle Block-Ack agreements and other things.
*/
enum wiphy_flags {
- /* use hole at 0 */
+ WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0),
/* use hole at 1 */
/* use hole at 2 */
WIPHY_FLAG_NETNS_OK = BIT(3),
@@ -3765,11 +3793,25 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
}
/**
- * cfg80211_inform_bss - inform cfg80211 of a new BSS
+ * enum cfg80211_bss_frame_type - frame type that the BSS data came from
+ * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
+ * from a beacon or probe response
+ * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
+ * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
+ */
+enum cfg80211_bss_frame_type {
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ CFG80211_BSS_FTYPE_BEACON,
+ CFG80211_BSS_FTYPE_PRESP,
+};
+
+/**
+ * cfg80211_inform_bss_width - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
* @rx_channel: The channel the frame was received on
* @scan_width: width of the control channel
+ * @ftype: frame type (if known)
* @bssid: the BSSID of the BSS
* @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
* @capability: the capability field sent by the peer
@@ -3789,6 +3831,7 @@ struct cfg80211_bss * __must_check
cfg80211_inform_bss_width(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum nl80211_bss_scan_width scan_width,
+ enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
s32 signal, gfp_t gfp);
@@ -3796,12 +3839,13 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
+ enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
s32 signal, gfp_t gfp)
{
return cfg80211_inform_bss_width(wiphy, rx_channel,
- NL80211_BSS_CHAN_WIDTH_20,
+ NL80211_BSS_CHAN_WIDTH_20, ftype,
bssid, tsf, capability,
beacon_interval, ie, ielen, signal,
gfp);
@@ -3902,6 +3946,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* moves to cfg80211 in this call
* @buf: authentication frame (header + body)
* @len: length of the frame data
+ * @uapsd_queues: bitmap of ACs configured to uapsd. -1 if n/a.
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
@@ -3910,7 +3955,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
*/
void cfg80211_rx_assoc_resp(struct net_device *dev,
struct cfg80211_bss *bss,
- const u8 *buf, size_t len);
+ const u8 *buf, size_t len,
+ int uapsd_queues);
/**
* cfg80211_assoc_timeout - notification of timed out association
@@ -4412,7 +4458,6 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* @buf: Management frame (header + body)
* @len: length of the frame data
* @flags: flags, as defined in enum nl80211_rxmgmt_flags
- * @gfp: context flags
*
* This function is called whenever an Action frame is received for a station
* mode interface, but is not processed in kernel.
@@ -4423,7 +4468,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* driver is responsible for rejecting the frame.
*/
bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
- const u8 *buf, size_t len, u32 flags, gfp_t gfp);
+ const u8 *buf, size_t len, u32 flags);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 87cb190..6465bae8 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,9 +122,7 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
- __be32 diff[] = { ~from, to };
-
- *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum)));
+ *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
}
/* Implements RFC 1624 (Incremental Internet Checksum)
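
For context, the reworked csum_replace4() above performs the same RFC 1624-style incremental update without building a temporary diff[] array. A purely illustrative caller (not part of this patch set) rewriting an IPv4 destination address would patch the header checksum like this:

#include <linux/ip.h>
#include <net/checksum.h>

/* Sketch: rewrite the destination address of an IPv4 header and fix up the
 * header checksum incrementally with csum_replace4().
 */
static void example_rewrite_daddr(struct iphdr *iph, __be32 new_daddr)
{
	csum_replace4(&iph->check, iph->daddr, new_daddr);
	iph->daddr = new_daddr;
}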
diff --git a/include/net/codel.h b/include/net/codel.h
index fe0eab3..aeee280 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -66,7 +66,7 @@ typedef s32 codel_tdiff_t;
static inline codel_time_t codel_get_time(void)
{
- u64 ns = ktime_to_ns(ktime_get());
+ u64 ns = ktime_get_ns();
return ns >> CODEL_SHIFT;
}
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6efce38..58ad8c6 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -15,6 +15,17 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
+enum dsa_tag_protocol {
+ DSA_TAG_PROTO_NONE = 0,
+ DSA_TAG_PROTO_DSA,
+ DSA_TAG_PROTO_TRAILER,
+ DSA_TAG_PROTO_EDSA,
+ DSA_TAG_PROTO_BRCM,
+};
#define DSA_MAX_SWITCHES 4
#define DSA_MAX_PORTS 12
@@ -23,9 +34,15 @@ struct dsa_chip_data {
/*
* How to access the switch configuration registers.
*/
- struct device *mii_bus;
+ struct device *host_dev;
int sw_addr;
+ /* Device tree node pointer for this specific switch chip
+ * used during switch setup in case additional properties
+ * and resources needs to be used
+ */
+ struct device_node *of_node;
+
/*
* The names of the switch's ports. Use "cpu" to
* designate the switch port that the cpu is connected to,
@@ -34,6 +51,7 @@ struct dsa_chip_data {
* or any other string to indicate this is a physical port.
*/
char *port_names[DSA_MAX_PORTS];
+ struct device_node *port_dn[DSA_MAX_PORTS];
/*
* An array (with nr_chips elements) of which element [a]
@@ -59,6 +77,8 @@ struct dsa_platform_data {
struct dsa_chip_data *chip;
};
+struct packet_type;
+
struct dsa_switch_tree {
/*
* Configuration data for the platform device that owns
@@ -71,7 +91,11 @@ struct dsa_switch_tree {
* protocol to use.
*/
struct net_device *master_netdev;
- __be16 tag_protocol;
+ int (*rcv)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev);
+ enum dsa_tag_protocol tag_protocol;
/*
* The switch and port to which the CPU is attached.
@@ -110,15 +134,16 @@ struct dsa_switch {
struct dsa_switch_driver *drv;
/*
- * Reference to mii bus to use.
+ * Reference to host device to use.
*/
- struct mii_bus *master_mii_bus;
+ struct device *master_dev;
/*
* Slave mii_bus and devices for the individual ports.
*/
u32 dsa_port_mask;
u32 phys_port_mask;
+ u32 phys_mii_mask;
struct mii_bus *slave_mii_bus;
struct net_device *ports[DSA_MAX_PORTS];
};
@@ -147,15 +172,16 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
struct dsa_switch_driver {
struct list_head list;
- __be16 tag_protocol;
+ enum dsa_tag_protocol tag_protocol;
int priv_size;
/*
* Probing and setup.
*/
- char *(*probe)(struct mii_bus *bus, int sw_addr);
+ char *(*probe)(struct device *host_dev, int sw_addr);
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
+ u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
* Access to the switch's PHY registers.
@@ -170,37 +196,64 @@ struct dsa_switch_driver {
void (*poll_link)(struct dsa_switch *ds);
/*
+ * Link state adjustment (called from libphy)
+ */
+ void (*adjust_link)(struct dsa_switch *ds, int port,
+ struct phy_device *phydev);
+ void (*fixed_link_update)(struct dsa_switch *ds, int port,
+ struct fixed_phy_status *st);
+
+ /*
* ethtool hardware statistics.
*/
void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
void (*get_ethtool_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
int (*get_sset_count)(struct dsa_switch *ds);
+
+ /*
+ * ethtool Wake-on-LAN
+ */
+ void (*get_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+ int (*set_wol)(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *w);
+
+ /*
+ * Suspend and resume
+ */
+ int (*suspend)(struct dsa_switch *ds);
+ int (*resume)(struct dsa_switch *ds);
+
+ /*
+ * Port enable/disable
+ */
+ int (*port_enable)(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+ void (*port_disable)(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+
+ /*
+ * EEE setttings
+ */
+ int (*set_eee)(struct dsa_switch *ds, int port,
+ struct phy_device *phydev,
+ struct ethtool_eee *e);
+ int (*get_eee)(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e);
};
void register_switch_driver(struct dsa_switch_driver *type);
void unregister_switch_driver(struct dsa_switch_driver *type);
+struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
static inline void *ds_to_priv(struct dsa_switch *ds)
{
return (void *)(ds + 1);
}
-/*
- * The original DSA tag format and some other tag formats have no
- * ethertype, which means that we need to add a little hack to the
- * networking receive path to make sure that received frames get
- * the right ->protocol assigned to them when one of those tag
- * formats is in use.
- */
-static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
-{
- return !!(dst->tag_protocol == htons(ETH_P_DSA));
-}
-
-static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
{
- return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+ return dst->rcv != NULL;
}
-
#endif
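
To make the reshaped dsa_switch_driver interface above easier to follow, here is a minimal, hypothetical driver skeleton assembled only from the ops declared in this header; the mydsa_* names and the choice of DSA_TAG_PROTO_BRCM are invented for illustration.

#include <net/dsa.h>

static char *mydsa_probe(struct device *host_dev, int sw_addr)
{
	/* Return a chip name on success, NULL if nothing was detected. */
	return "mydsa-switch";
}

static int mydsa_setup(struct dsa_switch *ds)
{
	return 0;
}

static int mydsa_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}

static struct dsa_switch_driver mydsa_driver = {
	.tag_protocol	= DSA_TAG_PROTO_BRCM,
	.probe		= mydsa_probe,
	.setup		= mydsa_setup,
	.set_addr	= mydsa_set_addr,
};

/* register_switch_driver(&mydsa_driver) from module init,
 * unregister_switch_driver(&mydsa_driver) on exit.
 */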
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 6667a05..7ee2df0 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -27,7 +27,19 @@ struct flow_keys {
u8 ip_proto;
};
-bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
-__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
+bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+ void *data, __be16 proto, int nhoff, int hlen);
+static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+ return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
+}
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
u32 flow_hash_from_keys(struct flow_keys *keys);
+unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
+ __be16 protocol);
#endif
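
The skb-based wrappers above keep the old calling convention, while the new __skb_flow_dissect()/__skb_flow_get_ports() variants accept a raw buffer, which is what enables the eth_get_headlen() usage described in the pull message. A small, hypothetical example of the wrapper in use (not taken from the tree):

#include <net/flow_keys.h>

/* Sketch: hash a packet by its 5-tuple using the wrappers above. */
static u32 example_classify(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;	/* headers could not be dissected */

	/* keys.src, keys.dst, keys.ports and keys.ip_proto are now valid. */
	return flow_hash_from_keys(&keys);
}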
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ea4271d..cbafa37 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -6,6 +6,11 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+struct gnet_stats_basic_cpu {
+ struct gnet_stats_basic_packed bstats;
+ struct u64_stats_sync syncp;
+};
+
struct gnet_dump {
spinlock_t * lock;
struct sk_buff * skb;
@@ -27,21 +32,29 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d);
int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
+void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
const struct gnet_stats_basic_packed *b,
struct gnet_stats_rate_est64 *r);
-int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_queue(struct gnet_dump *d,
+ struct gnet_stats_queue __percpu *cpu_q,
+ struct gnet_stats_queue *q, __u32 qlen);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est64 *rate_est);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
diff --git a/include/net/geneve.h b/include/net/geneve.h
new file mode 100644
index 0000000..112132c
--- /dev/null
+++ b/include/net/geneve.h
@@ -0,0 +1,97 @@
+#ifndef __NET_GENEVE_H
+#define __NET_GENEVE_H 1
+
+#ifdef CONFIG_INET
+#include <net/udp_tunnel.h>
+#endif
+
+
+/* Geneve Header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver| Opt Len |O|C| Rsvd. | Protocol Type |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Virtual Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Length Options |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Option Header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Option Class | Type |R|R|R| Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Option Data |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+struct geneve_opt {
+ __be16 opt_class;
+ u8 type;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 length:5;
+ u8 r3:1;
+ u8 r2:1;
+ u8 r1:1;
+#else
+ u8 r1:1;
+ u8 r2:1;
+ u8 r3:1;
+ u8 length:5;
+#endif
+ u8 opt_data[];
+};
+
+#define GENEVE_CRIT_OPT_TYPE (1 << 7)
+
+struct genevehdr {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 opt_len:6;
+ u8 ver:2;
+ u8 rsvd1:6;
+ u8 critical:1;
+ u8 oam:1;
+#else
+ u8 ver:2;
+ u8 opt_len:6;
+ u8 oam:1;
+ u8 critical:1;
+ u8 rsvd1:6;
+#endif
+ __be16 proto_type;
+ u8 vni[3];
+ u8 rsvd2;
+ struct geneve_opt options[];
+};
+
+#ifdef CONFIG_INET
+struct geneve_sock;
+
+typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
+
+struct geneve_sock {
+ struct hlist_node hlist;
+ geneve_rcv_t *rcv;
+ void *rcv_data;
+ struct work_struct del_work;
+ struct socket *sock;
+ struct rcu_head rcu;
+ atomic_t refcnt;
+ struct udp_offload udp_offloads;
+};
+
+#define GENEVE_VER 0
+#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
+
+struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
+ geneve_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6);
+
+void geneve_sock_release(struct geneve_sock *vs);
+
+int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
+ __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
+ __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+ bool xnet);
+#endif /*ifdef CONFIG_INET */
+
+#endif /*ifdef__NET_GENEVE_H */
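
The new Geneve header exposes a small socket-style API; a hypothetical consumer (names invented for illustration) would open and release a receive socket roughly as follows, using only the prototypes declared above. Port 6081 is the IANA-assigned Geneve UDP port.

#include <net/geneve.h>

/* Sketch: open a Geneve receive socket and tear it down again. */
static void example_geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
{
	/* handling of the decapsulated inner frame would go here */
	kfree_skb(skb);
}

static struct geneve_sock *example_geneve_open(struct net *net)
{
	/* no_share = false, ipv6 = false */
	return geneve_sock_add(net, htons(6081), example_geneve_rcv,
			       NULL, false, false);
}

static void example_geneve_close(struct geneve_sock *gs)
{
	geneve_sock_release(gs);
}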
diff --git a/include/net/gue.h b/include/net/gue.h
new file mode 100644
index 0000000..b6c3327
--- /dev/null
+++ b/include/net/gue.h
@@ -0,0 +1,23 @@
+#ifndef __NET_GUE_H
+#define __NET_GUE_H
+
+struct guehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hlen:4,
+ version:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ hlen:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 next_hdr;
+ __u16 flags;
+ };
+ __u32 word;
+ };
+};
+
+#endif
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d07b1a6..55a8d40 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -35,7 +35,6 @@ enum {
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
INET6_IFADDR_STATE_ERRDAD,
- INET6_IFADDR_STATE_UP,
INET6_IFADDR_STATE_DEAD,
};
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5fbe656..848e85c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
#endif
}
+static inline unsigned long
+inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
+ unsigned long max_when)
+{
+ u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
+
+ return (unsigned long)min_t(u64, when, max_when);
+}
+
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
struct request_sock *inet_csk_search_req(const struct sock *sk,
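
The new inet_csk_rto_backoff() helper shifts the base RTO left by the current backoff count and clamps the result. A purely illustrative caller re-arming a retransmit timer might look like this (assumes the usual inet_csk()/sk_reset_timer() helpers and the TCP_RTO_MAX bound from net/tcp.h):

/* Sketch: arm a retransmit timer with the backed-off, clamped RTO. */
static void example_rearm_rto(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long timeout = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + timeout);
}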
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 01d590e..80479ab 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -61,7 +61,6 @@ struct inet_peer {
struct inet_peer_base {
struct inet_peer __rcu *root;
seqlock_t lock;
- u32 flush_seq;
int total;
};
diff --git a/include/net/ip.h b/include/net/ip.h
index db4a771..0bb6207 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -180,8 +180,10 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- __be32 saddr, const struct ip_reply_arg *arg,
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+ const struct ip_options *sopt,
+ __be32 daddr, __be32 saddr,
+ const struct ip_reply_arg *arg,
unsigned int len);
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
@@ -229,8 +231,6 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
}
#endif
-extern int sysctl_ip_nonlocal_bind;
-
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
@@ -364,6 +364,14 @@ static inline void inet_set_txhash(struct sock *sk)
sk->sk_txhash = flow_hash_from_keys(&keys);
}
+static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+
+ return csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb_gro_len(skb), proto, 0);
+}
+
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
@@ -505,7 +513,14 @@ int ip_forward(struct sk_buff *skb);
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt, int is_frag);
-int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+
+int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
+ const struct ip_options *sopt);
+static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
+{
+ return __ip_options_echo(dopt, skb, &IPCB(skb)->opt);
+}
+
void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
@@ -542,6 +557,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
+bool icmp_global_allow(void);
+extern int sysctl_icmp_msgs_per_sec;
+extern int sysctl_icmp_msgs_burst;
+
#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 55236cb..1a49b73 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -48,6 +48,14 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0));
}
+static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+
+ return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb_gro_len(skb), proto, 0));
+}
+
static __inline__ __sum16 tcp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index cf485f9..8eea35d 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -64,7 +64,7 @@ struct fib6_node {
__u16 fn_bit; /* bit key */
__u16 fn_flags;
- __u32 fn_sernum;
+ int fn_sernum;
struct rt6_info *rr_ptr;
};
@@ -202,15 +202,25 @@ static inline void ip6_rt_put(struct rt6_info *rt)
dst_release(&rt->dst);
}
-struct fib6_walker_t {
+enum fib6_walk_state {
+#ifdef CONFIG_IPV6_SUBTREES
+ FWS_S,
+#endif
+ FWS_L,
+ FWS_R,
+ FWS_C,
+ FWS_U
+};
+
+struct fib6_walker {
struct list_head lh;
struct fib6_node *root, *node;
struct rt6_info *leaf;
- unsigned char state;
- unsigned char prune;
+ enum fib6_walk_state state;
+ bool prune;
unsigned int skip;
unsigned int count;
- int (*func)(struct fib6_walker_t *);
+ int (*func)(struct fib6_walker *);
void *args;
};
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9922093..dc9d2a2 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -65,7 +65,8 @@ struct fnhe_hash_bucket {
struct fib_nh_exception __rcu *chain;
};
-#define FNHE_HASH_SIZE 2048
+#define FNHE_HASH_SHIFT 11
+#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT)
#define FNHE_RECLAIM_DEPTH 5
struct fib_nh {
@@ -87,7 +88,7 @@ struct fib_nh {
int nh_saddr_genid;
struct rtable __rcu * __percpu *nh_pcpu_rth_output;
struct rtable __rcu *nh_rth_input;
- struct fnhe_hash_bucket *nh_exceptions;
+ struct fnhe_hash_bucket __rcu *nh_exceptions;
};
/*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 8dd8cab..5bc6ede 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -10,6 +10,7 @@
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
+#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#if IS_ENABLED(CONFIG_IPV6)
@@ -31,6 +32,13 @@ struct ip_tunnel_6rd_parm {
};
#endif
+struct ip_tunnel_encap {
+ __u16 type;
+ __u16 flags;
+ __be16 sport;
+ __be16 dport;
+};
+
struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
@@ -56,13 +64,18 @@ struct ip_tunnel {
/* These four fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
__u32 o_seqno; /* The last output seqno */
- int hlen; /* Precalculated header length */
+ int tun_hlen; /* Precalculated header length */
int mlink;
struct ip_tunnel_dst __percpu *dst_cache;
struct ip_tunnel_parm parms;
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+ struct ip_tunnel_encap encap;
+
+ int hlen; /* tun_hlen + encap_hlen */
+
/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm ip6rd;
@@ -73,15 +86,18 @@ struct ip_tunnel {
struct gro_cells gro_cells;
};
-#define TUNNEL_CSUM __cpu_to_be16(0x01)
-#define TUNNEL_ROUTING __cpu_to_be16(0x02)
-#define TUNNEL_KEY __cpu_to_be16(0x04)
-#define TUNNEL_SEQ __cpu_to_be16(0x08)
-#define TUNNEL_STRICT __cpu_to_be16(0x10)
-#define TUNNEL_REC __cpu_to_be16(0x20)
-#define TUNNEL_VERSION __cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
+#define TUNNEL_CSUM __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING __cpu_to_be16(0x02)
+#define TUNNEL_KEY __cpu_to_be16(0x04)
+#define TUNNEL_SEQ __cpu_to_be16(0x08)
+#define TUNNEL_STRICT __cpu_to_be16(0x10)
+#define TUNNEL_REC __cpu_to_be16(0x20)
+#define TUNNEL_VERSION __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
+#define TUNNEL_OAM __cpu_to_be16(0x0200)
+#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
+#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
struct tnl_ptk_info {
__be16 flags;
@@ -114,6 +130,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ u8 *protocol, struct flowi4 *fl4);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
@@ -131,6 +149,8 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
+int ip_tunnel_encap_setup(struct ip_tunnel *t,
+ struct ip_tunnel_encap *ipencap);
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 624a8a5..615b20b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1,6 +1,5 @@
-/*
- * IP Virtual Server
- * data structure and functionality definitions
+/* IP Virtual Server
+ * data structure and functionality definitions
*/
#ifndef _NET_IP_VS_H
@@ -12,7 +11,7 @@
#include <linux/list.h> /* for struct list_head */
#include <linux/spinlock.h> /* for struct rwlock_t */
-#include <linux/atomic.h> /* for struct atomic_t */
+#include <linux/atomic.h> /* for struct atomic_t */
#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -30,15 +29,13 @@
#endif
#include <net/net_namespace.h> /* Netw namespace */
-/*
- * Generic access of ipvs struct
- */
+/* Generic access of ipvs struct */
static inline struct netns_ipvs *net_ipvs(struct net* net)
{
return net->ipvs;
}
-/*
- * Get net ptr from skb in traffic cases
+
+/* Get net ptr from skb in traffic cases
* use skb_sknet when call is from userland (ioctl or netlink)
*/
static inline struct net *skb_net(const struct sk_buff *skb)
@@ -90,8 +87,8 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
return &init_net;
#endif
}
-/*
- * This one needed for single_open_net since net is stored directly in
+
+/* This one needed for single_open_net since net is stored directly in
* private not as a struct i.e. seq_file_net can't be used.
*/
static inline struct net *seq_file_single_net(struct seq_file *seq)
@@ -108,7 +105,7 @@ extern int ip_vs_conn_tab_size;
struct ip_vs_iphdr {
__u32 len; /* IPv4 simply where L4 starts
- IPv6 where L4 Transport Header starts */
+ * IPv6 where L4 Transport Header starts */
__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
__s16 protocol;
__s32 flags;
@@ -304,16 +301,11 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
#define LeaveFunction(level) do {} while (0)
#endif
-
-/*
- * The port number of FTP service (in network order).
- */
+/* The port number of FTP service (in network order). */
#define FTPPORT cpu_to_be16(21)
#define FTPDATA cpu_to_be16(20)
-/*
- * TCP State Values
- */
+/* TCP State Values */
enum {
IP_VS_TCP_S_NONE = 0,
IP_VS_TCP_S_ESTABLISHED,
@@ -329,25 +321,19 @@ enum {
IP_VS_TCP_S_LAST
};
-/*
- * UDP State Values
- */
+/* UDP State Values */
enum {
IP_VS_UDP_S_NORMAL,
IP_VS_UDP_S_LAST,
};
-/*
- * ICMP State Values
- */
+/* ICMP State Values */
enum {
IP_VS_ICMP_S_NORMAL,
IP_VS_ICMP_S_LAST,
};
-/*
- * SCTP State Values
- */
+/* SCTP State Values */
enum ip_vs_sctp_states {
IP_VS_SCTP_S_NONE,
IP_VS_SCTP_S_INIT1,
@@ -366,21 +352,18 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};
-/*
- * Delta sequence info structure
- * Each ip_vs_conn has 2 (output AND input seq. changes).
- * Only used in the VS/NAT.
+/* Delta sequence info structure
+ * Each ip_vs_conn has 2 (output AND input seq. changes).
+ * Only used in the VS/NAT.
*/
struct ip_vs_seq {
__u32 init_seq; /* Add delta from this seq */
__u32 delta; /* Delta in sequence numbers */
__u32 previous_delta; /* Delta in sequence numbers
- before last resized pkt */
+ * before last resized pkt */
};
-/*
- * counters per cpu
- */
+/* counters per cpu */
struct ip_vs_counters {
__u32 conns; /* connections scheduled */
__u32 inpkts; /* incoming packets */
@@ -388,17 +371,13 @@ struct ip_vs_counters {
__u64 inbytes; /* incoming bytes */
__u64 outbytes; /* outgoing bytes */
};
-/*
- * Stats per cpu
- */
+/* Stats per cpu */
struct ip_vs_cpu_stats {
struct ip_vs_counters ustats;
struct u64_stats_sync syncp;
};
-/*
- * IPVS statistics objects
- */
+/* IPVS statistics objects */
struct ip_vs_estimator {
struct list_head list;
@@ -491,9 +470,7 @@ struct ip_vs_protocol {
void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};
-/*
- * protocol data per netns
- */
+/* protocol data per netns */
struct ip_vs_proto_data {
struct ip_vs_proto_data *next;
struct ip_vs_protocol *pp;
@@ -520,9 +497,7 @@ struct ip_vs_conn_param {
__u8 pe_data_len;
};
-/*
- * IP_VS structure allocated for each dynamically scheduled connection
- */
+/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
struct hlist_node c_list; /* hashed list heads */
/* Protocol, addresses and port numbers */
@@ -535,6 +510,7 @@ struct ip_vs_conn {
union nf_inet_addr daddr; /* destination address */
volatile __u32 flags; /* status flags */
__u16 protocol; /* Which protocol (TCP/UDP) */
+ __u16 daf; /* Address family of the dest */
#ifdef CONFIG_NET_NS
struct net *net; /* Name space */
#endif
@@ -560,17 +536,18 @@ struct ip_vs_conn {
struct ip_vs_dest *dest; /* real server */
atomic_t in_pkts; /* incoming packet counter */
- /* packet transmitter for different forwarding methods. If it
- mangles the packet, it must return NF_DROP or better NF_STOLEN,
- otherwise this must be changed to a sk_buff **.
- NF_ACCEPT can be returned when destination is local.
+ /* Packet transmitter for different forwarding methods. If it
+ * mangles the packet, it must return NF_DROP or better NF_STOLEN,
+ * otherwise this must be changed to a sk_buff **.
+ * NF_ACCEPT can be returned when destination is local.
*/
int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
/* Note: we can group the following members into a structure,
- in order to save more space, and the following members are
- only used in VS/NAT anyway */
+ * in order to save more space, and the following members are
+ * only used in VS/NAT anyway
+ */
struct ip_vs_app *app; /* bound ip_vs_app object */
void *app_data; /* Application private data */
struct ip_vs_seq in_seq; /* incoming seq. struct */
@@ -583,9 +560,7 @@ struct ip_vs_conn {
struct rcu_head rcu_head;
};
-/*
- * To save some memory in conn table when name space is disabled.
- */
+/* To save some memory in conn table when name space is disabled. */
static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
{
#ifdef CONFIG_NET_NS
@@ -594,6 +569,7 @@ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
return &init_net;
#endif
}
+
static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
{
#ifdef CONFIG_NET_NS
@@ -611,13 +587,12 @@ static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
#endif
}
-/*
- * Extended internal versions of struct ip_vs_service_user and
- * ip_vs_dest_user for IPv6 support.
+/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
+ * for IPv6 support.
*
- * We need these to conveniently pass around service and destination
- * options, but unfortunately, we also need to keep the old definitions to
- * maintain userspace backwards compatibility for the setsockopt interface.
+ * We need these to conveniently pass around service and destination
+ * options, but unfortunately, we also need to keep the old definitions to
+ * maintain userspace backwards compatibility for the setsockopt interface.
*/
struct ip_vs_service_user_kern {
/* virtual service addresses */
@@ -648,12 +623,15 @@ struct ip_vs_dest_user_kern {
/* thresholds for active connections */
u32 u_threshold; /* upper threshold */
u32 l_threshold; /* lower threshold */
+
+ /* Address family of addr */
+ u16 af;
};
/*
- * The information about the virtual service offered to the net
- * and the forwarding entries
+ * The information about the virtual service offered to the net and the
+ * forwarding entries.
*/
struct ip_vs_service {
struct hlist_node s_list; /* for normal service table */
@@ -693,9 +671,8 @@ struct ip_vs_dest_dst {
struct rcu_head rcu_head;
};
-/*
- * The real server destination forwarding entry
- * with ip address, port number, and so on.
+/* The real server destination forwarding entry with ip address, port number,
+ * and so on.
*/
struct ip_vs_dest {
struct list_head n_list; /* for the dests in the service */
@@ -734,10 +711,7 @@ struct ip_vs_dest {
unsigned int in_rs_table:1; /* we are in rs_table */
};
-
-/*
- * The scheduler object
- */
+/* The scheduler object */
struct ip_vs_scheduler {
struct list_head n_list; /* d-linked list head */
char *name; /* scheduler name */
@@ -777,9 +751,7 @@ struct ip_vs_pe {
int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
};
-/*
- * The application module object (a.k.a. app incarnation)
- */
+/* The application module object (a.k.a. app incarnation) */
struct ip_vs_app {
struct list_head a_list; /* member in app list */
int type; /* IP_VS_APP_TYPE_xxx */
@@ -795,16 +767,14 @@ struct ip_vs_app {
atomic_t usecnt; /* usage counter */
struct rcu_head rcu_head;
- /*
- * output hook: Process packet in inout direction, diff set for TCP.
+ /* output hook: Process packet in inout direction, diff set for TCP.
* Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
* 2=Mangled but checksum was not updated
*/
int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
struct sk_buff *, int *diff);
- /*
- * input hook: Process packet in outin direction, diff set for TCP.
+ /* input hook: Process packet in outin direction, diff set for TCP.
* Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
* 2=Mangled but checksum was not updated
*/
@@ -863,9 +833,7 @@ struct ipvs_master_sync_state {
struct netns_ipvs {
int gen; /* Generation */
int enable; /* enable like nf_hooks do */
- /*
- * Hash table: for real service lookups
- */
+ /* Hash table: for real service lookups */
#define IP_VS_RTAB_BITS 4
#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
@@ -899,7 +867,7 @@ struct netns_ipvs {
struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
#endif
/* ip_vs_conn */
- atomic_t conn_count; /* connection counter */
+ atomic_t conn_count; /* connection counter */
/* ip_vs_ctl */
struct ip_vs_stats tot_stats; /* Statistics & est. */
@@ -986,6 +954,10 @@ struct netns_ipvs {
char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
/* net name space ptr */
struct net *net; /* Needed by timer routines */
+ /* Number of heterogeneous destinations, needed becaus heterogeneous
+ * are not supported when synchronization is enabled.
+ */
+ unsigned int mixed_address_family_dests;
};
#define DEFAULT_SYNC_THRESHOLD 3
@@ -1139,9 +1111,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
#endif
-/*
- * IPVS core functions
- * (from ip_vs_core.c)
+/* IPVS core functions
+ * (from ip_vs_core.c)
*/
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
@@ -1149,11 +1120,9 @@ void ip_vs_init_hash_table(struct list_head *table, int rows);
#define IP_VS_APP_TYPE_FTP 1
-/*
- * ip_vs_conn handling functions
- * (from ip_vs_conn.c)
+/* ip_vs_conn handling functions
+ * (from ip_vs_conn.c)
*/
-
enum {
IP_VS_DIR_INPUT = 0,
IP_VS_DIR_OUTPUT,
@@ -1210,7 +1179,7 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
-struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
+struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
const union nf_inet_addr *daddr,
__be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark);
@@ -1284,9 +1253,7 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
-/*
- * IPVS netns init & cleanup functions
- */
+/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct net *net);
int ip_vs_control_net_init(struct net *net);
int ip_vs_protocol_net_init(struct net *net);
@@ -1301,9 +1268,8 @@ void ip_vs_estimator_net_cleanup(struct net *net);
void ip_vs_sync_net_cleanup(struct net *net);
void ip_vs_service_net_cleanup(struct net *net);
-/*
- * IPVS application functions
- * (from ip_vs_app.c)
+/* IPVS application functions
+ * (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
@@ -1323,9 +1289,7 @@ int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
-/*
- * Use a #define to avoid all of module.h just for these trivial ops
- */
+/* Use a #define to avoid all of module.h just for these trivial ops */
#define ip_vs_pe_get(pe) \
if (pe && pe->module) \
__module_get(pe->module);
@@ -1334,9 +1298,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
if (pe && pe->module) \
module_put(pe->module);
-/*
- * IPVS protocol functions (from ip_vs_proto.c)
- */
+/* IPVS protocol functions (from ip_vs_proto.c) */
int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
@@ -1354,9 +1316,8 @@ extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;
-/*
- * Registering/unregistering scheduler functions
- * (from ip_vs_sched.c)
+/* Registering/unregistering scheduler functions
+ * (from ip_vs_sched.c)
*/
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
@@ -1375,10 +1336,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
-
-/*
- * IPVS control data and functions (from ip_vs_ctl.c)
- */
+/* IPVS control data and functions (from ip_vs_ctl.c) */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;
@@ -1396,8 +1354,9 @@ void ip_vs_unregister_nl_ioctl(void);
int ip_vs_control_init(void);
void ip_vs_control_cleanup(void);
struct ip_vs_dest *
-ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
- __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
+ const union nf_inet_addr *daddr, __be16 dport,
+ const union nf_inet_addr *vaddr, __be16 vport,
__u16 protocol, __u32 fwmark, __u32 flags);
void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
@@ -1418,26 +1377,21 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
kfree(dest);
}
-/*
- * IPVS sync daemon data and function prototypes
- * (from ip_vs_sync.c)
+/* IPVS sync daemon data and function prototypes
+ * (from ip_vs_sync.c)
*/
int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
int stop_sync_thread(struct net *net, int state);
void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
-/*
- * IPVS rate estimator prototypes (from ip_vs_est.c)
- */
+/* IPVS rate estimator prototypes (from ip_vs_est.c) */
void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
struct ip_vs_stats *stats);
-/*
- * Various IPVS packet transmitters (from ip_vs_xmit.c)
- */
+/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1468,12 +1422,10 @@ int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
#endif
#ifdef CONFIG_SYSCTL
-/*
- * This is a simple mechanism to ignore packets when
- * we are loaded. Just set ip_vs_drop_rate to 'n' and
- * we start to drop 1/rate of the packets
+/* This is a simple mechanism to ignore packets when
+ * we are loaded. Just set ip_vs_drop_rate to 'n' and
+ * we start to drop 1/rate of the packets
*/
-
static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
if (!ipvs->drop_rate)
@@ -1487,9 +1439,7 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif
-/*
- * ip_vs_fwd_tag returns the forwarding tag of the connection
- */
+/* ip_vs_fwd_tag returns the forwarding tag of the connection */
#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
@@ -1548,9 +1498,7 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
return csum_partial(diff, sizeof(diff), oldsum);
}
-/*
- * Forget current conntrack (unconfirmed) and attach notrack entry
- */
+/* Forget current conntrack (unconfirmed) and attach notrack entry */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -1567,9 +1515,8 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
}
#ifdef CONFIG_IP_VS_NFCT
-/*
- * Netfilter connection tracking
- * (from ip_vs_nfct.c)
+/* Netfilter connection tracking
+ * (from ip_vs_nfct.c)
*/
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
@@ -1608,14 +1555,12 @@ static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
}
-/* CONFIG_IP_VS_NFCT */
-#endif
+#endif /* CONFIG_IP_VS_NFCT */
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
- /*
- * We think the overhead of processing active connections is 256
+ /* We think the overhead of processing active connections is 256
* times higher than that of inactive connections on average. (This
* factor of 256 might not be accurate; we will change it later.) We
* use the following formula to estimate the overhead now:
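
The hunk above is cut off before the formula itself; as a minimal sketch of the weighting it describes (using the long-standing activeconns/inactconns counters in struct ip_vs_dest, not text added by this patch):

/* Minimal sketch: weight active connections 256x more than inactive ones. */
static inline int ip_vs_conn_overhead_sketch(struct ip_vs_dest *dest)
{
	return (atomic_read(&dest->activeconns) << 8) +	/* active x 256 */
	       atomic_read(&dest->inactconns);		/* + inactive   */
}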
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a2db816..97f4720 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -121,6 +121,7 @@ struct frag_hdr {
/* sysctls */
extern int sysctl_mld_max_msf;
+extern int sysctl_mld_qrv;
#define _DEVINC(net, statname, modifier, idev, field) \
({ \
@@ -287,7 +288,8 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
-bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
+ const struct inet6_skb_parm *opt);
static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index dae2e24..0ad1f47 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4,6 +4,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1226,7 +1227,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
- * particular key.
+ * particular key. Setting this flag does not necessarily mean that SKBs
+ * will have sufficient tailroom for ICV or MIC.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
@@ -1238,7 +1240,9 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
- * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
+ * not necessarily mean that SKBs will have sufficient tailroom for ICV or
+ * MIC.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames
@@ -1405,7 +1409,7 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @wme: indicates whether the STA supports WME. Only valid during AP-mode.
+ * @wme: indicates whether the STA supports QoS/WME.
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
@@ -1533,16 +1537,6 @@ struct ieee80211_tx_control {
* @IEEE80211_HW_MFP_CAPABLE:
* Hardware supports management frame protection (MFP, IEEE 802.11w).
*
- * @IEEE80211_HW_SUPPORTS_STATIC_SMPS:
- * Hardware supports static spatial multiplexing powersave,
- * ie. can turn off all but one chain even on HT connections
- * that should be using more chains.
- *
- * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS:
- * Hardware supports dynamic spatial multiplexing powersave,
- * ie. can turn off all but one chain and then wake the rest
- * up as required after, for example, rts/cts handshake.
- *
* @IEEE80211_HW_SUPPORTS_UAPSD:
* Hardware supports Unscheduled Automatic Power Save Delivery
* (U-APSD) in managed mode. The mode is configured with
@@ -1606,6 +1600,9 @@ struct ieee80211_tx_control {
* is not enabled the default action is to disconnect when getting the
* CSA frame.
*
+ * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
+ * or tailroom of TX skbs without copying them first.
+ *
* @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
*/
@@ -1625,8 +1622,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
IEEE80211_HW_MFP_CAPABLE = 1<<13,
IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
- IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15,
- IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
+ /* free slots */
IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
@@ -1639,7 +1635,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
- /* bit 29 unused */
+ IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29,
IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30,
};
@@ -2666,7 +2662,9 @@ enum ieee80211_roc_type {
*
* @set_coverage_class: Set slot time for given coverage class as specified
* in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
- * accordingly. This callback is not required and may sleep.
+ * accordingly; a coverage class of -1 enables the ACK timeout
+ * estimation algorithm (dynack). To disable dynack, set a valid
+ * coverage class value. This callback is not required and may sleep.
*
* @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
* be %NULL. The callback can sleep.
@@ -2950,7 +2948,7 @@ struct ieee80211_ops {
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
- void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
+ void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
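
A driver adopting the new s16 signature might handle the -1 sentinel roughly as follows (illustrative sketch only; the mydrv_* symbols are hypothetical, not part of this patch):

/* Illustrative only: mydrv_* names are hypothetical. */
static void mydrv_set_coverage_class(struct ieee80211_hw *hw,
				     s16 coverage_class)
{
	struct mydrv_priv *priv = hw->priv;

	if (coverage_class == -1) {
		/* -1 requests dynamic ACK timeout estimation (dynack) */
		mydrv_enable_dynack(priv);
	} else {
		/* any valid (>= 0) class disables dynack and programs the
		 * slot time / ACK timeout for that coverage class
		 */
		mydrv_program_ack_timeout(priv, coverage_class);
	}
}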
diff --git a/include/net/mld.h b/include/net/mld.h
index faa1d16..01d7513 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -88,12 +88,15 @@ struct mld2_query {
#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
+#define MLD_EXP_MIN_LIMIT 32768UL
+#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1)
+
static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
{
/* RFC3810, 5.1.3. Maximum Response Code */
unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
- if (mc_mrc < 32768) {
+ if (mc_mrc < MLD_EXP_MIN_LIMIT) {
ret = mc_mrc;
} else {
unsigned long mc_man, mc_exp;
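
For values at or above MLD_EXP_MIN_LIMIT, RFC 3810 section 5.1.3 encodes the maximum response delay as a mantissa/exponent pair; a hedged sketch of that decoding (assuming the MLDV2_MRC_EXP/MAN helpers alongside the QQIC ones above):

		/* Sketch of the exponential form (RFC 3810, 5.1.3):
		 * delay = (mant | 0x1000) << (exp + 3)
		 */
		mc_exp = MLDV2_MRC_EXP(mc_mrc);
		mc_man = MLDV2_MRC_MAN(mc_mrc);
		ret = (mc_man | 0x1000) << (mc_exp + 3);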
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 47f4254..f60558d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -373,7 +373,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
return 0;
}
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
unsigned int seq, hh_alen;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
new file mode 100644
index 0000000..2aa6048
--- /dev/null
+++ b/include/net/netfilter/br_netfilter.h
@@ -0,0 +1,6 @@
+#ifndef _BR_NETFILTER_H_
+#define _BR_NETFILTER_H_
+
+void br_netfilter_enable(void);
+
+#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
new file mode 100644
index 0000000..a9c001c
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -0,0 +1,14 @@
+#ifndef _NF_NAT_MASQUERADE_IPV4_H_
+#define _NF_NAT_MASQUERADE_IPV4_H_
+
+#include <net/netfilter/nf_nat.h>
+
+unsigned int
+nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct nf_nat_range *range,
+ const struct net_device *out);
+
+void nf_nat_masquerade_ipv4_register_notifier(void);
+void nf_nat_masquerade_ipv4_unregister_notifier(void);
+
+#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 931fbf8..e842719 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -1,128 +1,13 @@
#ifndef _IPV4_NF_REJECT_H
#define _IPV4_NF_REJECT_H
-#include <net/ip.h>
-#include <net/tcp.h>
-#include <net/route.h>
-#include <net/dst.h>
+#include <net/icmp.h>
static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
{
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
-/* Send RST reply */
-static void nf_send_reset(struct sk_buff *oldskb, int hook)
-{
- struct sk_buff *nskb;
- const struct iphdr *oiph;
- struct iphdr *niph;
- const struct tcphdr *oth;
- struct tcphdr _otcph, *tcph;
-
- /* IP header checks: fragment. */
- if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
- return;
-
- oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
- sizeof(_otcph), &_otcph);
- if (oth == NULL)
- return;
-
- /* No RST for RST. */
- if (oth->rst)
- return;
-
- if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
- return;
-
- /* Check checksum */
- if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
- return;
- oiph = ip_hdr(oldskb);
-
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
- if (!nskb)
- return;
-
- skb_reserve(nskb, LL_MAX_HEADER);
-
- skb_reset_network_header(nskb);
- niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
- niph->version = 4;
- niph->ihl = sizeof(struct iphdr) / 4;
- niph->tos = 0;
- niph->id = 0;
- niph->frag_off = htons(IP_DF);
- niph->protocol = IPPROTO_TCP;
- niph->check = 0;
- niph->saddr = oiph->daddr;
- niph->daddr = oiph->saddr;
-
- skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- memset(tcph, 0, sizeof(*tcph));
- tcph->source = oth->dest;
- tcph->dest = oth->source;
- tcph->doff = sizeof(struct tcphdr) / 4;
-
- if (oth->ack)
- tcph->seq = oth->ack_seq;
- else {
- tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
- oldskb->len - ip_hdrlen(oldskb) -
- (oth->doff << 2));
- tcph->ack = 1;
- }
-
- tcph->rst = 1;
- tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
- niph->daddr, 0);
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum_start = (unsigned char *)tcph - nskb->head;
- nskb->csum_offset = offsetof(struct tcphdr, check);
-
- /* ip_route_me_harder expects skb->dst to be set */
- skb_dst_set_noref(nskb, skb_dst(oldskb));
-
- nskb->protocol = htons(ETH_P_IP);
- if (ip_route_me_harder(nskb, RTN_UNSPEC))
- goto free_nskb;
-
- niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
-
- /* "Never happens" */
- if (nskb->len > dst_mtu(skb_dst(nskb)))
- goto free_nskb;
-
- nf_ct_attach(nskb, oldskb);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
- /* If we use ip_local_out for bridged traffic, the MAC source on
- * the RST will be ours, instead of the destination's. This confuses
- * some routers/firewalls, and they drop the packet. So we need to
- * build the eth header using the original destination's MAC as the
- * source, and send the RST packet directly.
- */
- if (oldskb->nf_bridge) {
- struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
- if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
- goto free_nskb;
- dev_queue_xmit(nskb);
- } else
-#endif
- ip_local_out(nskb);
-
- return;
-
- free_nskb:
- kfree_skb(nskb);
-}
-
+void nf_send_reset(struct sk_buff *oldskb, int hook);
#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
new file mode 100644
index 0000000..0a13396
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -0,0 +1,10 @@
+#ifndef _NF_NAT_MASQUERADE_IPV6_H_
+#define _NF_NAT_MASQUERADE_IPV6_H_
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ const struct net_device *out);
+void nf_nat_masquerade_ipv6_register_notifier(void);
+void nf_nat_masquerade_ipv6_unregister_notifier(void);
+
+#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 710d17e..7a10cfc 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -147,7 +147,7 @@ static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
nf_ct_attach(nskb, oldskb);
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* If we use ip6_local_out for bridged traffic, the MAC source on
* the RST will be ours, instead of the destination's. This confuses
* some routers/firewalls, and they drop the packet. So we need to
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index a71dd33..344b1ab 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -32,10 +32,8 @@ struct nf_conn_nat {
struct hlist_node bysource;
struct nf_conn *ct;
union nf_conntrack_nat_help help;
-#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
- defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \
- defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
- defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
+ IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
int masq_index;
#endif
};
@@ -68,8 +66,8 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
struct nf_conn_nat *nat,
const struct net_device *out)
{
-#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \
- IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
+ IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
return nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
nat->masq_index != out->ifindex;
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 5a2919b..340c013 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -42,8 +42,83 @@ const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum);
+
+unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum, unsigned int hdrlen);
+unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
+unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct));
+
#endif /* _NF_NAT_L3PROTO_H */
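
The NAT table hooks (iptables or nftables flavour) are expected to pass their own rule walker in through do_chain; a rough sketch of the calling pattern (the my_* names are placeholders, not symbols from this patch):

/* Placeholder sketch of how a NAT table hook wraps nf_nat_ipv4_out(). */
static unsigned int my_nat_do_chain(const struct nf_hook_ops *ops,
				    struct sk_buff *skb,
				    const struct net_device *in,
				    const struct net_device *out,
				    struct nf_conn *ct)
{
	/* evaluate the table's rules to pick the NAT mapping for ct */
	return my_nat_eval_rules(skb, ops->hooknum, in, out);
}

static unsigned int my_nat_ipv4_out_hook(const struct nf_hook_ops *ops,
					 struct sk_buff *skb,
					 const struct net_device *in,
					 const struct net_device *out,
					 int (*okfn)(struct sk_buff *))
{
	return nf_nat_ipv4_out(ops, skb, in, out, my_nat_do_chain);
}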
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c4d8619..3d72923 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -241,6 +241,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @dtype: data type (verdict or numeric type defined by userspace)
* @size: maximum set size
* @nelems: number of elements
+ * @policy: set parameterization (see enum nft_set_policies)
* @ops: set ops
* @flags: set flags
* @klen: key length
@@ -255,6 +256,7 @@ struct nft_set {
u32 dtype;
u32 size;
u32 nelems;
+ u16 policy;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
u16 flags;
diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h
new file mode 100644
index 0000000..c72729f
--- /dev/null
+++ b/include/net/netfilter/nft_masq.h
@@ -0,0 +1,16 @@
+#ifndef _NFT_MASQ_H_
+#define _NFT_MASQ_H_
+
+struct nft_masq {
+ u32 flags;
+};
+
+extern const struct nla_policy nft_masq_policy[];
+
+int nft_masq_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+#endif /* _NFT_MASQ_H_ */
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 36b0da2..60fa153 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -14,12 +14,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
-void nft_reject_ipv4_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt);
-
-void nft_reject_ipv6_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt);
+int nft_reject_icmp_code(u8 code);
+int nft_reject_icmpv6_code(u8 code);
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index aec5e12..24945ce 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -76,6 +76,7 @@ struct netns_ipv4 {
int sysctl_tcp_ecn;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
+ int sysctl_ip_nonlocal_bind;
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index eade27a..69ae41f 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -76,7 +76,7 @@ struct netns_ipv6 {
#endif
#endif
atomic_t dev_addr_genid;
- atomic_t rt_genid;
+ atomic_t fib6_sernum;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 3492434..9da7982 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -13,6 +13,19 @@ struct ctl_table_header;
struct xfrm_policy_hash {
struct hlist_head *table;
unsigned int hmask;
+ u8 dbits4;
+ u8 sbits4;
+ u8 dbits6;
+ u8 sbits6;
+};
+
+struct xfrm_policy_hthresh {
+ struct work_struct work;
+ seqlock_t lock;
+ u8 lbits4;
+ u8 rbits4;
+ u8 lbits6;
+ u8 rbits6;
};
struct netns_xfrm {
@@ -41,6 +54,7 @@ struct netns_xfrm {
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
+ struct xfrm_policy_hthresh policy_hthresh;
struct sock *nlsk;
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index fbfa4e4..9eca9ae 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -2,6 +2,7 @@
* The NFC Controller Interface is the communication protocol between an
* NFC Controller (NFCC) and a Device Host (DH).
*
+ * Copyright (C) 2014 Marvell International Ltd.
* Copyright (C) 2011 Texas Instruments, Inc.
*
* Written by Ilan Elias <ilane@ti.com>
@@ -65,19 +66,18 @@
#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
-#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06
+#define NCI_NFC_V_PASSIVE_POLL_MODE 0x06
#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
-#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86
/* NCI RF Technologies */
#define NCI_NFC_RF_TECHNOLOGY_A 0x00
#define NCI_NFC_RF_TECHNOLOGY_B 0x01
#define NCI_NFC_RF_TECHNOLOGY_F 0x02
-#define NCI_NFC_RF_TECHNOLOGY_15693 0x03
+#define NCI_NFC_RF_TECHNOLOGY_V 0x03
/* NCI Bit Rates */
#define NCI_NFC_BIT_RATE_106 0x00
@@ -87,6 +87,7 @@
#define NCI_NFC_BIT_RATE_1695 0x04
#define NCI_NFC_BIT_RATE_3390 0x05
#define NCI_NFC_BIT_RATE_6780 0x06
+#define NCI_NFC_BIT_RATE_26 0x20
/* NCI RF Protocols */
#define NCI_RF_PROTOCOL_UNKNOWN 0x00
@@ -95,6 +96,7 @@
#define NCI_RF_PROTOCOL_T3T 0x03
#define NCI_RF_PROTOCOL_ISO_DEP 0x04
#define NCI_RF_PROTOCOL_NFC_DEP 0x05
+#define NCI_RF_PROTOCOL_T5T 0x06
/* NCI RF Interfaces */
#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
@@ -328,6 +330,12 @@ struct rf_tech_specific_params_nfcf_poll {
__u8 sensf_res[18]; /* 16 or 18 Bytes */
} __packed;
+struct rf_tech_specific_params_nfcv_poll {
+ __u8 res_flags;
+ __u8 dsfid;
+ __u8 uid[8]; /* 8 Bytes */
+} __packed;
+
struct nci_rf_discover_ntf {
__u8 rf_discovery_id;
__u8 rf_protocol;
@@ -338,6 +346,7 @@ struct nci_rf_discover_ntf {
struct rf_tech_specific_params_nfca_poll nfca_poll;
struct rf_tech_specific_params_nfcb_poll nfcb_poll;
struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ struct rf_tech_specific_params_nfcv_poll nfcv_poll;
} rf_tech_specific_params;
__u8 ntf_type;
@@ -372,6 +381,7 @@ struct nci_rf_intf_activated_ntf {
struct rf_tech_specific_params_nfca_poll nfca_poll;
struct rf_tech_specific_params_nfcb_poll nfcb_poll;
struct rf_tech_specific_params_nfcf_poll nfcf_poll;
+ struct rf_tech_specific_params_nfcv_poll nfcv_poll;
} rf_tech_specific_params;
__u8 data_exch_rf_tech_and_mode;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 1f9a0f5..75d10e6 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -64,10 +64,11 @@ enum nci_state {
struct nci_dev;
struct nci_ops {
- int (*open)(struct nci_dev *ndev);
- int (*close)(struct nci_dev *ndev);
- int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
- int (*setup)(struct nci_dev *ndev);
+ int (*open)(struct nci_dev *ndev);
+ int (*close)(struct nci_dev *ndev);
+ int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
+ int (*setup)(struct nci_dev *ndev);
+ __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
};
#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 6da46dc..bc49967 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -20,11 +20,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
- unsigned long old_cl;
-
- old_cl = *clp;
- *clp = cl;
- return old_cl;
+ return xchg(clp, cl);
}
static inline unsigned long
@@ -137,7 +133,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, bool ovr);
-void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -170,6 +166,7 @@ struct tcf_ematch {
unsigned int datalen;
u16 matchid;
u16 flags;
+ struct net *net;
};
static inline int tcf_em_is_container(struct tcf_ematch *em)
@@ -233,12 +230,11 @@ struct tcf_ematch_tree {
struct tcf_ematch_ops {
int kind;
int datalen;
- int (*change)(struct tcf_proto *, void *,
+ int (*change)(struct net *net, void *,
int, struct tcf_ematch *);
int (*match)(struct sk_buff *, struct tcf_ematch *,
struct tcf_pkt_info *);
- void (*destroy)(struct tcf_proto *,
- struct tcf_ematch *);
+ void (*destroy)(struct tcf_ematch *);
int (*dump)(struct sk_buff *, struct tcf_ematch *);
struct module *owner;
struct list_head link;
@@ -248,7 +244,7 @@ int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
struct tcf_ematch_tree *);
-void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
+void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
struct tcf_pkt_info *);
@@ -305,7 +301,7 @@ struct tcf_ematch_tree {
};
#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
-#define tcf_em_tree_destroy(tp, t) do { (void)(t); } while(0)
+#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ec030cd..27a3383 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -50,7 +50,7 @@ typedef long psched_tdiff_t;
static inline psched_time_t psched_get_time(void)
{
- return PSCHED_NS2TICKS(ktime_to_ns(ktime_get()));
+ return PSCHED_NS2TICKS(ktime_get_ns());
}
static inline psched_tdiff_t
@@ -65,12 +65,12 @@ struct qdisc_watchdog {
};
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle);
static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
psched_time_t expires)
{
- qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
+ qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
}
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
@@ -99,7 +99,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock);
+ spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 620e086..d17ed6f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -6,6 +6,8 @@
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
+#include <linux/percpu.h>
+#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -58,6 +60,7 @@ struct Qdisc {
* multiqueue device.
*/
#define TCQ_F_WARN_NONWC (1 << 16)
+#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -83,9 +86,15 @@ struct Qdisc {
*/
unsigned long state;
struct sk_buff_head q;
- struct gnet_stats_basic_packed bstats;
+ union {
+ struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ } __packed;
unsigned int __state;
- struct gnet_stats_queue qstats;
+ union {
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+ } __packed;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
@@ -111,6 +120,21 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
+static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+{
+ return qdisc->flags & TCQ_F_ONETXQUEUE;
+}
+
+static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Non-BQL migrated drivers will return 0, too. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
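
These two helpers are what the new qdisc-layer bulk dequeue consults before chaining extra packets for xmit_more; a simplified sketch of the idea (not the exact sch_generic.c code):

/* Simplified illustration; the real loop lives in net/sched/sch_generic.c */
static struct sk_buff *dequeue_bulk_sketch(struct Qdisc *q,
					   const struct netdev_queue *txq)
{
	struct sk_buff *head = q->dequeue(q);
	struct sk_buff *tail = head;
	int bytelimit;

	if (!head || !qdisc_may_bulk(q))
		return head;

	bytelimit = qdisc_avail_bulklimit(txq);
	while (bytelimit > 0) {
		struct sk_buff *skb = q->dequeue(q);

		if (!skb)
			break;
		bytelimit -= qdisc_pkt_len(skb);
		tail->next = skb;	/* driver will see xmit_more set */
		tail = skb;
	}
	tail->next = NULL;
	return head;
}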
@@ -143,7 +167,7 @@ struct Qdisc_class_ops {
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
- struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -212,8 +236,8 @@ struct tcf_proto_ops {
struct tcf_proto {
/* Fast access part */
- struct tcf_proto *next;
- void *root;
+ struct tcf_proto __rcu *next;
+ void __rcu *root;
int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
@@ -225,6 +249,7 @@ struct tcf_proto {
struct Qdisc *q;
void *data;
const struct tcf_proto_ops *ops;
+ struct rcu_head rcu;
};
struct qdisc_skb_cb {
@@ -260,7 +285,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
- return qdisc->dev_queue->qdisc;
+ struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
+
+ return q;
}
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
@@ -377,7 +404,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
void tcf_destroy(struct tcf_proto *tp);
-void tcf_destroy_chain(struct tcf_proto **fl);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
/* Reset all TX qdiscs greater then index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -385,7 +412,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
struct Qdisc *qdisc;
for (; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
@@ -403,13 +430,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev)
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
unsigned int i;
+
+ rcu_read_lock();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- const struct Qdisc *q = txq->qdisc;
+ const struct Qdisc *q = rcu_dereference(txq->qdisc);
- if (q->q.qlen)
+ if (q->q.qlen) {
+ rcu_read_unlock();
return false;
+ }
}
+ rcu_read_unlock();
return true;
}
@@ -417,9 +449,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != txq->qdisc_sleeping)
+ if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
return true;
}
return false;
@@ -429,9 +462,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != &noop_qdisc)
+ if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
return false;
}
return true;
@@ -477,6 +511,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+ return q->flags & TCQ_F_CPUSTATS;
+}
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
const struct sk_buff *skb)
@@ -485,17 +523,62 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
+static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ struct gnet_stats_basic_cpu *bstats =
+ this_cpu_ptr(sch->cpu_bstats);
+
+ u64_stats_update_begin(&bstats->syncp);
+ bstats_update(&bstats->bstats, skb);
+ u64_stats_update_end(&bstats->syncp);
+}
+
static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
bstats_update(&sch->bstats, skb);
}
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+ sch->qstats.drops += count;
+}
+
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ sch->qstats.drops++;
+}
+
+static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+{
+ struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+
+ qstats->drops++;
+}
+
+static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
+{
+ sch->qstats.overlimits++;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff_head *list)
{
__skb_queue_tail(list, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -511,7 +594,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) {
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
}
@@ -530,7 +613,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
- sch->qstats.backlog -= len;
+ qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
return len;
}
@@ -549,7 +632,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue_tail(list);
if (likely(skb != NULL))
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
return skb;
}
@@ -631,14 +714,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
kfree_skb(skb);
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return NET_XMIT_DROP;
}
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
#ifdef CONFIG_NET_CLS_ACT
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index f22538e..d4a20d0 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -115,7 +115,7 @@ typedef enum {
* analysis of the state functions, but in reality just taken from
* thin air in the hopes that we don't trigger a kernel panic.
*/
-#define SCTP_MAX_NUM_COMMANDS 14
+#define SCTP_MAX_NUM_COMMANDS 20
typedef union {
void *zero_all; /* Set to NULL to clear the entire union */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index f1f27fd..8fd2f49 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -146,19 +146,15 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS(mib, field, addend) \
this_cpu_add(mib->mibs[field], addend)
-/*
- * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
- * to make @ptr a non-percpu pointer.
- */
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
this_cpu_inc(ptr[basefield##PKTS]); \
this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
- __typeof__(*mib->mibs) *ptr = mib->mibs; \
+ __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
__this_cpu_inc(ptr[basefield##PKTS]); \
__this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
diff --git a/include/net/sock.h b/include/net/sock.h
index 591e607..7db3db1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1569,7 +1569,12 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
void sock_wfree(struct sk_buff *skb);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
+void sock_efree(struct sk_buff *skb);
+#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
+#else
+#define sock_edemux(skb) sock_efree(skb)
+#endif
int sock_setsockopt(struct socket *sock, int level, int op,
char __user *optval, unsigned int optlen);
@@ -2036,6 +2041,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
/*
* Recover an error report and clear atomically
@@ -2188,6 +2194,8 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk->sk_stamp = skb->tstamp;
}
+void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
@@ -2195,7 +2203,13 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
*
* Note : callers should take care of initial *tx_flags value (usually 0)
*/
-void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+{
+ if (unlikely(sk->sk_tsflags))
+ __sock_tx_timestamp(sk, tx_flags);
+ if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
+ *tx_flags |= SKBTX_WIFI_STATUS;
+}
/**
* sk_eat_skb - Release a skb if it is no longer needed
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7523c32..74efeda 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -669,6 +669,12 @@ void tcp_send_window_probe(struct sock *sk);
*/
#define tcp_time_stamp ((__u32)(jiffies))
+static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
+{
+ return skb->skb_mstamp.stamp_jiffies;
+}
+
+
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
#define TCPHDR_FIN 0x01
@@ -687,15 +693,18 @@ void tcp_send_window_probe(struct sock *sk);
* If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
*/
struct tcp_skb_cb {
- union {
- struct inet_skb_parm h4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_skb_parm h6;
-#endif
- } header; /* For incoming frames */
__u32 seq; /* Starting sequence number */
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
- __u32 when; /* used to compute rtt's */
+ union {
+ /* Note : tcp_tw_isn is used in input path only
+ * (isn chosen by tcp_timewait_state_process())
+ *
+ * tcp_gso_segs is used in write queue only,
+ * cf tcp_skb_pcount()
+ */
+ __u32 tcp_tw_isn;
+ __u32 tcp_gso_segs;
+ };
__u8 tcp_flags; /* TCP header flags. (tcp[13]) */
__u8 sacked; /* State flags for SACK/FACK. */
@@ -711,33 +720,32 @@ struct tcp_skb_cb {
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
/* 1 byte hole */
__u32 ack_seq; /* Sequence number ACK'd */
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header; /* For incoming frames */
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
-/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
- *
- * If we receive a SYN packet with these bits set, it means a network is
- * playing bad games with TOS bits. In order to avoid possible false congestion
- * notifications, we disable TCP ECN negociation.
+/* Due to TSO, an SKB can be composed of multiple actual
+ * packets. To keep these tracked properly, we use this.
*/
-static inline void
-TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
- struct net *net)
+static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
- const struct tcphdr *th = tcp_hdr(skb);
+ return TCP_SKB_CB(skb)->tcp_gso_segs;
+}
- if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
- INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
- inet_rsk(req)->ecn_ok = 1;
+static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
+{
+ TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}
-/* Due to TSO, an SKB can be composed of multiple actual
- * packets. To keep these tracked properly, we use this.
- */
-static inline int tcp_skb_pcount(const struct sk_buff *skb)
+static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
- return skb_shinfo(skb)->gso_segs;
+ TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}
/* This is valid iff tcp_skb_pcount() > 1. */
@@ -752,8 +760,17 @@ enum tcp_ca_event {
CA_EVENT_CWND_RESTART, /* congestion window restart */
CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
CA_EVENT_LOSS, /* loss timeout */
- CA_EVENT_FAST_ACK, /* in sequence ack */
- CA_EVENT_SLOW_ACK, /* other ack */
+ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
+ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
+ CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
+ CA_EVENT_NON_DELAYED_ACK,
+};
+
+/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
+enum tcp_ca_ack_event_flags {
+ CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
+ CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
+ CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
};
/*
@@ -763,7 +780,10 @@ enum tcp_ca_event {
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
+/* Requires ECN/ECT set on all packets */
+#define TCP_CONG_NEEDS_ECN 0x2
struct tcp_congestion_ops {
struct list_head list;
@@ -782,6 +802,8 @@ struct tcp_congestion_ops {
void (*set_state)(struct sock *sk, u8 new_state);
/* call when cwnd event occurs (optional) */
void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+ /* call when ack arrives (optional) */
+ void (*in_ack_event)(struct sock *sk, u32 flags);
/* new value of cwnd after loss (optional) */
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
@@ -796,6 +818,7 @@ struct tcp_congestion_ops {
int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
@@ -804,14 +827,20 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
-int tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
-extern struct tcp_congestion_ops tcp_init_congestion_ops;
u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+static inline bool tcp_ca_needs_ecn(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
+}
+
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
struct inet_connection_sock *icsk = inet_csk(sk);
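
A congestion control module that wants the new ACK hook would fill it in alongside the mandatory callbacks; a hedged sketch (the example_* names are placeholders, the reno helpers are the ones declared above):

/* Placeholder sketch of registering a CA module using in_ack_event. */
static void example_in_ack_event(struct sock *sk, u32 flags)
{
	if (flags & CA_ACK_ECE) {
		/* peer echoed an ECN mark; adjust state here */
	}
}

static struct tcp_congestion_ops example_ca __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.flags		= TCP_CONG_NEEDS_ECN,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.in_ack_event	= example_in_ack_event,
};

static int __init example_ca_init(void)
{
	return tcp_register_congestion_control(&example_ca);
}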
diff --git a/include/net/udp.h b/include/net/udp.h
index 70f9413..07f9b70 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len);
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+ struct udphdr *uh);
+int udp_gro_complete(struct sk_buff *skb, int nhoff);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+ unsigned int hlen, off;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*uh);
+ uh = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, hlen))
+ uh = skb_gro_header_slow(skb, hlen, off);
+
+ return uh;
+}
+
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline void udp_lib_hash(struct sock *sk)
{
@@ -221,7 +239,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features);
+ netdev_features_t features,
+ bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index ffd69cb..a47790b 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -1,6 +1,14 @@
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
+
struct udp_port_cfg {
u8 family;
@@ -26,7 +34,80 @@ struct udp_port_cfg {
use_udp6_rx_checksums:1;
};
-int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
- struct socket **sockp);
+int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp);
+#else
+static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ return 0;
+}
+#endif
+
+static inline int udp_sock_create(struct net *net,
+ struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ if (cfg->family == AF_INET)
+ return udp_sock_create4(net, cfg, sockp);
+
+ if (cfg->family == AF_INET6)
+ return udp_sock_create6(net, cfg, sockp);
+
+ return -EPFNOSUPPORT;
+}
+
+typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+
+struct udp_tunnel_sock_cfg {
+ void *sk_user_data; /* user data used by encap_rcv call back */
+ /* Used for setting up udp_sock fields, see udp.h for details */
+ __u8 encap_type;
+ udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_destroy_t encap_destroy;
+};
+
+/* Setup the given (UDP) sock to receive UDP encapsulated packets */
+void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ struct udp_tunnel_sock_cfg *sock_cfg);
+
+/* Transmit the skb using UDP encapsulation. */
+int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst,
+ __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
+ __be16 dst_port, bool xnet);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
+ struct sk_buff *skb, struct net_device *dev,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be16 src_port,
+ __be16 dst_port);
+#endif
+
+void udp_tunnel_sock_release(struct socket *sock);
+
+static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
+ bool udp_csum)
+{
+ int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+
+ return iptunnel_handle_offloads(skb, udp_csum, type);
+}
+
+static inline void udp_tunnel_encap_enable(struct socket *sock)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sock->sk->sk_family == PF_INET6)
+ ipv6_stub->udpv6_encap_enable();
+ else
+#endif
+ udp_encap_enable();
+}
#endif
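
Put together, a tunnel implementation would typically create the kernel socket and then hand it to the setup helper; a minimal sketch under assumptions (my_encap_rcv, the port number and the encap_type value are placeholders):

/* Placeholder sketch of bringing up a UDP tunnel socket. */
static int my_tunnel_open(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg port_cfg = {
		.family		= AF_INET,
		.local_udp_port	= htons(4789),	/* placeholder port */
	};
	struct udp_tunnel_sock_cfg tnl_cfg = {
		.encap_type	= 1,		/* placeholder UDP_ENCAP_* value */
		.encap_rcv	= my_encap_rcv,	/* hypothetical callback */
	};
	int err;

	err = udp_sock_create(net, &port_cfg, sockp);
	if (err < 0)
		return err;

	setup_udp_tunnel_sock(net, *sockp, &tnl_cfg);
	return 0;
}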
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 721e9c3..dc4865e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1591,6 +1591,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
+void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);