author     Linus Torvalds <torvalds@linux-foundation.org>   2016-03-19 10:05:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-03-19 10:05:34 -0700
commit     1200b6809dfd9d73bc4c7db76d288c35fa4b2ebe (patch)
tree       552e03de245cdbd0780ca1215914edc4a26540f7 /include
parent     6b5f04b6cf8ebab9a65d9c0026c650bb2538fd0f (diff)
parent     fe30937b65354c7fec244caebbdaae68e28ca797 (diff)
download   op-kernel-dev-1200b6809dfd9d73bc4c7db76d288c35fa4b2ebe.zip
           op-kernel-dev-1200b6809dfd9d73bc4c7db76d288c35fa4b2ebe.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Highlights:
1) Support more Realtek wireless chips, from Jes Sorensen.
2) New BPF types for per-cpu hash and array maps, from Alexei
Starovoitov.
3) Make several TCP sysctls per-namespace, from Nikolay Borisov.
4) Allow the use of SO_REUSEPORT in order to do per-thread processing
of incoming TCP/UDP connections. The muxing can be done using a
BPF program which hashes the incoming packet. From Craig Gallek.
[A user-space sketch of this follows the highlights list.]
5) Add a multiplexer for TCP streams, to provide a message-based
interface. BPF programs can be used to determine the message
boundaries. From Tom Herbert.
6) Add 802.1AE MACSEC support, from Sabrina Dubroca.
7) Avoid factorial complexity when taking down an inetdev interface
with lots of configured addresses. We were doing things like
traversing the entire address list for each address removed, and
flushing the entire netfilter conntrack table for every address as
well.
8) Add and use SKB bulk free infrastructure, from Jesper Brouer.
9) Allow offloading u32 classifiers to hardware, and implement for
ixgbe, from John Fastabend.
10) Allow configuring IRQ coalescing parameters on a per-queue basis,
from Kan Liang. [A driver-side sketch follows the highlights list.]
11) Extend ethtool so that larger link mode masks can be supported.
From David Decotigny.
12) Introduce devlink, which can be used to configure port link types
(ethernet vs Infiniband, etc.), port splitting, and switch device
level attributes as a whole. From Jiri Pirko.
13) Hardware offload support for flower classifiers, from Amir Vadai.
14) Add "Local Checksum Offload". Basically, for a tunneled packet
the checksum of the outer header is 'constant' (because with the
checksum field filled into the inner protocol header, the payload
of the outer frame checksums to 'zero'), and we can take advantage
of that in various ways. From Edward Cree"
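
For item 4 above, the new SO_ATTACH_REUSEPORT_CBPF socket option lets a classic
BPF program choose which member of a SO_REUSEPORT group receives each incoming
connection or datagram. The sketch below is not taken from the merge: it is a
minimal user-space example, with error handling omitted and the steer-by-CPU
policy chosen purely for illustration.

```c
#include <arpa/inet.h>
#include <linux/filter.h>
#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SO_ATTACH_REUSEPORT_CBPF
/* value from asm-generic/socket.h; some architectures use different numbers */
#define SO_ATTACH_REUSEPORT_CBPF 51
#endif

static int reuseport_listener(uint16_t port)
{
	/* A = current CPU; return A as the index into the reuseport group */
	struct sock_filter code[] = {
		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
		{ BPF_RET | BPF_A, 0, 0, 0 },
	};
	struct sock_fprog prog = { .len = 2, .filter = code };
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	/* attach once; the program applies to the whole reuseport group */
	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &prog, sizeof(prog));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 128);
	return fd;
}
```

The program's return value is used as an index into the group; an eBPF flavour
of the option (SO_ATTACH_REUSEPORT_EBPF, taking a program fd) also exists.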
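
For item 10, drivers opt in by implementing the two new ethtool_ops hooks that
appear in the include/linux/ethtool.h hunk below. The following driver-side
sketch is hypothetical (foo_priv, its queue counts and the per-queue usecs
arrays are invented, and only the usecs fields are shown for brevity); it just
follows the rules in the new kerneldoc: return -EINVAL when the queue number
matches neither an RX nor a TX queue, and set inapplicable fields to ~0.

```c
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	unsigned int num_rx_queues;
	unsigned int num_tx_queues;
	u32 *rx_usecs;			/* one entry per RX queue */
	u32 *tx_usecs;			/* one entry per TX queue */
};

static int foo_get_per_queue_coalesce(struct net_device *dev, u32 queue,
				      struct ethtool_coalesce *ec)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* neither an RX nor a TX queue has this number */
	if (queue >= priv->num_rx_queues && queue >= priv->num_tx_queues)
		return -EINVAL;

	/* inapplicable direction gets ~0, per the new kerneldoc */
	ec->rx_coalesce_usecs = queue < priv->num_rx_queues ?
				priv->rx_usecs[queue] : ~0;
	ec->tx_coalesce_usecs = queue < priv->num_tx_queues ?
				priv->tx_usecs[queue] : ~0;
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_per_queue_coalesce	= foo_get_per_queue_coalesce,
};
```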
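
Item 14 rests on a property of the Internet checksum: once the correct value
is written into the checksum field, the 16-bit ones'-complement sum over the
covered region is 0xFFFF no matter what the payload bytes are, so the inner
packet contributes a constant to the outer header's checksum. A standalone
user-space illustration of that arithmetic (not kernel code; the "checksum
field" offset of 16 is arbitrary here):

```c
#include <stdint.h>
#include <stdio.h>

/* 16-bit ones'-complement sum over a byte buffer (big-endian word order) */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t seg[32] = "payload bytes covered by csum";
	uint16_t c;

	/* compute the checksum with the 2-byte checksum field zeroed ... */
	seg[16] = seg[17] = 0;
	c = ~csum16(seg, sizeof(seg));
	/* ... store it; the region now sums to 0xFFFF ("checksums to zero") */
	seg[16] = c >> 8;
	seg[17] = c & 0xff;
	printf("sum over checksummed region = 0x%04x\n", csum16(seg, sizeof(seg)));
	return 0;
}
```

This constant-sum property is what Local Checksum Offload exploits: the outer
tunnel checksum can be derived without walking the payload again.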
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1548 commits)
bonding: fix bond_get_stats()
net: bcmgenet: fix dma api length mismatch
net/mlx4_core: Fix backward compatibility on VFs
phy: mdio-thunder: Fix some Kconfig typos
lan78xx: add ndo_get_stats64
lan78xx: handle statistics counter rollover
RDS: TCP: Remove unused constant
RDS: TCP: Add sysctl tunables for sndbuf/rcvbuf on rds-tcp socket
net: smc911x: convert pxa dma to dmaengine
team: remove duplicate set of flag IFF_MULTICAST
bonding: remove duplicate set of flag IFF_MULTICAST
net: fix a comment typo
ethernet: micrel: fix some error codes
ip_tunnels, bpf: define IP_TUNNEL_OPTS_MAX and use it
bpf, dst: add and use dst_tclassid helper
bpf: make skb->tc_classid also readable
net: mvneta: bm: clarify dependencies
cls_bpf: reset class and reuse major in da
ldmvsw: Checkpatch sunvnet.c and sunvnet_common.c
ldmvsw: Add ldmvsw.c driver code
...
Diffstat (limited to 'include')
130 files changed, 4241 insertions, 954 deletions
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 59811df..3150cbd 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -65,14 +65,14 @@ static inline __sum16 csum_fold(__wsum csum) * returns a 16-bit checksum, already complemented */ extern __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum); +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum); #endif #ifndef csum_tcpudp_magic static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h index ebc7a7b..de44109 100644 --- a/include/dt-bindings/clock/rk3036-cru.h +++ b/include/dt-bindings/clock/rk3036-cru.h @@ -54,6 +54,7 @@ #define SCLK_PVTM_VIDEO 125 #define SCLK_MAC 151 #define SCLK_MACREF 152 +#define SCLK_MACPLL 153 #define SCLK_SFC 160 /* aclk gates */ @@ -92,6 +93,7 @@ #define HCLK_SDMMC 456 #define HCLK_SDIO 457 #define HCLK_EMMC 459 +#define HCLK_MAC 460 #define HCLK_I2S 462 #define HCLK_LCDC 465 #define HCLK_ROM 467 diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 3feb1b2..0367c63 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -151,6 +151,8 @@ struct bcma_host_ops { #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ #define BCMA_CORE_USB30_DEV 0x83D #define BCMA_CORE_ARM_CR4 0x83E +#define BCMA_CORE_GCI 0x840 +#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */ #define BCMA_CORE_ARM_CA7 0x847 #define BCMA_CORE_SYS_MEM 0x849 #define BCMA_CORE_DEFAULT 0xFFF @@ -199,6 +201,7 @@ struct bcma_host_ops { #define BCMA_PKG_ID_BCM4707 1 #define BCMA_PKG_ID_BCM4708 2 #define BCMA_PKG_ID_BCM4709 0 +#define BCMA_CHIP_ID_BCM47094 53030 #define BCMA_CHIP_ID_BCM53018 53018 /* Board types (on PCI usually equals to the subsystem dev id) */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index db51a6f..846513c 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -217,6 +217,11 @@ #define BCMA_CC_CLKDIV_JTAG_SHIFT 8 #define BCMA_CC_CLKDIV_UART 0x000000FF #define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ +#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001 +#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002 +#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004 +#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */ +#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040 #define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ #define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ #define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ @@ -351,12 +356,12 @@ #define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ #define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ #define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ -#define BCMA_CC_CHIPCTL_ADDR 0x0650 -#define BCMA_CC_CHIPCTL_DATA 0x0654 -#define BCMA_CC_REGCTL_ADDR 0x0658 -#define BCMA_CC_REGCTL_DATA 0x065C -#define BCMA_CC_PLLCTL_ADDR 0x0660 -#define BCMA_CC_PLLCTL_DATA 0x0664 +#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650 +#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654 +#define BCMA_CC_PMU_REGCTL_ADDR 0x0658 +#define BCMA_CC_PMU_REGCTL_DATA 0x065C +#define 
BCMA_CC_PMU_PLLCTL_ADDR 0x0660 +#define BCMA_CC_PMU_PLLCTL_DATA 0x0664 #define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ #define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ #define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF @@ -566,17 +571,16 @@ * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) */ struct bcma_chipcommon_pmu { + struct bcma_device *core; /* Can be separated core or just ChipCommon one */ u8 rev; /* PMU revision */ u32 crystalfreq; /* The active crystal frequency (in kHz) */ }; -#ifdef CONFIG_BCMA_DRIVER_MIPS +#ifdef CONFIG_BCMA_PFLASH struct bcma_pflash { bool present; - u8 buswidth; - u32 window; - u32 window_size; }; +#endif #ifdef CONFIG_BCMA_SFLASH struct mtd_info; @@ -600,6 +604,7 @@ struct bcma_nflash { }; #endif +#ifdef CONFIG_BCMA_DRIVER_MIPS struct bcma_serial_port { void *regs; unsigned long clockspeed; @@ -619,8 +624,9 @@ struct bcma_drv_cc { /* Fast Powerup Delay constant */ u16 fast_pwrup_delay; struct bcma_chipcommon_pmu pmu; -#ifdef CONFIG_BCMA_DRIVER_MIPS +#ifdef CONFIG_BCMA_PFLASH struct bcma_pflash pflash; +#endif #ifdef CONFIG_BCMA_SFLASH struct bcma_sflash sflash; #endif @@ -628,6 +634,7 @@ struct bcma_drv_cc { struct bcma_nflash nflash; #endif +#ifdef CONFIG_BCMA_DRIVER_MIPS int nr_serial_ports; struct bcma_serial_port serial_ports[4]; #endif /* CONFIG_BCMA_DRIVER_MIPS */ @@ -660,6 +667,19 @@ struct bcma_drv_cc_b { #define bcma_cc_maskset32(cc, offset, mask, set) \ bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) +/* PMU registers access */ +#define bcma_pmu_read32(cc, offset) \ + bcma_read32((cc)->pmu.core, offset) +#define bcma_pmu_write32(cc, offset, val) \ + bcma_write32((cc)->pmu.core, offset, val) + +#define bcma_pmu_mask32(cc, offset, mask) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask)) +#define bcma_pmu_set32(cc, offset, set) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set)) +#define bcma_pmu_maskset32(cc, offset, mask, set) \ + bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set)) + extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 9653fdb..e9b0b9a 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -59,6 +59,8 @@ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) + * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) */ /* @@ -163,6 +165,14 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); +extern unsigned int bitmap_from_u32array(unsigned long *bitmap, + unsigned int nbits, + const u32 *buf, + unsigned int nwords); +extern unsigned int bitmap_to_u32array(u32 *buf, + unsigned int nwords, + const unsigned long *bitmap, + unsigned int nbits); #ifdef __BIG_ENDIAN extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else diff --git 
a/include/linux/bpf.h b/include/linux/bpf.h index 83d1926..21ee41b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -10,6 +10,7 @@ #include <uapi/linux/bpf.h> #include <linux/workqueue.h> #include <linux/file.h> +#include <linux/percpu.h> struct bpf_map; @@ -36,6 +37,7 @@ struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; + u32 map_flags; u32 pages; struct user_struct *user; const struct bpf_map_ops *ops; @@ -65,6 +67,7 @@ enum bpf_arg_type { */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ + ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ @@ -151,6 +154,7 @@ struct bpf_array { union { char value[0] __aligned(8); void *ptrs[0] __aligned(8); + void __percpu *pptrs[0] __aligned(8); }; }; #define MAX_TAIL_CALL_CNT 32 @@ -161,6 +165,8 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f const struct bpf_func_proto *bpf_get_trace_printk_proto(void); #ifdef CONFIG_BPF_SYSCALL +DECLARE_PER_CPU(int, bpf_prog_active); + void bpf_register_prog_type(struct bpf_prog_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl); @@ -173,6 +179,7 @@ struct bpf_map *__bpf_map_get(struct fd f); void bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); +int bpf_map_precharge_memlock(u32 pages); extern int sysctl_unprivileged_bpf_disabled; @@ -182,6 +189,30 @@ int bpf_prog_new_fd(struct bpf_prog *prog); int bpf_obj_pin_user(u32 ufd, const char __user *pathname); int bpf_obj_get_user(const char __user *pathname); +int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, + u64 flags); +int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, + u64 flags); +int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); + +/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and + * forced to use 'long' read/writes to try to atomically copy long counters. + * Best-effort only. No barriers here, since it _will_ race with concurrent + * updates from BPF programs. Called from bpf syscall and mostly used with + * size 8 or 16 bytes, so ask compiler to inline it. + */ +static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) +{ + const long *lsrc = src; + long *ldst = dst; + + size /= sizeof(long); + while (size--) + *ldst++ = *lsrc++; +} + /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else @@ -213,6 +244,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_skb_vlan_push_proto; extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto; /* Shared helpers among cBPF and eBPF. 
*/ void bpf_user_rnd_init_once(void); diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 653dc9c..e2b7bf2 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -12,6 +12,7 @@ #ifndef _LINUX_ETHTOOL_H #define _LINUX_ETHTOOL_H +#include <linux/bitmap.h> #include <linux/compat.h> #include <uapi/linux/ethtool.h> @@ -40,9 +41,6 @@ struct compat_ethtool_rxnfc { #include <linux/rculist.h> -extern int __ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd); - /** * enum ethtool_phys_id_state - indicator state for physical identification * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated @@ -97,13 +95,70 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) return index % n_rx_rings; } +/* number of link mode bits/ulongs handled internally by kernel */ +#define __ETHTOOL_LINK_MODE_MASK_NBITS \ + (__ETHTOOL_LINK_MODE_LAST + 1) + +/* declare a link mode bitmap */ +#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ + DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) + +/* drivers must ignore base.cmd and base.link_mode_masks_nwords + * fields, but they are allowed to overwrite them (will be ignored). + */ +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + } link_modes; +}; + +/** + * ethtool_link_ksettings_zero_link_mode - clear link_ksettings link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name) \ + bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * ethtool_link_ksettings_add_link_mode - set bit in link_ksettings + * link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \ + __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + +/** + * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + * + * Returns true/false. + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \ + test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + +extern int +__ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings); + /** * struct ethtool_ops - optional netdev operations - * @get_settings: Get various device settings including Ethernet link + * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Get various device settings including Ethernet link * settings. The @cmd parameter is expected to have been cleared - * before get_settings is called. Returns a negative error code or - * zero. - * @set_settings: Set various device settings including Ethernet link + * before get_settings is called. Returns a negative error code + * or zero. + * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Set various device settings including Ethernet link * settings. Returns a negative error code or zero. 
* @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. If not @@ -201,6 +256,29 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * @get_module_eeprom: Get the eeprom information from the plug-in module * @get_eee: Get Energy-Efficient (EEE) supported and status. * @set_eee: Set EEE status (enable/disable) as well as LPI timers. + * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue. + * It must check that the given queue number is valid. If neither a RX nor + * a TX queue has this number, return -EINVAL. If only a RX queue or a TX + * queue has this number, set the inapplicable fields to ~0 and return 0. + * Returns a negative error code or zero. + * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue. + * It must check that the given queue number is valid. If neither a RX nor + * a TX queue has this number, return -EINVAL. If only a RX queue or a TX + * queue has this number, ignore the inapplicable fields. + * Returns a negative error code or zero. + * @get_link_ksettings: When defined, takes precedence over the + * %get_settings method. Get various device settings + * including Ethernet link settings. The %cmd and + * %link_mode_masks_nwords fields should be ignored (use + * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any + * change to them will be overwritten by kernel. Returns a + * negative error code or zero. + * @set_link_ksettings: When defined, takes precedence over the + * %set_settings method. Set various device settings including + * Ethernet link settings. The %cmd and %link_mode_masks_nwords + * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS + * instead of the latter), any change to them will be overwritten + * by kernel. Returns a negative error code or zero. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. 
Callers must @@ -279,7 +357,13 @@ struct ethtool_ops { const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); - - + int (*get_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, + struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, + const struct ethtool_link_ksettings *); }; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 452c0b0..3b1f6ce 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ #define IEEE80211_MAX_FRAME_LEN 2352 +/* Maximal size of an A-MSDU */ +#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839 +#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935 + +#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895 +#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991 +#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454 + #define IEEE80211_MAX_SSID_LEN 32 #define IEEE80211_MAX_MESH_ID_LEN 32 @@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits { }; #define WLAN_SA_QUERY_TR_ID_LEN 2 +#define WLAN_MEMBERSHIP_LEN 8 +#define WLAN_USER_POSITION_LEN 16 /** * struct ieee80211_tpc_report_ie @@ -991,6 +1001,11 @@ struct ieee80211_mgmt { } __packed vht_opmode_notif; struct { u8 action_code; + u8 membership[WLAN_MEMBERSHIP_LEN]; + u8 position[WLAN_USER_POSITION_LEN]; + } __packed vht_group_notif; + struct { + u8 action_code; u8 dialog_token; u8 tpc_elem_id; u8 tpc_elem_length; @@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation { #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002 +#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C @@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode { #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) +/* Defines the maximal number of MSDUs in an A-MSDU. 
*/ +#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7) +#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0) + +/* + * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY + * information element + */ +#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7) + /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index a338a68..dcb89e3 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -46,10 +46,6 @@ struct br_ip_list { #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) -/* values as per ieee8021QBridgeFdbAgingTime */ -#define BR_MIN_AGEING_TIME (10 * HZ) -#define BR_MAX_AGEING_TIME (1000000 * HZ) - #define BR_DEFAULT_AGEING_TIME (300 * HZ) extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index b84e49c..174f43f 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -24,6 +24,7 @@ struct team_pcpu_stats { struct u64_stats_sync syncp; u32 rx_dropped; u32 tx_dropped; + u32 rx_nohandler; }; struct team; diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 9c9de11..12f6fba 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -37,11 +37,6 @@ static inline struct igmpv3_query * return (struct igmpv3_query *)skb_transport_header(skb); } -extern int sysctl_igmp_llm_reports; -extern int sysctl_igmp_max_memberships; -extern int sysctl_igmp_max_msf; -extern int sysctl_igmp_qrv; - struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h deleted file mode 100644 index 9a715cf..0000000 --- a/include/linux/inet_lro.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * linux/include/linux/inet_lro.h - * - * Large Receive Offload (ipv4 / tcp) - * - * (C) Copyright IBM Corp. 2007 - * - * Authors: - * Jan-Bernd Themann <themann@de.ibm.com> - * Christoph Raisch <raisch@de.ibm.com> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef __INET_LRO_H_ -#define __INET_LRO_H_ - -#include <net/ip.h> -#include <net/tcp.h> - -/* - * LRO statistics - */ - -struct net_lro_stats { - unsigned long aggregated; - unsigned long flushed; - unsigned long no_desc; -}; - -/* - * LRO descriptor for a tcp session - */ -struct net_lro_desc { - struct sk_buff *parent; - struct sk_buff *last_skb; - struct skb_frag_struct *next_frag; - struct iphdr *iph; - struct tcphdr *tcph; - __wsum data_csum; - __be32 tcp_rcv_tsecr; - __be32 tcp_rcv_tsval; - __be32 tcp_ack; - u32 tcp_next_seq; - u32 skb_tot_frags_len; - u16 ip_tot_len; - u16 tcp_saw_tstamp; /* timestamps enabled */ - __be16 tcp_window; - int pkt_aggr_cnt; /* counts aggregated packets */ - int vlan_packet; - int mss; - int active; -}; - -/* - * Large Receive Offload (LRO) Manager - * - * Fields must be set by driver - */ - -struct net_lro_mgr { - struct net_device *dev; - struct net_lro_stats stats; - - /* LRO features */ - unsigned long features; -#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */ -#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted - from received packets and eth protocol - is still ETH_P_8021Q */ - - /* - * Set for generated SKBs that are not added to - * the frag list in fragmented mode - */ - u32 ip_summed; - u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY - * or CHECKSUM_NONE */ - - int max_desc; /* Max number of LRO descriptors */ - int max_aggr; /* Max number of LRO packets to be aggregated */ - - int frag_align_pad; /* Padding required to properly align layer 3 - * headers in generated skb when using frags */ - - struct net_lro_desc *lro_arr; /* Array of LRO descriptors */ - - /* - * Optimized driver functions - * - * get_skb_header: returns tcp and ip header for packet in SKB - */ - int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr, - void **tcpudp_hdr, u64 *hdr_flags, void *priv); - - /* hdr_flags: */ -#define LRO_IPV4 1 /* ip_hdr is IPv4 header */ -#define LRO_TCP 2 /* tcpudp_hdr is TCP header */ - - /* - * get_frag_header: returns mac, tcp and ip header for packet in SKB - * - * @hdr_flags: Indicate what kind of LRO has to be done - * (IPv4/IPv6/TCP/UDP) - */ - int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr, - void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, - void *priv); -}; - -/* - * Processes a SKB - * - * @lro_mgr: LRO manager to use - * @skb: SKB to aggregate - * @priv: Private data that may be used by driver functions - * (for example get_tcp_ip_hdr) - */ - -void lro_receive_skb(struct net_lro_mgr *lro_mgr, - struct sk_buff *skb, - void *priv); -/* - * Forward all aggregated SKBs held by lro_mgr to network stack - */ - -void lro_flush_all(struct net_lro_mgr *lro_mgr); - -#endif diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 402753b..7edc14f 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -50,16 +50,19 @@ struct ipv6_devconf { __s32 mc_forwarding; #endif __s32 disable_ipv6; + __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; struct ipv6_stable_secret { bool initialized; struct in6_addr secret; } stable_secret; __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; void *sysctl; }; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f4fa2b2..b82646e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -64,7 +64,7 @@ #define round_down(x, y) ((x) & ~__round_mask(x, y)) #define FIELD_SIZEOF(t, 
f) (sizeof(((t*)0)->f)) -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_UP_ULL(ll,d) \ ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; }) diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 1f7bc63..ea34a86 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -69,6 +69,9 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo int mvebu_mbus_save_cpu_target(u32 *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); +int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr); +int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, + u8 *attr); int mvebu_mbus_add_window_remap_by_id(unsigned int target, unsigned int attribute, phys_addr_t base, size_t size, diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 2e8af00..bd0e707 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -33,6 +33,7 @@ #ifndef MLX4_DRIVER_H #define MLX4_DRIVER_H +#include <net/devlink.h> #include <linux/mlx4/device.h> struct mlx4_dev; @@ -89,6 +90,8 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p); void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); +struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port); + static inline u64 mlx4_mac_to_u64(u8 *addr) { u64 mac = 0; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 9566b3b..02ac300 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -374,6 +374,12 @@ enum { }; enum { + MLX5_BW_NO_LIMIT = 0, + MLX5_100_MBPS_UNIT = 3, + MLX5_GBPS_UNIT = 4, +}; + +enum { MLX5_MAX_PAGE_SHIFT = 31 }; @@ -1200,6 +1206,17 @@ enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, }; +enum mlx5_wol_mode { + MLX5_WOL_DISABLE = 0, + MLX5_WOL_SECURED_MAGIC = 1 << 1, + MLX5_WOL_MAGIC = 1 << 2, + MLX5_WOL_ARP = 1 << 3, + MLX5_WOL_BROADCAST = 1 << 4, + MLX5_WOL_MULTICAST = 1 << 5, + MLX5_WOL_UNICAST = 1 << 6, + MLX5_WOL_PHY_ACTIVITY = 1 << 7, +}; + /* MLX5 DEV CAPs */ /* TODO: EAT.ME */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9108904..3a95446 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -54,7 +54,7 @@ enum { /* one minute for the sake of bringup. 
Generally, commands must always * complete and we may need to increase this timeout value */ - MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000, + MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; @@ -99,6 +99,8 @@ enum { }; enum { + MLX5_REG_QETCR = 0x4005, + MLX5_REG_QTCT = 0x400a, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -458,8 +460,6 @@ struct mlx5_priv { struct mlx5_uuar_info uuari; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); - struct io_mapping *bf_mapping; - /* pages stuff */ struct workqueue_struct *pg_wq; struct rb_root page_root; @@ -717,7 +717,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, + bool map_wc); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); @@ -796,37 +797,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); -int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, - int ptys_size, int proto_mask, u8 local_port); -int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, - u32 *proto_cap, int proto_mask); -int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, - u32 *proto_admin, int proto_mask); -int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, - u8 *link_width_oper, u8 local_port); -int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, int proto_mask, - u8 local_port); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask); -int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status status); -int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status *status); - -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, - u8 port); - -int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, - u8 *vl_hw_cap, u8 local_port); - -int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); -int mlx5_query_port_pause(struct mlx5_core_dev *dev, - u32 *rx_pause, u32 *tx_pause); - int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9b8a02b..e52730e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -166,6 +166,8 @@ enum { MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, + MLX5_CMD_OP_SET_WOL_ROL = 0x830, + MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, @@ -730,7 +732,19 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_1bf[0x3]; u8 
log_max_msg[0x5]; - u8 reserved_at_1c7[0x18]; + u8 reserved_at_1c7[0x4]; + u8 max_tc[0x4]; + u8 reserved_at_1cf[0x6]; + u8 rol_s[0x1]; + u8 rol_g[0x1]; + u8 reserved_at_1d7[0x1]; + u8 wol_s[0x1]; + u8 wol_g[0x1]; + u8 wol_a[0x1]; + u8 wol_b[0x1]; + u8 wol_m[0x1]; + u8 wol_u[0x1]; + u8 wol_p[0x1]; u8 stat_rate_support[0x10]; u8 reserved_at_1ef[0xc]; @@ -6911,6 +6925,54 @@ struct mlx5_ifc_mtt_bits { u8 rd_en[0x1]; }; +struct mlx5_ifc_query_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 rol_mode_valid[0x1]; + u8 wol_mode_valid[0x1]; + u8 reserved_at_42[0xe]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + enum { MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, @@ -7102,4 +7164,49 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_100[0x100]; }; +struct mlx5_ifc_ets_tcn_config_reg_bits { + u8 g[0x1]; + u8 b[0x1]; + u8 r[0x1]; + u8 reserved_at_3[0x9]; + u8 group[0x4]; + u8 reserved_at_10[0x9]; + u8 bw_allocation[0x7]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_ets_global_config_reg_bits { + u8 reserved_at_0[0x2]; + u8 r[0x1]; + u8 reserved_at_3[0x1d]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_qetc_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0x30]; + + struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8]; + struct mlx5_ifc_ets_global_config_reg_bits global_configuration; +}; + +struct mlx5_ifc_qtct_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0xd]; + u8 prio[0x3]; + + u8 reserved_at_20[0x1d]; + u8 tclass[0x3]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h new file mode 100644 index 0000000..a1d145a --- /dev/null +++ b/include/linux/mlx5/port.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_PORT_H__ +#define __MLX5_PORT_H__ + +#include <linux/mlx5/driver.h> + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask, u8 local_port); +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask); +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port); +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, int proto_mask, + u8 local_port); +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask); +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status); + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 port); + +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port); + +int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 *rx_pause, u32 *tx_pause); + +int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); +int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, + u8 *pfc_en_rx); + +int mlx5_max_tc(struct mlx5_core_dev *mdev); + +int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); +int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); +int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); +int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); +int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); + +#endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/net.h b/include/linux/net.h index 0b4ac7d..49175e4 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -215,6 +215,7 @@ int __sock_create(struct net *net, int family, int type, int proto, int sock_create(int family, int type, int proto, struct socket **res); int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res); int sock_create_lite(int family, int type, int proto, struct socket **res); +struct socket *sock_alloc(void); void sock_release(struct socket *sock); int sock_sendmsg(struct socket *sock, struct msghdr *msg); int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index d9654f0e..a734bf4 100644 --- 
a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -67,6 +67,8 @@ enum { NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ NETIF_F_BUSY_POLL_BIT, /* Busy poll */ + NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ + /* * Add your fresh new feature above and remember to update * netdev_features_strings[] in net/core/ethtool.c and maybe @@ -124,6 +126,7 @@ enum { #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) +#define NETIF_F_HW_TC __NETIF_F(HW_TC) #define for_each_netdev_feature(mask_addr, bit) \ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5440b7b..be693b3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -51,6 +51,7 @@ #include <linux/neighbour.h> #include <uapi/linux/netdevice.h> #include <uapi/linux/if_bonding.h> +#include <uapi/linux/pkt_cls.h> struct netpoll_info; struct device; @@ -267,6 +268,7 @@ struct header_ops { void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); + bool (*validate)(const char *ll_header, unsigned int len); }; /* These flag bits are private to the generic network queueing @@ -778,6 +780,27 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb); +/* These structures hold the attributes of qdisc and classifiers + * that are being passed to the netdevice through the setup_tc op. + */ +enum { + TC_SETUP_MQPRIO, + TC_SETUP_CLSU32, + TC_SETUP_CLSFLOWER, +}; + +struct tc_cls_u32_offload; + +struct tc_to_netdev { + unsigned int type; + union { + u8 tc; + struct tc_cls_u32_offload *cls_u32; + struct tc_cls_flower_offload *cls_flower; + }; +}; + + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -1073,6 +1096,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * This function is used to get egress tunnel information for given skb. * This is useful for retrieving outer tunnel header parameters while * sampling packet. + * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); + * This function is used to specify the headroom that the skb must + * consider when allocation skb during packet reception. Setting + * appropriate rx headroom value allows avoiding skb head copy on + * forward. Setting a negative value reset the rx headroom to the + * default value. 
* */ struct net_device_ops { @@ -1150,7 +1179,10 @@ struct net_device_ops { int (*ndo_set_vf_rss_query_en)( struct net_device *dev, int vf, bool setting); - int (*ndo_setup_tc)(struct net_device *dev, u8 tc); + int (*ndo_setup_tc)(struct net_device *dev, + u32 handle, + __be16 protocol, + struct tc_to_netdev *tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); @@ -1255,6 +1287,8 @@ struct net_device_ops { bool proto_down); int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); + void (*ndo_set_rx_headroom)(struct net_device *dev, + int needed_headroom); }; /** @@ -1291,6 +1325,10 @@ struct net_device_ops { * @IFF_OPENVSWITCH: device is a Open vSwitch master * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device * @IFF_TEAM: device is a team device + * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured + * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external + * entity (i.e. the master device for bridged veth) + * @IFF_MACSEC: device is a MACsec device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1318,6 +1356,9 @@ enum netdev_priv_flags { IFF_OPENVSWITCH = 1<<22, IFF_L3MDEV_SLAVE = 1<<23, IFF_TEAM = 1<<24, + IFF_RXFH_CONFIGURED = 1<<25, + IFF_PHONY_HEADROOM = 1<<26, + IFF_MACSEC = 1<<27, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1345,6 +1386,8 @@ enum netdev_priv_flags { #define IFF_OPENVSWITCH IFF_OPENVSWITCH #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE #define IFF_TEAM IFF_TEAM +#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED +#define IFF_MACSEC IFF_MACSEC /** * struct net_device - The DEVICE structure. @@ -1397,6 +1440,8 @@ enum netdev_priv_flags { * do not use this in drivers * @tx_dropped: Dropped packets by core network, * do not use this in drivers + * @rx_nohandler: nohandler dropped packets by core network on + * inactive devices, do not use this in drivers * * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, @@ -1420,8 +1465,7 @@ enum netdev_priv_flags { * @dma: DMA channel * @mtu: Interface MTU value * @type: Interface hardware type - * @hard_header_len: Hardware header length, which means that this is the - * minimum size of a packet. + * @hard_header_len: Maximum hardware header length. * * @needed_headroom: Extra headroom the hardware may need, but not in all * cases can this be guaranteed @@ -1611,6 +1655,7 @@ struct net_device { atomic_long_t rx_dropped; atomic_long_t tx_dropped; + atomic_long_t rx_nohandler; #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def * wireless_handlers; @@ -1908,6 +1953,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv); +/* returns the headroom that the master device needs to take in account + * when forwarding to this dev + */ +static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) +{ + return dev->priv_flags & IFF_PHONY_HEADROOM ? 
0 : dev->needed_headroom; +} + +static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) +{ + if (dev->netdev_ops->ndo_set_rx_headroom) + dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); +} + +/* set the device rx headroom to the dev's default */ +static inline void netdev_reset_rx_headroom(struct net_device *dev) +{ + netdev_set_rx_headroom(dev, -1); +} + /* * Net namespace inlines */ @@ -2627,6 +2692,24 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } +/* ll_header must have at least hard_header_len allocated */ +static inline bool dev_validate_header(const struct net_device *dev, + char *ll_header, int len) +{ + if (likely(len >= dev->hard_header_len)) + return true; + + if (capable(CAP_SYS_RAWIO)) { + memset(ll_header + len, 0, dev->hard_header_len - len); + return true; + } + + if (dev->header_ops && dev->header_ops->validate) + return dev->header_ops->validate(ll_header, len); + + return false; +} + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) @@ -3741,7 +3824,7 @@ void netdev_lower_state_changed(struct net_device *lower_dev, /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 -extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; +extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev, @@ -3965,6 +4048,11 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, skb->mac_len = mac_len; } +static inline bool netif_is_macsec(const struct net_device *dev) +{ + return dev->priv_flags & IFF_MACSEC; +} + static inline bool netif_is_macvlan(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN; @@ -4045,6 +4133,11 @@ static inline bool netif_is_lag_port(const struct net_device *dev) return netif_is_bond_slave(dev) || netif_is_team_port(dev); } +static inline bool netif_is_rxfh_configured(const struct net_device *dev) +{ + return dev->priv_flags & IFF_RXFH_CONFIGURED; +} + /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0ad5567..9230f9a 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -141,22 +141,6 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); #ifdef HAVE_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; - -static inline bool nf_hook_list_active(struct list_head *hook_list, - u_int8_t pf, unsigned int hook) -{ - if (__builtin_constant_p(pf) && - __builtin_constant_p(hook)) - return static_key_false(&nf_hooks_needed[pf][hook]); - - return !list_empty(hook_list); -} -#else -static inline bool nf_hook_list_active(struct list_head *hook_list, - u_int8_t pf, unsigned int hook) -{ - return !list_empty(hook_list); -} #endif int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); @@ -177,9 +161,18 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, int (*okfn)(struct net *, struct sock *, struct sk_buff *), int thresh) { - struct list_head *hook_list = &net->nf.hooks[pf][hook]; + struct list_head *hook_list; + +#ifdef HAVE_JUMP_LABEL + if (__builtin_constant_p(pf) && + __builtin_constant_p(hook) && + !static_key_false(&nf_hooks_needed[pf][hook])) 
+ return 1; +#endif + + hook_list = &net->nf.hooks[pf][hook]; - if (nf_hook_list_active(hook_list, pf, hook)) { + if (!list_empty(hook_list)) { struct nf_hook_state state; nf_hook_state_init(&state, hook_list, hook, thresh, diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index ba0d978..1d82dd5 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -34,8 +34,6 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); int nfnetlink_has_listeners(struct net *net, unsigned int group); -struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size, - u32 dst_portid, gfp_t gfp_mask); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index c557741..80a305b 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -200,6 +200,9 @@ struct xt_table { u_int8_t af; /* address/protocol family */ int priority; /* hook order */ + /* called when table is needed in the given netns */ + int (*table_init)(struct net *net); + /* A unique name... */ const char name[XT_TABLE_MAXNAMELEN]; }; @@ -408,8 +411,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) return cnt; } -struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); -void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); +struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 6f074db..029b95e 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -48,10 +48,11 @@ struct arpt_error { } extern void *arpt_alloc_initial_table(const struct xt_table *); -extern struct xt_table *arpt_register_table(struct net *net, - const struct xt_table *table, - const struct arpt_replace *repl); -extern void arpt_unregister_table(struct xt_table *table); +int arpt_register_table(struct net *net, const struct xt_table *table, + const struct arpt_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void arpt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); extern unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index aa598f9..7bfc589 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -24,10 +24,11 @@ extern void ipt_init(void) __init; -extern struct xt_table *ipt_register_table(struct net *net, - const struct xt_table *table, - const struct ipt_replace *repl); -extern void ipt_unregister_table(struct net *net, struct xt_table *table); +int ipt_register_table(struct net *net, const struct xt_table *table, + const struct ipt_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void ipt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); /* Standard entry. 
*/ struct ipt_standard { diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 0f76e5c..b21c392 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -25,10 +25,11 @@ extern void ip6t_init(void) __init; extern void *ip6t_alloc_initial_table(const struct xt_table *); -extern struct xt_table *ip6t_register_table(struct net *net, - const struct xt_table *table, - const struct ip6t_replace *repl); -extern void ip6t_unregister_table(struct net *net, struct xt_table *table); +int ip6t_register_table(struct net *net, const struct xt_table *table, + const struct ip6t_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void ip6t_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 0b41959..da14ab6 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -69,16 +69,6 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group) extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern int netlink_has_listeners(struct sock *sk, unsigned int group); -extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size, - unsigned int ldiff, u32 dst_portid, - gfp_t gfp_mask); -static inline struct sk_buff * -netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid, - gfp_t gfp_mask) -{ - return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask); -} - extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 79ec7bb..78fda2a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -966,11 +966,20 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern struct perf_callchain_entry * +get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, + bool crosstask, bool add_mark); +extern int get_callchain_buffers(void); +extern void put_callchain_buffers(void); -static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) { - if (entry->nr < PERF_MAX_STACK_DEPTH) + if (entry->nr < PERF_MAX_STACK_DEPTH) { entry->ip[entry->nr++] = ip; + return 0; + } else { + return -1; /* no more room, stop walking the stack */ + } } extern int sysctl_perf_event_paranoid; diff --git a/include/linux/phy.h b/include/linux/phy.h index d6f3641..2abd791 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -327,8 +327,6 @@ struct phy_c45_device_ids { /* phy_device: An instance of a PHY * * drv: Pointer to the driver for this PHY instance - * bus: Pointer to the bus this PHY is on - * dev: driver model device structure for this PHY * phy_id: UID for this device found during discovery * c45_ids: 802.3-c45 Device Identifers if is_c45. * is_c45: Set to true if this phy uses clause 45 addressing. 
@@ -338,7 +336,6 @@ struct phy_c45_device_ids { * suspended: Set to true if this phy has been suspended successfully. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. - * addr: Bus address of PHY * link_timeout: The number of timer firings to wait before the * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 2400d2e..1d41ec4 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -19,7 +19,7 @@ extern struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, int link_gpio, struct device_node *np); -extern void fixed_phy_del(int phy_addr); +extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); @@ -40,9 +40,8 @@ static inline struct phy_device *fixed_phy_register(unsigned int irq, { return ERR_PTR(-ENODEV); } -static inline int fixed_phy_del(int phy_addr) +static inline void fixed_phy_unregister(struct phy_device *phydev) { - return -ENODEV; } static inline int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h deleted file mode 100644 index e75dcbf..0000000 --- a/include/linux/platform_data/brcmfmac-sdio.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2013 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _LINUX_BRCMFMAC_PLATFORM_H -#define _LINUX_BRCMFMAC_PLATFORM_H - -/* - * Platform specific driver functions and data. Through the platform specific - * device data functions can be provided to help the brcmfmac driver to - * operate with the device in combination with the used platform. 
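On the phy_fixed change above: fixed_phy_del(), which removed a fixed PHY by bus address, is replaced by fixed_phy_unregister(), which takes the phy_device itself. A rough sketch of a MAC driver adapting its teardown path; struct example_priv and the probe/remove split are placeholders, not code from this series.

#include <linux/err.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

struct example_priv {
	struct phy_device *phydev;
};

static int example_register_fixed_link(struct example_priv *priv)
{
	struct fixed_phy_status status = {
		.link	= 1,
		.speed	= 1000,
		.duplex	= 1,
	};

	priv->phydev = fixed_phy_register(PHY_POLL, &status, -1, NULL);
	return PTR_ERR_OR_ZERO(priv->phydev);
}

static void example_remove_fixed_link(struct example_priv *priv)
{
	/* Was fixed_phy_del(<bus address>); now the phydev is passed directly. */
	fixed_phy_unregister(priv->phydev);
}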
- * - * Use the platform data in the following (similar) way: - * - * -#include <brcmfmac_platform.h> - - -static void brcmfmac_power_on(void) -{ -} - -static void brcmfmac_power_off(void) -{ -} - -static void brcmfmac_reset(void) -{ -} - -static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = { - .power_on = brcmfmac_power_on, - .power_off = brcmfmac_power_off, - .reset = brcmfmac_reset -}; - -static struct platform_device brcmfmac_device = { - .name = BRCMFMAC_SDIO_PDATA_NAME, - .id = PLATFORM_DEVID_NONE, - .dev.platform_data = &brcmfmac_sdio_pdata -}; - -void __init brcmfmac_init_pdata(void) -{ - brcmfmac_sdio_pdata.oob_irq_supported = true; - brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB); - brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ | - IORESOURCE_IRQ_HIGHLEVEL; - platform_device_register(&brcmfmac_device); -} - * - * - * Note: the brcmfmac can be loaded as module or be statically built-in into - * the kernel. If built-in then do note that it uses module_init (and - * module_exit) routines which equal device_initcall. So if you intend to - * create a module with the platform specific data for the brcmfmac and have - * it built-in to the kernel then use a higher initcall then device_initcall - * (see init.h). If this is not done then brcmfmac will load without problems - * but will not pickup the platform data. - * - * When the driver does not "detect" platform driver data then it will continue - * without reporting anything and just assume there is no data needed. Which is - * probably true for most platforms. - * - * Explanation of the platform_data fields: - * - * drive_strength: is the preferred drive_strength to be used for the SDIO - * pins. If 0 then a default value will be used. This is the target drive - * strength, the exact drive strength which will be used depends on the - * capabilities of the device. - * - * oob_irq_supported: does the board have support for OOB interrupts. SDIO - * in-band interrupts are relatively slow and for having less overhead on - * interrupt processing an out of band interrupt can be used. If the HW - * supports this then enable this by setting this field to true and configure - * the oob related fields. - * - * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are - * used for registering the irq using request_irq function. - * - * broken_sg_support: flag for broken sg list support of SDIO host controller. - * Set this to true if the SDIO host controller has higher align requirement - * than 32 bytes for each scatterlist item. - * - * sd_head_align: alignment requirement for start of data buffer - * - * sd_sgentry_align: length alignment requirement for each sg entry - * - * power_on: This function is called by the brcmfmac when the module gets - * loaded. This can be particularly useful for low power devices. The platform - * spcific routine may for example decide to power up the complete device. - * If there is no use-case for this function then provide NULL. - * - * power_off: This function is called by the brcmfmac when the module gets - * unloaded. At this point the device can be powered down or otherwise be reset. - * So if an actual power_off is not supported but reset is then reset the device - * when this function gets called. This can be particularly useful for low power - * devices. If there is no use-case for this function (either power-down or - * reset) then provide NULL. - * - * reset: This function can get called if the device communication broke down. 
- * This functionality is particularly useful in case of SDIO type devices. It is - * possible to reset a dongle via sdio data interface, but it requires that - * this is fully functional. This function is chip/module specific and this - * function should return only after the complete reset has completed. - */ - -#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio" - -struct brcmfmac_sdio_platform_data { - unsigned int drive_strength; - bool oob_irq_supported; - unsigned int oob_irq_nr; - unsigned long oob_irq_flags; - bool broken_sg_support; - unsigned short sd_head_align; - unsigned short sd_sgentry_align; - void (*power_on)(void); - void (*power_off)(void); - void (*reset)(void); -}; - -#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h new file mode 100644 index 0000000..1d30bf2 --- /dev/null +++ b/include/linux/platform_data/brcmfmac.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 201 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_BRCMFMAC_PLATFORM_H +#define _LINUX_BRCMFMAC_PLATFORM_H + + +#define BRCMFMAC_PDATA_NAME "brcmfmac" + +#define BRCMFMAC_COUNTRY_BUF_SZ 4 + + +/* + * Platform specific driver functions and data. Through the platform specific + * device data functions and data can be provided to help the brcmfmac driver to + * operate with the device in combination with the used platform. + */ + + +/** + * Note: the brcmfmac can be loaded as module or be statically built-in into + * the kernel. If built-in then do note that it uses module_init (and + * module_exit) routines which equal device_initcall. So if you intend to + * create a module with the platform specific data for the brcmfmac and have + * it built-in to the kernel then use a higher initcall then device_initcall + * (see init.h). If this is not done then brcmfmac will load without problems + * but will not pickup the platform data. + * + * When the driver does not "detect" platform driver data then it will continue + * without reporting anything and just assume there is no data needed. Which is + * probably true for most platforms. + */ + +/** + * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are + * supported. + */ +enum brcmf_bus_type { + BRCMF_BUSTYPE_SDIO, + BRCMF_BUSTYPE_USB, + BRCMF_BUSTYPE_PCIE +}; + + +/** + * struct brcmfmac_sdio_pd - SDIO Device specific platform data. + * + * @txglomsz: SDIO txglom size. Use 0 if default of driver is to be + * used. + * @drive_strength: is the preferred drive_strength to be used for the SDIO + * pins. If 0 then a default value will be used. This is + * the target drive strength, the exact drive strength + * which will be used depends on the capabilities of the + * device. + * @oob_irq_supported: does the board have support for OOB interrupts. 
SDIO + * in-band interrupts are relatively slow and for having + * less overhead on interrupt processing an out of band + * interrupt can be used. If the HW supports this then + * enable this by setting this field to true and configure + * the oob related fields. + * @oob_irq_nr, + * @oob_irq_flags: the OOB interrupt information. The values are used for + * registering the irq using request_irq function. + * @broken_sg_support: flag for broken sg list support of SDIO host controller. + * Set this to true if the SDIO host controller has higher + * align requirement than 32 bytes for each scatterlist + * item. + * @sd_head_align: alignment requirement for start of data buffer. + * @sd_sgentry_align: length alignment requirement for each sg entry. + * @reset: This function can get called if the device communication + * broke down. This functionality is particularly useful in + * case of SDIO type devices. It is possible to reset a + * dongle via sdio data interface, but it requires that + * this is fully functional. This function is chip/module + * specific and this function should return only after the + * complete reset has completed. + */ +struct brcmfmac_sdio_pd { + int txglomsz; + unsigned int drive_strength; + bool oob_irq_supported; + unsigned int oob_irq_nr; + unsigned long oob_irq_flags; + bool broken_sg_support; + unsigned short sd_head_align; + unsigned short sd_sgentry_align; + void (*reset)(void); +}; + +/** + * struct brcmfmac_pd_cc_entry - Struct for translating user space country code + * (iso3166) to firmware country code and + * revision. + * + * @iso3166: iso3166 alpha 2 country code string. + * @cc: firmware country code string. + * @rev: firmware country code revision. + */ +struct brcmfmac_pd_cc_entry { + char iso3166[BRCMFMAC_COUNTRY_BUF_SZ]; + char cc[BRCMFMAC_COUNTRY_BUF_SZ]; + s32 rev; +}; + +/** + * struct brcmfmac_pd_cc - Struct for translating country codes as set by user + * space to a country code and rev which can be used by + * firmware. + * + * @table_size: number of entries in table (> 0) + * @table: array of 1 or more elements with translation information. + */ +struct brcmfmac_pd_cc { + int table_size; + struct brcmfmac_pd_cc_entry table[0]; +}; + +/** + * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type) + * is the unique identifier of the device. + * + * @id: ID of the device for which this data is. In case of SDIO + * or PCIE this is the chipid as identified by chip.c In + * case of USB this is the chipid as identified by the + * device query. + * @rev: chip revision, see id. + * @bus_type: The type of bus. Some chipid/rev exist for different bus + * types. Each bus type has its own set of settings. + * @feature_disable: Bitmask of features to disable (override), See feature.c + * in brcmfmac for details. + * @country_codes: If available, pointer to struct for translating country + * codes. + * @bus: Bus specific (union) device settings. Currently only + * SDIO. + */ +struct brcmfmac_pd_device { + unsigned int id; + unsigned int rev; + enum brcmf_bus_type bus_type; + unsigned int feature_disable; + struct brcmfmac_pd_cc *country_codes; + union { + struct brcmfmac_sdio_pd sdio; + } bus; +}; + +/** + * struct brcmfmac_platform_data - BRCMFMAC specific platform data. + * + * @power_on: This function is called by the brcmfmac driver when the module + * gets loaded. This can be particularly useful for low power + * devices. The platform spcific routine may for example decide to + * power up the complete device. 
If there is no use-case for this + * function then provide NULL. + * @power_off: This function is called by the brcmfmac when the module gets + * unloaded. At this point the devices can be powered down or + * otherwise be reset. So if an actual power_off is not supported + * but reset is supported by the devices then reset the devices + * when this function gets called. This can be particularly useful + * for low power devices. If there is no use-case for this + * function then provide NULL. + */ +struct brcmfmac_platform_data { + void (*power_on)(void); + void (*power_off)(void); + char *fw_alternative_path; + int device_count; + struct brcmfmac_pd_device devices[0]; +}; + + +#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h deleted file mode 100644 index ca13992..0000000 --- a/include/linux/platform_data/microread.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Driver include for the Inside Secure microread NFC Chip. - * - * Copyright (C) 2011 Tieto Poland - * Copyright (C) 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _MICROREAD_H -#define _MICROREAD_H - -#include <linux/i2c.h> - -#define MICROREAD_DRIVER_NAME "microread" - -/* board config platform data for microread */ -struct microread_nfc_platform_data { - unsigned int rst_gpio; - unsigned int irq_gpio; - unsigned int ioh_gpio; -}; - -#endif /* _MICROREAD_H */ diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 1d1ba2c..53ecb37 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -11,9 +11,11 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 +#define X_FINAL_CLEANUP_AGG_INT 1 + #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 4 -#define FW_REVISION_VERSION 2 +#define FW_MINOR_VERSION 7 +#define FW_REVISION_VERSION 3 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -152,6 +154,9 @@ /* number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 +/* the size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 + /* base number of Tx PQs in the CM PQ representation. 
* should be used when storing PQ IDs in CM PQ registers and context */ @@ -285,6 +290,16 @@ #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 8 + /******************/ /* PBF CONSTANTS */ /******************/ @@ -335,7 +350,7 @@ struct event_ring_entry { /* Multi function mode */ enum mf_mode { - SF, + ERROR_MODE /* Unsupported mode */, MF_OVLAN, MF_NPAR, MAX_MF_MODE @@ -606,4 +621,19 @@ struct status_block { #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; +struct tunnel_parsing_flags { + u8 flags; +#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; #endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 320b337..092cb0c 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -17,10 +17,8 @@ #define ETH_MAX_RAMROD_PER_CON 8 #define ETH_TX_BD_PAGE_SIZE_BYTES 4096 #define ETH_RX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096 #define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 #define ETH_RX_NUM_NEXT_PAGE_BDS 2 -#define ETH_RX_NUM_NEXT_PAGE_SGES 2 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 @@ -34,7 +32,8 @@ #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define ETH_REG_CQE_PBL_SIZE 3 +/* Maximum number of buffers, used for RX packet placement */ +#define ETH_RX_MAX_BUFF_PER_PKT 5 /* num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 @@ -54,9 +53,9 @@ /* TPA constants */ #define ETH_TPA_MAX_AGGS_NUM 64 -#define ETH_TPA_CQE_START_SGL_SIZE 3 -#define ETH_TPA_CQE_CONT_SGL_SIZE 6 -#define ETH_TPA_CQE_END_SGL_SIZE 4 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Queue Zone sizes */ #define TSTORM_QZONE_SIZE 0 @@ -74,18 +73,18 @@ struct coalescing_timeset { struct eth_tx_1st_bd_flags { u8 bitfields; +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 #define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4 
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 @@ -97,38 +96,44 @@ struct eth_tx_data_1st_bd { __le16 vlan; u8 nbds; struct eth_tx_1st_bd_flags bd_flags; - __le16 fw_use_only; + __le16 bitfields; +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2 }; /* The parsing information data for the second tx bd of a given packet. */ struct eth_tx_data_2nd_bd { __le16 tunn_ip_size; - __le16 bitfields; -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 -#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 - __le16 bitfields2; + __le16 bitfields1; #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 #define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14 -#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; /* Regular ETH Rx FP CQE. 
*/ @@ -145,11 +150,68 @@ struct eth_fast_path_rx_reg_cqe { struct parsing_and_err_flags pars_flags; __le16 vlan_tag; __le32 rss_hash; - __le16 len_on_bd; + __le16 len_on_first_bd; u8 placement_offset; - u8 reserved; - __le16 pbl[ETH_REG_CQE_PBL_SIZE]; - u8 reserved1[10]; + struct tunnel_parsing_flags tunnel_pars_flags; + u8 bd_num; + u8 reserved[7]; + u32 fw_debug; + u8 reserved1[3]; + u8 flags; +#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2 +}; + +/* TPA-continue ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_cont_cqe { + u8 type; + u8 tpa_agg_index; + __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved[5]; + u8 reserved1; + __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; +}; + +/* TPA-end ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_end_cqe { + u8 type; + u8 tpa_agg_index; + __le16 total_packet_len; + u8 num_of_bds; + u8 end_reason; + __le16 num_of_coalesced_segs; + __le32 ts_delta; + __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + u8 reserved1[3]; + u8 reserved2; + __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; +}; + +/* TPA-start ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_start_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 + __le16 seg_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; + struct tunnel_parsing_flags tunnel_pars_flags; + u8 tpa_agg_index; + u8 header_len; + __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; + u32 fw_debug; }; /* The L4 pseudo checksum mode for Ethernet */ @@ -168,13 +230,26 @@ struct eth_slow_path_rx_cqe { u8 type; u8 ramrod_cmd_id; u8 error_flag; - u8 reserved[27]; + u8 reserved[25]; __le16 echo; + u8 reserved1; + u8 flags; +/* for PMD mode - valid indication */ +#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0 +/* for PMD mode - valid toggle indication */ +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2 }; /* union for all ETH Rx CQE types */ union eth_rx_cqe { struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; struct eth_slow_path_rx_cqe slow_path; }; @@ -183,15 +258,18 @@ enum eth_rx_cqe_type { ETH_RX_CQE_TYPE_UNUSED, ETH_RX_CQE_TYPE_REGULAR, ETH_RX_CQE_TYPE_SLOW_PATH, + ETH_RX_CQE_TYPE_TPA_START, + ETH_RX_CQE_TYPE_TPA_CONT, + ETH_RX_CQE_TYPE_TPA_END, MAX_ETH_RX_CQE_TYPE }; /* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; - __le16 sge_prod; __le16 cqe_prod; __le16 reserved; + __le16 reserved1; }; /* The first tx bd of a given packet */ @@ -211,12 +289,17 @@ struct eth_tx_2nd_bd { /* The parsing information data for the third tx bd of a given packet. 
*/ struct eth_tx_data_3rd_bd { __le16 lso_mss; - u8 bitfields; + __le16 bitfields; #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF #define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 - u8 resereved0[3]; +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 + u8 tunn_l4_hdr_start_offset_w; + u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ @@ -226,12 +309,24 @@ struct eth_tx_3rd_bd { struct eth_tx_data_3rd_bd data; }; +/* Complementary information for the regular tx bd of a given packet. */ +struct eth_tx_data_bd { + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved3; +}; + /* The common non-special TX BD ring element */ struct eth_tx_bd { struct regpair addr; __le16 nbytes; - __le16 reserved0; - __le32 reserved1; + struct eth_tx_data_bd data; }; union eth_tx_bd_types { diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 41b9049..5f8fcaa 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -19,6 +19,10 @@ /* dma_addr_t manip */ #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) #define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 81ab178..e1d6983 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -33,10 +33,20 @@ struct qed_update_vport_params { u8 vport_id; u8 update_vport_active_flg; u8 vport_active_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; u8 update_rss_flg; struct qed_update_vport_rss_params rss_params; }; +struct qed_start_vport_params { + bool remove_inner_vlan; + bool gro_enable; + bool drop_ttl0; + u8 vport_id; + u16 mtu; +}; + struct qed_stop_rxq_params { u8 rss_id; u8 rx_queue_id; @@ -116,9 +126,7 @@ struct qed_eth_ops { void *cookie); int (*vport_start)(struct qed_dev *cdev, - u8 vport_id, u16 mtu, - u8 drop_ttl0_flg, - u8 inner_vlan_removal_en_flg); + struct qed_start_vport_params *params); int (*vport_stop)(struct qed_dev *cdev, u8 vport_id); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index d4a32e8..1f7599c7 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -80,7 +80,7 @@ struct qed_dev_info { u8 num_hwfns; u8 hw_mac[ETH_ALEN]; - bool is_mf; + bool is_mf_default; /* FW version */ u16 fw_major; @@ -360,6 +360,12 @@ enum DP_MODULE { /* to be added...up to 0x8000000 */ }; +enum qed_mf_mode { + QED_MF_DEFAULT, + QED_MF_OVLAN, + QED_MF_NPAR, +}; + struct qed_eth_stats { u64 no_buff_discards; u64 packet_too_big_discard; @@ -440,6 +446,12 @@ struct qed_eth_stats { #define RX_PI 0 #define TX_PI(tc) (RX_PI + 1 + tc) +struct qed_sb_cnt_info { + int sb_cnt; + int sb_iov_cnt; + int sb_free_blk; +}; + static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) { u32 prod = 0; diff --git a/include/linux/rculist.h 
b/include/linux/rculist.h index 14ec165..17d4f84 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -319,6 +319,27 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, }) /** + * list_next_or_null_rcu - get the first element from a list + * @head: the head for the list. + * @ptr: the list head to take the next element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note that if the ptr is at the end of the list, NULL is returned. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_next_or_null_rcu(head, ptr, type, member) \ +({ \ + struct list_head *__head = (head); \ + struct list_head *__ptr = (ptr); \ + struct list_head *__next = READ_ONCE(__ptr->next); \ + likely(__next != __head) ? list_entry_rcu(__next, type, \ + member) : NULL; \ +}) + +/** * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h deleted file mode 100644 index 20bcb55..0000000 --- a/include/linux/rfkill-gpio.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2011, NVIDIA Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - - -#ifndef __RFKILL_GPIO_H -#define __RFKILL_GPIO_H - -#include <linux/types.h> -#include <linux/rfkill.h> - -/** - * struct rfkill_gpio_platform_data - platform data for rfkill gpio device. - * for unused gpio's, the expected value is -1. - * @name: name for the gpio rf kill instance - */ - -struct rfkill_gpio_platform_data { - char *name; - enum rfkill_type type; -}; - -#endif /* __RFKILL_GPIO_H */ diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index d901078..e6a0031 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill); * * Pause polling -- say transmitter is off for other reasons. * NOTE: not necessary for suspend/resume -- in that case the - * core stops polling anyway + * core stops polling anyway (but will also correctly handle + * the case of polling having been paused before suspend.) */ void rfkill_pause_polling(struct rfkill *rfkill); @@ -212,6 +213,15 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); * @rfkill: rfkill struct to query */ bool rfkill_blocked(struct rfkill *rfkill); + +/** + * rfkill_find_type - Helpper for finding rfkill type by name + * @name: the name of the type + * + * Returns enum rfkill_type that conrresponds the name. 
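The list_next_or_null_rcu() helper added above returns the following entry of an RCU-protected list, or NULL when the given node is the last one. A small illustrative accessor; struct example_item is a placeholder type, not part of the series.

#include <linux/rculist.h>

struct example_item {
	struct list_head node;
	int value;
};

/* Caller must hold rcu_read_lock(); returns NULL when @cur is the last
 * element on @head.
 */
static struct example_item *example_next_item(struct list_head *head,
					      struct example_item *cur)
{
	return list_next_or_null_rcu(head, &cur->node,
				     struct example_item, node);
}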
+ */ +enum rfkill_type rfkill_find_type(const char *name); + #else /* !RFKILL */ static inline struct rfkill * __must_check rfkill_alloc(const char *name, @@ -268,6 +278,12 @@ static inline bool rfkill_blocked(struct rfkill *rfkill) { return false; } + +static inline enum rfkill_type rfkill_find_type(const char *name) +{ + return RFKILL_TYPE_ALL; +} + #endif /* RFKILL || RFKILL_MODULE */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d3fcd45..15d0df9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1161,10 +1161,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; -static inline void skb_sender_cpu_clear(struct sk_buff *skb) -{ -} - #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@ -2186,6 +2182,11 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb) return skb->csum_start - skb_headroom(skb); } +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ + return skb->head + skb->csum_start; +} + static inline int skb_transport_offset(const struct sk_buff *skb) { return skb_transport_header(skb) - skb->data; @@ -2424,6 +2425,10 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, { return __napi_alloc_skb(napi, length, GFP_ATOMIC); } +void napi_consume_skb(struct sk_buff *skb, int budget); + +void __kfree_skb_flush(void); +void __kfree_skb_defer(struct sk_buff *skb); /** * __dev_alloc_pages - allocate page for network Rx @@ -2646,6 +2651,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len skb_headroom(skb) + len <= skb->hdr_len; } +static inline int skb_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} + static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, int cloned) { @@ -3574,6 +3586,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) struct skb_gso_cb { int mac_offset; int encap_level; + __wsum csum; __u16 csum_start; }; #define SKB_SGO_CB_OFFSET 32 @@ -3600,6 +3613,16 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) return 0; } +static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res) +{ + /* Do not update partial checksums if remote checksum is enabled. */ + if (skb->remcsum_offload) + return; + + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; +} + /* Compute the checksum for a gso segment. First compute the checksum value * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and * then add in skb->csum (checksum from csum_start to end of packet). 
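The skb_try_make_writable() helper added to skbuff.h above bundles the usual "cloned and not clone-writable, so expand the head" test into one call. A hedged sketch of a caller that needs to scribble on the IPv4 header, assuming skb->data points at that header (as in a netfilter hook); example_rewrite_tos() is illustrative only and the IP checksum fix-up is omitted.

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static int example_rewrite_tos(struct sk_buff *skb, u8 new_tos)
{
	/* Make sure the header bytes we are about to write are private to us. */
	if (skb_try_make_writable(skb, sizeof(struct iphdr)))
		return -ENOMEM;

	ip_hdr(skb)->tos = new_tos;	/* IP checksum fix-up omitted in this sketch */
	return 0;
}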
@@ -3610,15 +3633,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) */ static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) { - int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - - skb_transport_offset(skb); - __wsum partial; + unsigned char *csum_start = skb_transport_header(skb); + int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; + __wsum partial = SKB_GSO_CB(skb)->csum; - partial = csum_partial(skb_transport_header(skb), plen, skb->csum); - skb->csum = res; - SKB_GSO_CB(skb)->csum_start -= plen; + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; - return csum_fold(partial); + return csum_fold(csum_partial(csum_start, plen, partial)); } static inline bool skb_is_gso(const struct sk_buff *skb) @@ -3708,5 +3730,30 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) return hdr_len + skb_gso_transport_seglen(skb); } +/* Local Checksum Offload. + * Compute outer checksum based on the assumption that the + * inner checksum will be offloaded later. + * See Documentation/networking/checksum-offloads.txt for + * explanation of how this works. + * Fill in outer checksum adjustment (e.g. with sum of outer + * pseudo-header) before calling. + * Also ensure that inner checksum is in linear data area. + */ +static inline __wsum lco_csum(struct sk_buff *skb) +{ + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *l4_hdr = skb_transport_header(skb); + __wsum partial; + + /* Start with complement of inner checksum adjustment */ + partial = ~csum_unfold(*(__force __sum16 *)(csum_start + + skb->csum_offset)); + + /* Add in checksum of our headers (incl. outer checksum + * adjustment filled in by caller) and return result. + */ + return csum_partial(l4_hdr, csum_start - l4_hdr, partial); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 5bf59c8..73bf6c6 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -200,7 +200,9 @@ struct ucred { #define AF_ALG 38 /* Algorithm sockets */ #define AF_NFC 39 /* NFC sockets */ #define AF_VSOCK 40 /* vSockets */ -#define AF_MAX 41 /* For now.. */ +#define AF_KCM 41 /* Kernel Connection Multiplexor*/ + +#define AF_MAX 42 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -246,6 +248,7 @@ struct ucred { #define PF_ALG AF_ALG #define PF_NFC AF_NFC #define PF_VSOCK AF_VSOCK +#define PF_KCM AF_KCM #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
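The lco_csum() helper added above implements the shortcut its comment describes: once the inner checksum field holds the inner pseudo-header sum (CHECKSUM_PARTIAL), everything from the inner csum_start to the end of the packet contributes a known value, so the outer checksum only needs to walk the outer headers. One way an encapsulating UDP path might use it is sketched below, assuming the inner packet is CHECKSUM_PARTIAL and the inner checksum field is in linear data; this is an illustrative sketch, not code lifted from the series.

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/checksum.h>
#include <net/udp.h>

/* Compute the outer UDP checksum of a tunnel packet without touching the
 * encapsulated payload.  @len is the outer UDP length, @saddr/@daddr the
 * outer addresses.
 */
static void example_set_outer_udp_csum(struct sk_buff *skb, __be32 saddr,
				       __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	uh->check = 0;
	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 means "no checksum" for UDP */
}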
*/ @@ -274,6 +277,7 @@ struct ucred { #define MSG_MORE 0x8000 /* Sender will send more */ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ +#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ @@ -322,6 +326,7 @@ struct ucred { #define SOL_CAIF 278 #define SOL_ALG 279 #define SOL_NFC 280 +#define SOL_KCM 281 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 881a79d..4bcf5a6 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -90,7 +90,21 @@ struct stmmac_dma_cfg { int pbl; int fixed_burst; int mixed_burst; - int burst_len; + bool aal; +}; + +#define AXI_BLEN 7 +struct stmmac_axi { + bool axi_lpi_en; + bool axi_xit_frm; + u32 axi_wr_osr_lmt; + u32 axi_rd_osr_lmt; + bool axi_kbbe; + bool axi_axi_all; + u32 axi_blen[AXI_BLEN]; + bool axi_fb; + bool axi_mb; + bool axi_rb; }; struct plat_stmmacenet_data { @@ -123,5 +137,6 @@ struct plat_stmmacenet_data { int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; + struct stmmac_axi *axi; }; #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index b386361..7be9b12 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) return (struct tcphdr *)skb_transport_header(skb); } +static inline unsigned int __tcp_hdrlen(const struct tcphdr *th) +{ + return th->doff * 4; +} + static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) { - return tcp_hdr(skb)->doff * 4; + return __tcp_hdrlen(tcp_hdr(skb)); } static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb) @@ -153,6 +158,9 @@ struct tcp_sock { u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn * total number of segments in. */ + u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn + * total number of data segments in. + */ u32 rcv_nxt; /* What we want to receive next */ u32 copied_seq; /* Head of yet unread data */ u32 rcv_wup; /* rcv_nxt on last window update sent */ @@ -160,6 +168,9 @@ struct tcp_sock { u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut * The total number of segments sent. */ + u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut + * total number of data segments sent. + */ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. @@ -256,6 +267,7 @@ struct tcp_sock { u32 prr_delivered; /* Number of newly delivered packets to * receiver in Recovery. */ u32 prr_out; /* Total number of pkts sent during Recovery. */ + u32 delivered; /* Total data packets delivered incl. rexmits */ u32 rcv_wnd; /* Current receiver window */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index 2f6a3f2..da3a77d 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -75,6 +75,8 @@ #define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \ LOWPAN_IPHC_MAX_HEADER_LEN + \ LOWPAN_NHC_MAX_HDR_LEN) +/* SCI/DCI is 4 bit width, so we have maximum 16 entries */ +#define LOWPAN_IPHC_CTX_TABLE_SIZE (1 << 4) #define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */ #define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... 
*/ @@ -98,9 +100,39 @@ enum lowpan_lltypes { LOWPAN_LLTYPE_IEEE802154, }; +enum lowpan_iphc_ctx_flags { + LOWPAN_IPHC_CTX_FLAG_ACTIVE, + LOWPAN_IPHC_CTX_FLAG_COMPRESSION, +}; + +struct lowpan_iphc_ctx { + u8 id; + struct in6_addr pfx; + u8 plen; + unsigned long flags; +}; + +struct lowpan_iphc_ctx_table { + spinlock_t lock; + const struct lowpan_iphc_ctx_ops *ops; + struct lowpan_iphc_ctx table[LOWPAN_IPHC_CTX_TABLE_SIZE]; +}; + +static inline bool lowpan_iphc_ctx_is_active(const struct lowpan_iphc_ctx *ctx) +{ + return test_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags); +} + +static inline bool +lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx) +{ + return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags); +} + struct lowpan_priv { enum lowpan_lltypes lltype; struct dentry *iface_debugfs; + struct lowpan_iphc_ctx_table ctx; /* must be last */ u8 priv[0] __aligned(sizeof(void *)); diff --git a/include/net/act_api.h b/include/net/act_api.h index 9d446f13..2a19fe1 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -7,6 +7,8 @@ #include <net/sch_generic.h> #include <net/pkt_sched.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> struct tcf_common { struct hlist_node tcfc_head; @@ -65,11 +67,6 @@ static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask) return 0; } -static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf) -{ - kfree(hf->htab); -} - /* Update lastuse only if needed, to avoid dirtying a cache line. * We use a temp variable to avoid fetching jiffies twice. */ @@ -81,42 +78,76 @@ static inline void tcf_lastuse_update(struct tcf_t *tm) tm->lastuse = now; } -#ifdef CONFIG_NET_CLS_ACT - -#define ACT_P_CREATED 1 -#define ACT_P_DELETED 1 - struct tc_action { void *priv; const struct tc_action_ops *ops; __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ __u32 order; struct list_head list; + struct tcf_hashinfo *hinfo; }; +#ifdef CONFIG_NET_CLS_ACT + +#define ACT_P_CREATED 1 +#define ACT_P_DELETED 1 + struct tc_action_ops { struct list_head head; - struct tcf_hashinfo *hinfo; char kind[IFNAMSIZ]; __u32 type; /* TBD to match kind */ struct module *owner; int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *, int bind); - int (*lookup)(struct tc_action *, u32); + int (*lookup)(struct net *, struct tc_action *, u32); int (*init)(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action *act, int ovr, int bind); - int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); + int (*walk)(struct net *, struct sk_buff *, + struct netlink_callback *, int, struct tc_action *); +}; + +struct tc_action_net { + struct tcf_hashinfo *hinfo; + const struct tc_action_ops *ops; }; -int tcf_hash_search(struct tc_action *a, u32 index); -u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); -int tcf_hash_check(u32 index, struct tc_action *a, int bind); -int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, - int size, int bind, bool cpustats); +static inline +int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops, + unsigned int mask) +{ + int err = 0; + + tn->hinfo = kmalloc(sizeof(*tn->hinfo), GFP_KERNEL); + if (!tn->hinfo) + return -ENOMEM; + tn->ops = ops; + err = tcf_hashinfo_init(tn->hinfo, mask); + if (err) + kfree(tn->hinfo); + return err; +} + +void tcf_hashinfo_destroy(const struct tc_action_ops *ops, + struct 
tcf_hashinfo *hinfo); + +static inline void tc_action_net_exit(struct tc_action_net *tn) +{ + tcf_hashinfo_destroy(tn->ops, tn->hinfo); +} + +int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, + struct netlink_callback *cb, int type, + struct tc_action *a); +int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index); +u32 tcf_hash_new_index(struct tc_action_net *tn); +int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, + int bind); +int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est, + struct tc_action *a, int size, int bind, bool cpustats); void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); -void tcf_hash_insert(struct tc_action *a); +void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a); int __tcf_hash_release(struct tc_action *a, bool bind, bool strict); @@ -125,8 +156,8 @@ static inline int tcf_hash_release(struct tc_action *a, bool bind) return __tcf_hash_release(a, bind, false); } -int tcf_register_action(struct tc_action_ops *a, unsigned int mask); -int tcf_unregister_action(struct tc_action_ops *a); +int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); +int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops); int tcf_action_destroy(struct list_head *actions, int bind); int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, struct tcf_result *res); @@ -140,5 +171,16 @@ int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); + +#define tc_no_actions(_exts) \ + (list_empty(&(_exts)->actions)) + +#define tc_for_each_action(_a, _exts) \ + list_for_each_entry(a, &(_exts)->actions, list) +#else /* CONFIG_NET_CLS_ACT */ + +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) + #endif /* CONFIG_NET_CLS_ACT */ #endif diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 47f52d3..730d856 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -87,6 +87,8 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, u32 banned_flags); int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, u32 banned_flags); +int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, + bool match_wildcard); int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, bool match_wildcard); void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 339ea57..5d38d98 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -233,6 +233,7 @@ enum { HCI_SC_ENABLED, HCI_SC_ONLY, HCI_PRIVACY, + HCI_LIMITED_PRIVACY, HCI_RPA_EXPIRED, HCI_RPA_RESOLVING, HCI_HS_ENABLED, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index d4f82ed..dc71473 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include <linux/leds.h> #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sock.h> @@ -396,6 +397,8 @@ struct hci_dev { struct delayed_work rpa_expired; bdaddr_t rpa; + struct led_trigger *power_led; + int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); 
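The act_api changes above move each action's hash table into per-netns tc_action_net state and make registration take a pernet_operations. A rough sketch of how an action module might wire this up; every example_* name and the hash mask value are placeholders, not code from the series.

#include <linux/module.h>
#include <net/act_api.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int example_net_id;
static struct tc_action_ops example_act_ops;	/* .kind/.act/.init/... set up elsewhere */

static __net_init int example_act_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, example_net_id);

	return tc_action_net_init(tn, &example_act_ops, 15 /* hash mask */);
}

static __net_exit void example_act_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, example_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations example_act_net_ops = {
	.init = example_act_init_net,
	.exit = example_act_exit_net,
	.id   = &example_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init example_act_module_init(void)
{
	/* Registration now also wires up the per-netns ops. */
	return tcf_register_action(&example_act_ops, &example_act_net_ops);
}
module_init(example_act_module_init);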
int (*flush)(struct hci_dev *hdev); diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index f1fbc3b..f358ad5 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -306,5 +306,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); +void bond_3ad_update_ad_actor_settings(struct bonding *bond); #endif /* _NET_BOND_3AD_H */ diff --git a/include/net/bonding.h b/include/net/bonding.h index ee6c520..791800d 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -215,6 +215,7 @@ struct bonding { * ALB mode (6) - to sync the use and modifications of its hash table */ spinlock_t mode_lock; + spinlock_t stats_lock; u8 send_peer_notif; u8 igmp_retrans; #ifdef CONFIG_PROC_FS diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9bcaaf7..9e1b24c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -712,6 +712,8 @@ struct cfg80211_acl_data { * @p2p_opp_ps: P2P opportunistic PS * @acl: ACL configuration used by the drivers which has support for * MAC address based access control + * @pbss: If set, start as a PCP instead of AP. Relevant for DMG + * networks. */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -730,6 +732,7 @@ struct cfg80211_ap_settings { u8 p2p_ctwindow; bool p2p_opp_ps; const struct cfg80211_acl_data *acl; + bool pbss; }; /** @@ -1888,6 +1891,8 @@ struct cfg80211_ibss_params { * @ht_capa_mask: The bits of ht_capa which are to be used. * @vht_capa: VHT Capability overrides * @vht_capa_mask: The bits of vht_capa which are to be used. + * @pbss: if set, connect to a PCP instead of AP. Valid for DMG + * networks. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -1910,6 +1915,7 @@ struct cfg80211_connect_params { struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa; struct ieee80211_vht_cap vht_capa_mask; + bool pbss; }; /** @@ -3489,6 +3495,7 @@ struct cfg80211_cached_keys; * registered for unexpected class 3 frames (AP mode) * @conn: (private) cfg80211 software SME connection state machine data * @connect_keys: (private) keys to set after connection is established + * @conn_bss_type: connecting/connected BSS type * @ibss_fixed: (private) IBSS is using fixed BSSID * @ibss_dfs_possible: (private) IBSS may change to a DFS channel * @event_list: (private) list for internal event processing @@ -3519,6 +3526,7 @@ struct wireless_dev { u8 ssid_len, mesh_id_len, mesh_id_up_len; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; + enum ieee80211_bss_type conn_bss_type; struct list_head event_list; spinlock_t event_lock; diff --git a/include/net/checksum.h b/include/net/checksum.h index 10a16b5..5c30891 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -88,8 +88,11 @@ static inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { u32 sum = (__force u32)csum2; - if (offset&1) - sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF); + + /* rotate sum to align it with a 16b boundary */ + if (offset & 1) + sum = ror32(sum, 8); + return csum_add(csum, (__force __wsum)sum); } @@ -102,10 +105,7 @@ csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) static inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { - u32 sum = (__force u32)csum2; - if (offset&1) - sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF); - return csum_sub(csum, (__force __wsum)sum); + return 
csum_block_add(csum, ~csum2, offset); } static inline __wsum csum_unfold(__sum16 n) @@ -120,6 +120,11 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) #define CSUM_MANGLED_0 ((__force __sum16)0xffff) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} + static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); diff --git a/include/net/codel.h b/include/net/codel.h index 267e702..d168aca 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -162,12 +162,14 @@ struct codel_vars { * struct codel_stats - contains codel shared variables and stats * @maxpacket: largest packet we've seen so far * @drop_count: temp count of dropped packets in dequeue() + * @drop_len: bytes of dropped packets in dequeue() * ecn_mark: number of packets we ECN marked instead of dropping * ce_mark: number of packets CE marked because sojourn time was above ce_threshold */ struct codel_stats { u32 maxpacket; u32 drop_count; + u32 drop_len; u32 ecn_mark; u32 ce_mark; }; @@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, vars->rec_inv_sqrt); goto end; } + stats->drop_len += qdisc_pkt_len(skb); qdisc_drop(skb, sch); stats->drop_count++; skb = dequeue_func(vars, sch); @@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { + stats->drop_len += qdisc_pkt_len(skb); qdisc_drop(skb, sch); stats->drop_count++; diff --git a/include/net/devlink.h b/include/net/devlink.h new file mode 100644 index 0000000..c37d257 --- /dev/null +++ b/include/net/devlink.h @@ -0,0 +1,140 @@ +/* + * include/net/devlink.h - Network physical device Netlink interface + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
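The csum_replace_by_diff() helper added to net/checksum.h above folds a precomputed 1's-complement delta into an existing checksum field. A hedged example of computing such a delta when rewriting an IPv6 destination address and patching the TCP checksum (whose pseudo-header covers that address); the function and variable names are illustrative only.

#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void example_rewrite_daddr(struct ipv6hdr *ip6h, struct tcphdr *th,
				  const struct in6_addr *new_daddr)
{
	__wsum diff;

	/* delta = csum(new bytes) - csum(old bytes) */
	diff = csum_sub(csum_partial(new_daddr, sizeof(*new_daddr), 0),
			csum_partial(&ip6h->daddr, sizeof(ip6h->daddr), 0));

	ip6h->daddr = *new_daddr;

	/* The TCP pseudo-header includes the destination address, so apply
	 * the same delta to the transport checksum.
	 */
	csum_replace_by_diff(&th->check, diff);
}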
+ */ +#ifndef _NET_DEVLINK_H_ +#define _NET_DEVLINK_H_ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/gfp.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <net/net_namespace.h> +#include <uapi/linux/devlink.h> + +struct devlink_ops; + +struct devlink { + struct list_head list; + struct list_head port_list; + const struct devlink_ops *ops; + struct device *dev; + possible_net_t _net; + char priv[0] __aligned(NETDEV_ALIGN); +}; + +struct devlink_port { + struct list_head list; + struct devlink *devlink; + unsigned index; + bool registered; + enum devlink_port_type type; + enum devlink_port_type desired_type; + void *type_dev; + bool split; + u32 split_group; +}; + +struct devlink_ops { + size_t priv_size; + int (*port_type_set)(struct devlink_port *devlink_port, + enum devlink_port_type port_type); + int (*port_split)(struct devlink *devlink, unsigned int port_index, + unsigned int count); + int (*port_unsplit)(struct devlink *devlink, unsigned int port_index); +}; + +static inline void *devlink_priv(struct devlink *devlink) +{ + BUG_ON(!devlink); + return &devlink->priv; +} + +static inline struct devlink *priv_to_devlink(void *priv) +{ + BUG_ON(!priv); + return container_of(priv, struct devlink, priv); +} + +struct ib_device; + +#if IS_ENABLED(CONFIG_NET_DEVLINK) + +struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size); +int devlink_register(struct devlink *devlink, struct device *dev); +void devlink_unregister(struct devlink *devlink); +void devlink_free(struct devlink *devlink); +int devlink_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index); +void devlink_port_unregister(struct devlink_port *devlink_port); +void devlink_port_type_eth_set(struct devlink_port *devlink_port, + struct net_device *netdev); +void devlink_port_type_ib_set(struct devlink_port *devlink_port, + struct ib_device *ibdev); +void devlink_port_type_clear(struct devlink_port *devlink_port); +void devlink_port_split_set(struct devlink_port *devlink_port, + u32 split_group); + +#else + +static inline struct devlink *devlink_alloc(const struct devlink_ops *ops, + size_t priv_size) +{ + return kzalloc(sizeof(struct devlink) + priv_size, GFP_KERNEL); +} + +static inline int devlink_register(struct devlink *devlink, struct device *dev) +{ + return 0; +} + +static inline void devlink_unregister(struct devlink *devlink) +{ +} + +static inline void devlink_free(struct devlink *devlink) +{ + kfree(devlink); +} + +static inline int devlink_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index) +{ + return 0; +} + +static inline void devlink_port_unregister(struct devlink_port *devlink_port) +{ +} + +static inline void devlink_port_type_eth_set(struct devlink_port *devlink_port, + struct net_device *netdev) +{ +} + +static inline void devlink_port_type_ib_set(struct devlink_port *devlink_port, + struct ib_device *ibdev) +{ +} + +static inline void devlink_port_type_clear(struct devlink_port *devlink_port) +{ +} + +static inline void devlink_port_split_set(struct devlink_port *devlink_port, + u32 split_group) +{ +} + +#endif + +#endif /* _NET_DEVLINK_H_ */ diff --git a/include/net/dsa.h b/include/net/dsa.h index 26a0e86..6463bb2 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -296,16 +296,17 @@ struct dsa_switch_driver { /* * Bridge integration */ - int (*port_join_bridge)(struct dsa_switch *ds, int port, - u32 br_port_mask); - int 
(*port_leave_bridge)(struct dsa_switch *ds, int port, - u32 br_port_mask); + int (*port_bridge_join)(struct dsa_switch *ds, int port, + struct net_device *bridge); + void (*port_bridge_leave)(struct dsa_switch *ds, int port); int (*port_stp_update)(struct dsa_switch *ds, int port, u8 state); /* * VLAN support */ + int (*port_vlan_filtering)(struct dsa_switch *ds, int port, + bool vlan_filtering); int (*port_vlan_prepare)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans); @@ -314,9 +315,9 @@ struct dsa_switch_driver { struct switchdev_trans *trans); int (*port_vlan_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); - int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid); - int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid, - unsigned long *ports, unsigned long *untagged); + int (*port_vlan_dump)(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)); /* * Forwarding database diff --git a/include/net/dst.h b/include/net/dst.h index c7329dc..5c98443 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -398,6 +398,18 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, __skb_tunnel_rx(skb, dev, net); } +static inline u32 dst_tclassid(const struct sk_buff *skb) +{ +#ifdef CONFIG_IP_ROUTE_CLASSID + const struct dst_entry *dst; + + dst = skb_dst(skb); + if (dst) + return dst->tclassid; +#endif + return 0; +} + int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); static inline int dst_discard(struct sk_buff *skb) { diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h new file mode 100644 index 0000000..151accae --- /dev/null +++ b/include/net/dst_cache.h @@ -0,0 +1,97 @@ +#ifndef _NET_DST_CACHE_H +#define _NET_DST_CACHE_H + +#include <linux/jiffies.h> +#include <net/dst.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_fib.h> +#endif + +struct dst_cache { + struct dst_cache_pcpu __percpu *cache; + unsigned long reset_ts; +}; + +/** + * dst_cache_get - perform cache lookup + * @dst_cache: the cache + * + * The caller should use dst_cache_get_ip4() if it need to retrieve the + * source address to be used when xmitting to the cached dst. + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache); + +/** + * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr); + +/** + * dst_cache_set_ip4 - store the ipv4 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr); + +#if IS_ENABLED(CONFIG_IPV6) + +/** + * dst_cache_set_ip6 - store the ipv6 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. 
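A hedged sketch of the driver side of the devlink API introduced in include/net/devlink.h above. Everything prefixed my_ is a placeholder and error handling is abbreviated: allocate an instance with driver-private data, register it against the bus device, register a port, and mark the port as backing an Ethernet netdev.

#include <linux/errno.h>
#include <net/devlink.h>

struct my_drv_priv {
	struct devlink_port port;
	struct net_device *netdev;
};

static const struct devlink_ops my_devlink_ops = {
	/* port_type_set / port_split / port_unsplit are optional */
};

static int my_devlink_init(struct device *dev, struct net_device *netdev)
{
	struct my_drv_priv *priv;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&my_devlink_ops, sizeof(*priv));
	if (!dl)
		return -ENOMEM;

	priv = devlink_priv(dl);
	priv->netdev = netdev;

	err = devlink_register(dl, dev);
	if (err)
		goto err_free;

	err = devlink_port_register(dl, &priv->port, 0 /* port index */);
	if (err)
		goto err_unreg;

	devlink_port_type_eth_set(&priv->port, netdev);
	return 0;

err_unreg:
	devlink_unregister(dl);
err_free:
	devlink_free(dl);
	return err;
}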
+ */ +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr); + +/** + * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr); +#endif + +/** + * dst_cache_reset - invalidate the cache contents + * @dst_cache: the cache + * + * This do not free the cached dst to avoid races and contentions. + * the dst will be freed on later cache lookup. + */ +static inline void dst_cache_reset(struct dst_cache *dst_cache) +{ + dst_cache->reset_ts = jiffies; +} + +/** + * dst_cache_init - initialize the cache, allocating the required storage + * @dst_cache: the cache + * @gfp: allocation flags + */ +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp); + +/** + * dst_cache_destroy - empty the cache and free the allocated storage + * @dst_cache: the cache + * + * No synchronization is enforced: it must be called only when the cache + * is unsed. + */ +void dst_cache_destroy(struct dst_cache *dst_cache); + +#endif diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 30a56ab..5db9f59 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -62,6 +62,7 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a, sizeof(a->u.tun_info) + a->u.tun_info.options_len); } +void metadata_dst_free(struct metadata_dst *); struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags); struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags); @@ -125,7 +126,7 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, ip_tunnel_key_init(&tun_dst->u.tun_info.key, iph->saddr, iph->daddr, iph->tos, iph->ttl, - 0, 0, tunnel_id, flags); + 0, 0, 0, tunnel_id, flags); return tun_dst; } @@ -151,8 +152,11 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb, info->key.u.ipv6.src = ip6h->saddr; info->key.u.ipv6.dst = ip6h->daddr; + info->key.tos = ipv6_get_dsfield(ip6h); info->key.ttl = ip6h->hop_limit; + info->key.label = ip6_flowlabel(ip6h); + return tun_dst; } diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 8c8548c..d3d60dc 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -184,4 +184,17 @@ static inline bool flow_keys_have_l4(struct flow_keys *keys) u32 flow_hash_from_keys(struct flow_keys *keys); +static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector, + enum flow_dissector_key_id key_id) +{ + return flow_dissector->used_keys & (1 << key_id); +} + +static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissector, + enum flow_dissector_key_id key_id, + void *target_container) +{ + return ((char *)target_container) + flow_dissector->offset[key_id]; +} + #endif diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 43c0e771..8d4608c 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -83,7 +83,6 @@ struct genl_family { * @attrs: netlink attributes * @_net: network namespace * @user_ptr: user pointers - * @dst_sk: destination socket */ struct genl_info { u32 snd_seq; @@ -94,7 +93,6 @@ struct genl_info { struct nlattr ** attrs; possible_net_t _net; void * user_ptr[2]; - struct sock * dst_sk; }; static inline struct net *genl_info_net(struct genl_info *info) @@ -188,8 +186,6 @@ int 
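The dst_cache API above replaces the open-coded per-tunnel route caches; the sketch below shows the intended pattern under the stated assumptions (my_tunnel and my_tunnel_route are illustrative, and the caller is assumed to run with BH disabled around the get/set pair as the kernel-doc requires). On a configuration change the cache is simply invalidated with dst_cache_reset().

#include <linux/err.h>
#include <linux/gfp.h>
#include <net/dst_cache.h>
#include <net/route.h>

struct my_tunnel {
	struct dst_cache dst_cache;
	/* ... */
};

static struct rtable *my_tunnel_route(struct net *net, struct my_tunnel *t,
				      struct flowi4 *fl4)
{
	struct rtable *rt;
	__be32 saddr;

	rt = dst_cache_get_ip4(&t->dst_cache, &saddr);
	if (rt) {
		fl4->saddr = saddr;	/* cached source address */
		return rt;
	}

	rt = ip_route_output_key(net, fl4);
	if (!IS_ERR(rt))
		dst_cache_set_ip4(&t->dst_cache, &rt->dst, fl4->saddr);

	return rt;
}

static int my_tunnel_init(struct my_tunnel *t)
{
	return dst_cache_init(&t->dst_cache, GFP_KERNEL);
}

static void my_tunnel_uninit(struct my_tunnel *t)
{
	dst_cache_destroy(&t->dst_cache);
}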
genl_unregister_family(struct genl_family *family); void genl_notify(struct genl_family *family, struct sk_buff *skb, struct genl_info *info, u32 group, gfp_t flags); -struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info, - gfp_t flags); void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, struct genl_family *family, int flags, u8 cmd); diff --git a/include/net/hwbm.h b/include/net/hwbm.h new file mode 100644 index 0000000..47d0866 --- /dev/null +++ b/include/net/hwbm.h @@ -0,0 +1,28 @@ +#ifndef _HWBM_H +#define _HWBM_H + +struct hwbm_pool { + /* Capacity of the pool */ + int size; + /* Size of the buffers managed */ + int frag_size; + /* Number of buffers currently used by this pool */ + int buf_num; + /* constructor called during alocation */ + int (*construct)(struct hwbm_pool *bm_pool, void *buf); + /* protect acces to the buffer counter*/ + spinlock_t lock; + /* private data */ + void *priv; +}; +#ifdef CONFIG_HWBM +void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf); +int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp); +int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp); +#else +void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {} +int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; } +int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) +{ return 0; } +#endif /* CONFIG_HWBM */ +#endif /* _HWBM_H */ diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 7ff588c..28332bd 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -53,6 +53,7 @@ struct sock *__inet6_lookup_established(struct net *net, struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, @@ -60,6 +61,7 @@ struct sock *inet6_lookup_listener(struct net *net, static inline struct sock *__inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, @@ -71,12 +73,12 @@ static inline struct sock *__inet6_lookup(struct net *net, if (sk) return sk; - return inet6_lookup_listener(net, hashinfo, saddr, sport, + return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport, daddr, hnum, dif); } static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, - struct sk_buff *skb, + struct sk_buff *skb, int doff, const __be16 sport, const __be16 dport, int iif) @@ -86,16 +88,19 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, if (sk) return sk; - return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, - &ipv6_hdr(skb)->saddr, sport, + return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, + doff, &ipv6_hdr(skb)->saddr, sport, &ipv6_hdr(skb)->daddr, ntohs(dport), iif); } struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const __be16 dport, const int dif); + +int inet6_hash(struct sock *sk); #endif /* IS_ENABLED(CONFIG_IPV6) */ #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 12aac0f..909972a 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -13,6 +13,7 @@ struct netns_frags { int timeout; int 
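A short, illustrative use of the hwbm helpers from include/net/hwbm.h above, roughly as a hardware-buffer-manager driver would do it; my_bm_construct() and the 2048-byte frag size are placeholders. hwbm_pool_add() returns the number of buffers actually handed to the pool.

#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <net/hwbm.h>

/* called for each freshly allocated buffer, e.g. to push its DMA address
 * into the hardware pool; a pure placeholder here
 */
static int my_bm_construct(struct hwbm_pool *bm_pool, void *buf)
{
	return 0;
}

static int my_bm_pool_setup(struct hwbm_pool *pool, int buf_num)
{
	pool->size = buf_num;
	pool->frag_size = 2048;
	pool->construct = my_bm_construct;
	spin_lock_init(&pool->lock);

	return hwbm_pool_add(pool, buf_num, GFP_KERNEL);
}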
high_thresh; int low_thresh; + int max_dist; }; /** diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index de2e3ad..50f635c 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -207,12 +207,16 @@ void inet_hashinfo_init(struct inet_hashinfo *h); bool inet_ehash_insert(struct sock *sk, struct sock *osk); bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); -void __inet_hash(struct sock *sk, struct sock *osk); -void inet_hash(struct sock *sk); +int __inet_hash(struct sock *sk, struct sock *osk, + int (*saddr_same)(const struct sock *sk1, + const struct sock *sk2, + bool match_wildcard)); +int inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const unsigned short hnum, @@ -220,10 +224,11 @@ struct sock *__inet_lookup_listener(struct net *net, static inline struct sock *inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { - return __inet_lookup_listener(net, hashinfo, saddr, sport, + return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport, daddr, ntohs(dport), dif); } @@ -299,6 +304,7 @@ static inline struct sock * static inline struct sock *__inet_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const __be16 dport, const int dif) @@ -307,12 +313,13 @@ static inline struct sock *__inet_lookup(struct net *net, struct sock *sk = __inet_lookup_established(net, hashinfo, saddr, sport, daddr, hnum, dif); - return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport, - daddr, hnum, dif); + return sk ? 
: __inet_lookup_listener(net, hashinfo, skb, doff, saddr, + sport, daddr, hnum, dif); } static inline struct sock *inet_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const __be16 dport, const int dif) @@ -320,7 +327,8 @@ static inline struct sock *inet_lookup(struct net *net, struct sock *sk; local_bh_disable(); - sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif); + sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, + dport, dif); local_bh_enable(); return sk; @@ -328,6 +336,7 @@ static inline struct sock *inet_lookup(struct net *net, static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, + int doff, const __be16 sport, const __be16 dport) { @@ -337,8 +346,8 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, if (sk) return sk; else - return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, - iph->saddr, sport, + return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, + doff, iph->saddr, sport, iph->daddr, dport, inet_iif(skb)); } diff --git a/include/net/ip.h b/include/net/ip.h index 1a98f1c..fad74d3 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -240,17 +240,13 @@ static inline int inet_is_local_reserved_port(struct net *net, int port) } #endif +__be32 inet_current_timestamp(void); + /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; extern int inet_peer_maxttl; -/* From ip_input.c */ -extern int sysctl_ip_early_demux; - -/* From ip_output.c */ -extern int sysctl_ip_dynaddr; - void ipfrag_init(void); void ip_static_sysctl_init(void); diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 1a49b73..cca8405 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -37,8 +37,7 @@ #ifndef _HAVE_ARCH_IPV6_CSUM __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum csum); + __u32 len, __u8 proto, __wsum csum); #endif static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 0d0ce0b..499a707 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -6,6 +6,7 @@ #include <linux/if_tunnel.h> #include <linux/ip6_tunnel.h> #include <net/ip_tunnels.h> +#include <net/dst_cache.h> #define IP6TUNNEL_ERR_TIMEO (30*HZ) @@ -33,12 +34,6 @@ struct __ip6_tnl_parm { __be32 o_key; }; -struct ip6_tnl_dst { - seqlock_t lock; - struct dst_entry __rcu *dst; - u32 cookie; -}; - /* IPv6 tunnel */ struct ip6_tnl { struct ip6_tnl __rcu *next; /* next tunnel in list */ @@ -46,7 +41,7 @@ struct ip6_tnl { struct net *net; /* netns for packet i/o */ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ - struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */ + struct dst_cache dst_cache; /* cached dst */ int err_count; unsigned long err_time; @@ -66,11 +61,6 @@ struct ipv6_tlv_tnl_enc_lim { __u8 encap_limit; /* tunnel encapsulation limit */ } __packed; -struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t); -int ip6_tnl_dst_init(struct ip6_tnl *t); -void ip6_tnl_dst_destroy(struct ip6_tnl *t); -void ip6_tnl_dst_reset(struct ip6_tnl *t); -void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); int 
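The extra skb/doff arguments threaded through the listener lookups above exist so that a SO_REUSEPORT BPF selector can inspect the packet when choosing among listeners. A sketch of the updated call shape, in the style of the TCP receive path (example_lookup is an illustrative name):

#include <linux/tcp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>

static struct sock *example_lookup(struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	/* doff = transport header length, consumed by the reuseport BPF hash */
	return __inet_lookup_skb(&tcp_hashinfo, skb, tcp_hdrlen(skb),
				 th->source, th->dest);
}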
ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index dda9abf..c35dda9 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -7,12 +7,15 @@ #include <linux/socket.h> #include <linux/types.h> #include <linux/u64_stats_sync.h> +#include <linux/bitops.h> + #include <net/dsfield.h> #include <net/gro_cells.h> #include <net/inet_ecn.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/lwtunnel.h> +#include <net/dst_cache.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> @@ -47,6 +50,7 @@ struct ip_tunnel_key { __be16 tun_flags; u8 tos; /* TOS for IPv4, TC for IPv6 */ u8 ttl; /* TTL for IPv4, HL for IPv6 */ + __be32 label; /* Flow Label for IPv6 */ __be16 tp_src; __be16 tp_dst; }; @@ -55,8 +59,16 @@ struct ip_tunnel_key { #define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */ #define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */ +/* Maximum tunnel options length. */ +#define IP_TUNNEL_OPTS_MAX \ + GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \ + options_len) * BITS_PER_BYTE) - 1, 0) + struct ip_tunnel_info { struct ip_tunnel_key key; +#ifdef CONFIG_DST_CACHE + struct dst_cache dst_cache; +#endif u8 options_len; u8 mode; }; @@ -85,11 +97,6 @@ struct ip_tunnel_prl_entry { struct rcu_head rcu_head; }; -struct ip_tunnel_dst { - struct dst_entry __rcu *dst; - __be32 saddr; -}; - struct metadata_dst; struct ip_tunnel { @@ -108,7 +115,7 @@ struct ip_tunnel { int tun_hlen; /* Precalculated header length */ int mlink; - struct ip_tunnel_dst __percpu *dst_cache; + struct dst_cache dst_cache; struct ip_tunnel_parm parms; @@ -141,6 +148,7 @@ struct ip_tunnel { #define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400) #define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800) #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) +#define TUNNEL_NOCACHE __cpu_to_be16(0x2000) #define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT) @@ -181,7 +189,7 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, __be32 saddr, __be32 daddr, - u8 tos, u8 ttl, + u8 tos, u8 ttl, __be32 label, __be16 tp_src, __be16 tp_dst, __be64 tun_id, __be16 tun_flags) { @@ -192,6 +200,7 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, 0, IP_TUNNEL_KEY_IPV4_PAD_LEN); key->tos = tos; key->ttl = ttl; + key->label = label; key->tun_flags = tun_flags; /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of @@ -207,6 +216,20 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, 0, sizeof(*key) - IP_TUNNEL_KEY_SIZE); } +static inline bool +ip_tunnel_dst_cache_usable(const struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + if (skb->mark) + return false; + if (!info) + return true; + if (info->key.tun_flags & TUNNEL_NOCACHE) + return false; + + return true; +} + static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info) { @@ -248,7 +271,6 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); void ip_tunnel_setup(struct net_device *dev, int net_id); -void ip_tunnel_dst_reset_all(struct ip_tunnel *t); int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap); @@ -273,15 +295,15 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, return INET_ECN_encapsulate(tos, inner); } -int 
iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); +int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, + bool xnet); void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, u8 proto, u8 tos, u8 ttl, __be16 df, bool xnet); struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags); -struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, - int gso_type_mask); +struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) { @@ -356,6 +378,17 @@ static inline void ip_tunnel_unneed_metadata(void) { } +static inline void ip_tunnel_info_opts_get(void *to, + const struct ip_tunnel_info *info) +{ +} + +static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, + const void *from, int len) +{ + info->options_len = 0; +} + #endif /* CONFIG_INET */ #endif /* __NET_IP_TUNNELS_H */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 0816c87..a6cc576 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ +/* Really using conntrack? */ +static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) +{ +#ifdef CONFIG_IP_VS_NFCT + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + + if (!(cp->flags & IP_VS_CONN_F_NFCT)) + return false; + ct = nf_ct_get(skb, &ctinfo); + if (ct && !nf_ct_is_untracked(ct)) + return true; +#endif + return false; +} + static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) { diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 6570f379..f3c9857 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -259,8 +259,12 @@ static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) rcu_read_lock(); opt = rcu_dereference(np->opt); - if (opt && !atomic_inc_not_zero(&opt->refcnt)) - opt = NULL; + if (opt) { + if (!atomic_inc_not_zero(&opt->refcnt)) + opt = NULL; + else + opt = rcu_pointer_handoff(opt); + } rcu_read_unlock(); return opt; } diff --git a/include/net/kcm.h b/include/net/kcm.h new file mode 100644 index 0000000..2840b58 --- /dev/null +++ b/include/net/kcm.h @@ -0,0 +1,226 @@ +/* + * Kernel Connection Multiplexor + * + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
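Two hedged fragments showing the ip_tunnels.h changes above in use: ip_tunnel_key_init() now carries an IPv6 flow-label slot (zero for IPv4), and transmit paths gate their per-tunnel dst cache on ip_tunnel_dst_cache_usable(), which refuses the cache when skb->mark is set or the key carries TUNNEL_NOCACHE. example_tnl_key() and example_use_cache() are illustrative names.

#include <net/ip_tunnels.h>

static void example_tnl_key(struct ip_tunnel_key *key, __be32 src, __be32 dst,
			    __be64 tun_id)
{
	ip_tunnel_key_init(key, src, dst, 0 /* tos */, 64 /* ttl */,
			   0 /* label */, 0 /* tp_src */, 0 /* tp_dst */,
			   tun_id, TUNNEL_KEY);
}

static bool example_use_cache(const struct sk_buff *skb,
			      const struct ip_tunnel_info *info)
{
	return ip_tunnel_dst_cache_usable(skb, info);
}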
+ */ + +#ifndef __NET_KCM_H_ +#define __NET_KCM_H_ + +#include <linux/skbuff.h> +#include <net/sock.h> +#include <uapi/linux/kcm.h> + +extern unsigned int kcm_net_id; + +#define KCM_STATS_ADD(stat, count) ((stat) += (count)) +#define KCM_STATS_INCR(stat) ((stat)++) + +struct kcm_psock_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned long long tx_msgs; + unsigned long long tx_bytes; + unsigned int rx_aborts; + unsigned int rx_mem_fail; + unsigned int rx_need_more_hdr; + unsigned int rx_msg_too_big; + unsigned int rx_msg_timeouts; + unsigned int rx_bad_hdr_len; + unsigned long long reserved; + unsigned long long unreserved; + unsigned int tx_aborts; +}; + +struct kcm_mux_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned long long tx_msgs; + unsigned long long tx_bytes; + unsigned int rx_ready_drops; + unsigned int tx_retries; + unsigned int psock_attach; + unsigned int psock_unattach_rsvd; + unsigned int psock_unattach; +}; + +struct kcm_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned long long tx_msgs; + unsigned long long tx_bytes; +}; + +struct kcm_tx_msg { + unsigned int sent; + unsigned int fragidx; + unsigned int frag_offset; + unsigned int msg_flags; + struct sk_buff *frag_skb; + struct sk_buff *last_skb; +}; + +struct kcm_rx_msg { + int full_len; + int accum_len; + int offset; + int early_eaten; +}; + +/* Socket structure for KCM client sockets */ +struct kcm_sock { + struct sock sk; + struct kcm_mux *mux; + struct list_head kcm_sock_list; + int index; + u32 done : 1; + struct work_struct done_work; + + struct kcm_stats stats; + + /* Transmit */ + struct kcm_psock *tx_psock; + struct work_struct tx_work; + struct list_head wait_psock_list; + struct sk_buff *seq_skb; + + /* Don't use bit fields here, these are set under different locks */ + bool tx_wait; + bool tx_wait_more; + + /* Receive */ + struct kcm_psock *rx_psock; + struct list_head wait_rx_list; /* KCMs waiting for receiving */ + bool rx_wait; + u32 rx_disabled : 1; +}; + +struct bpf_prog; + +/* Structure for an attached lower socket */ +struct kcm_psock { + struct sock *sk; + struct kcm_mux *mux; + int index; + + u32 tx_stopped : 1; + u32 rx_stopped : 1; + u32 done : 1; + u32 unattaching : 1; + + void (*save_state_change)(struct sock *sk); + void (*save_data_ready)(struct sock *sk); + void (*save_write_space)(struct sock *sk); + + struct list_head psock_list; + + struct kcm_psock_stats stats; + + /* Receive */ + struct sk_buff *rx_skb_head; + struct sk_buff **rx_skb_nextp; + struct sk_buff *ready_rx_msg; + struct list_head psock_ready_list; + struct work_struct rx_work; + struct delayed_work rx_delayed_work; + struct bpf_prog *bpf_prog; + struct kcm_sock *rx_kcm; + unsigned long long saved_rx_bytes; + unsigned long long saved_rx_msgs; + struct timer_list rx_msg_timer; + unsigned int rx_need_bytes; + + /* Transmit */ + struct kcm_sock *tx_kcm; + struct list_head psock_avail_list; + unsigned long long saved_tx_bytes; + unsigned long long saved_tx_msgs; +}; + +/* Per net MUX list */ +struct kcm_net { + struct mutex mutex; + struct kcm_psock_stats aggregate_psock_stats; + struct kcm_mux_stats aggregate_mux_stats; + struct list_head mux_list; + int count; +}; + +/* Structure for a MUX */ +struct kcm_mux { + struct list_head kcm_mux_list; + struct rcu_head rcu; + struct kcm_net *knet; + + struct list_head kcm_socks; /* All KCM sockets on MUX */ + int kcm_socks_cnt; /* Total KCM socket count for MUX */ + struct list_head psocks; /* List of 
all psocks on MUX */ + int psocks_cnt; /* Total attached sockets */ + + struct kcm_mux_stats stats; + struct kcm_psock_stats aggregate_psock_stats; + + /* Receive */ + spinlock_t rx_lock ____cacheline_aligned_in_smp; + struct list_head kcm_rx_waiters; /* KCMs waiting for receiving */ + struct list_head psocks_ready; /* List of psocks with a msg ready */ + struct sk_buff_head rx_hold_queue; + + /* Transmit */ + spinlock_t lock ____cacheline_aligned_in_smp; /* TX and mux locking */ + struct list_head psocks_avail; /* List of available psocks */ + struct list_head kcm_tx_waiters; /* KCMs waiting for a TX psock */ +}; + +#ifdef CONFIG_PROC_FS +int kcm_proc_init(void); +void kcm_proc_exit(void); +#else +static inline int kcm_proc_init(void) { return 0; } +static inline void kcm_proc_exit(void) { } +#endif + +static inline void aggregate_psock_stats(struct kcm_psock_stats *stats, + struct kcm_psock_stats *agg_stats) +{ + /* Save psock statistics in the mux when psock is being unattached. */ + +#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) + SAVE_PSOCK_STATS(rx_msgs); + SAVE_PSOCK_STATS(rx_bytes); + SAVE_PSOCK_STATS(rx_aborts); + SAVE_PSOCK_STATS(rx_mem_fail); + SAVE_PSOCK_STATS(rx_need_more_hdr); + SAVE_PSOCK_STATS(rx_msg_too_big); + SAVE_PSOCK_STATS(rx_msg_timeouts); + SAVE_PSOCK_STATS(rx_bad_hdr_len); + SAVE_PSOCK_STATS(tx_msgs); + SAVE_PSOCK_STATS(tx_bytes); + SAVE_PSOCK_STATS(reserved); + SAVE_PSOCK_STATS(unreserved); + SAVE_PSOCK_STATS(tx_aborts); +#undef SAVE_PSOCK_STATS +} + +static inline void aggregate_mux_stats(struct kcm_mux_stats *stats, + struct kcm_mux_stats *agg_stats) +{ + /* Save psock statistics in the mux when psock is being unattached. */ + +#define SAVE_MUX_STATS(_stat) (agg_stats->_stat += stats->_stat) + SAVE_MUX_STATS(rx_msgs); + SAVE_MUX_STATS(rx_bytes); + SAVE_MUX_STATS(tx_msgs); + SAVE_MUX_STATS(tx_bytes); + SAVE_MUX_STATS(rx_ready_drops); + SAVE_MUX_STATS(psock_attach); + SAVE_MUX_STATS(psock_unattach_rsvd); + SAVE_MUX_STATS(psock_unattach); +#undef SAVE_MUX_STATS +} + +#endif /* __NET_KCM_H_ */ diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 5567d46..c43a9c7 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -39,7 +39,7 @@ struct l3mdev_ops { #ifdef CONFIG_NET_L3_MASTER_DEV -int l3mdev_master_ifindex_rcu(struct net_device *dev); +int l3mdev_master_ifindex_rcu(const struct net_device *dev); static inline int l3mdev_master_ifindex(struct net_device *dev) { int ifindex; @@ -179,7 +179,7 @@ struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net, #else -static inline int l3mdev_master_ifindex_rcu(struct net_device *dev) +static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev) { return 0; } diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 66350ce..e9f116e 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -170,6 +170,8 @@ static inline int lwtunnel_input(struct sk_buff *skb) return -EOPNOTSUPP; } -#endif +#endif /* CONFIG_LWTUNNEL */ + +#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type)) #endif /* __NET_LWTUNNEL_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7c30faf..0c09da3 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5,7 +5,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 Intel Deutschland GmbH + * Copyright (C) 2015 - 2016 Intel 
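When a lower socket is detached, a KCM-style mux is expected to fold the psock counters into its aggregate stats with the aggregate_psock_stats() helper above; the sketch below only indicates the locking, and example_unattach is an illustrative name.

#include <net/kcm.h>

static void example_unattach(struct kcm_mux *mux, struct kcm_psock *psock)
{
	spin_lock_bh(&mux->lock);
	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	spin_unlock_bh(&mux->lock);
}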
Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -298,6 +298,7 @@ struct ieee80211_vif_chanctx_switch { * note that this is only called when it changes after the channel * context had been assigned. * @BSS_CHANGED_OCB: OCB join status changed + * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -323,6 +324,7 @@ enum ieee80211_bss_change { BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, BSS_CHANGED_OCB = 1<<22, + BSS_CHANGED_MU_GROUPS = 1<<23, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -436,6 +438,19 @@ struct ieee80211_event { }; /** + * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data + * + * This structure describes the group id data of VHT MU-MIMO + * + * @membership: 64 bits array - a bit is set if station is member of the group + * @position: 2 bits per group id indicating the position in the group + */ +struct ieee80211_mu_group_data { + u8 membership[WLAN_MEMBERSHIP_LEN]; + u8 position[WLAN_USER_POSITION_LEN]; +}; + +/** * struct ieee80211_bss_conf - holds the BSS's changing parameters * * This structure keeps information about a BSS (and an association @@ -477,6 +492,7 @@ struct ieee80211_event { * @enable_beacon: whether beaconing should be enabled or not * @chandef: Channel definition for this BSS -- the hardware might be * configured a higher bandwidth than this BSS uses, for example. + * @mu_group: VHT MU-MIMO group membership data * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation. * This field is only valid when the channel is a wide HT/VHT channel. * Note that with TDLS this can be the case (channel is HT, protection must @@ -535,6 +551,7 @@ struct ieee80211_bss_conf { s32 cqm_rssi_thold; u32 cqm_rssi_hyst; struct cfg80211_chan_def chandef; + struct ieee80211_mu_group_data mu_group; __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; int arp_addr_cnt; bool qos; @@ -691,12 +708,14 @@ enum mac80211_tx_info_flags { * protocol frame (e.g. EAP) * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll * frame (PS-Poll or uAPSD). + * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information * * These flags are used in tx_info->control.flags. */ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0), IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1), + IEEE80211_TX_CTRL_RATE_INJECT = BIT(2), }; /* @@ -993,6 +1012,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime * field) is valid and contains the time the last symbol of the MPDU * (including FCS) was received. + * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime + * field) is valid and contains the time the SYNC preamble was received. * @RX_FLAG_SHORTPRE: Short preamble was used for this frame * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index @@ -1014,6 +1035,14 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC * is stored in the @ampdu_delimiter_crc field) * @RX_FLAG_LDPC: LDPC was used + * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without + * processing it in any regular way. 
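A sketch of a driver consuming the new VHT MU-MIMO group data: on BSS_CHANGED_MU_GROUPS it pushes the membership and user-position arrays from bss_conf->mu_group to the device. my_bss_info_changed() and my_fw_set_mu_groups() are placeholders, not part of this series.

#include <net/mac80211.h>

/* hypothetical firmware hook */
static void my_fw_set_mu_groups(void *fw_priv, const u8 *membership,
				const u8 *position)
{
}

static void my_bss_info_changed(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *bss_conf,
				u32 changed)
{
	if (changed & BSS_CHANGED_MU_GROUPS) {
		const struct ieee80211_mu_group_data *mu = &bss_conf->mu_group;

		my_fw_set_mu_groups(hw->priv, mu->membership, mu->position);
	}
}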
+ * This is useful if drivers offload some frames but still want to report + * them for sniffing purposes. + * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except + * monitor interfaces. + * This is useful if drivers offload some frames but still want to report + * them for sniffing purposes. * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 * @RX_FLAG_10MHZ: 10 MHz (half channel) was used * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used @@ -1033,6 +1062,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), RX_FLAG_DECRYPTED = BIT(1), + RX_FLAG_MACTIME_PLCP_START = BIT(2), RX_FLAG_MMIC_STRIPPED = BIT(3), RX_FLAG_IV_STRIPPED = BIT(4), RX_FLAG_FAILED_FCS_CRC = BIT(5), @@ -1046,7 +1076,7 @@ enum mac80211_rx_flags { RX_FLAG_HT_GF = BIT(13), RX_FLAG_AMPDU_DETAILS = BIT(14), RX_FLAG_PN_VALIDATED = BIT(15), - /* bit 16 free */ + RX_FLAG_DUP_VALIDATED = BIT(16), RX_FLAG_AMPDU_LAST_KNOWN = BIT(17), RX_FLAG_AMPDU_IS_LAST = BIT(18), RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19), @@ -1054,6 +1084,8 @@ enum mac80211_rx_flags { RX_FLAG_MACTIME_END = BIT(21), RX_FLAG_VHT = BIT(22), RX_FLAG_LDPC = BIT(23), + RX_FLAG_ONLY_MONITOR = BIT(24), + RX_FLAG_SKIP_MONITOR = BIT(25), RX_FLAG_STBC_MASK = BIT(26) | BIT(27), RX_FLAG_10MHZ = BIT(28), RX_FLAG_5MHZ = BIT(29), @@ -1072,6 +1104,7 @@ enum mac80211_rx_flags { * @RX_VHT_FLAG_160MHZ: 160 MHz was used * @RX_VHT_FLAG_BF: packet was beamformed */ + enum mac80211_rx_vht_flags { RX_VHT_FLAG_80MHZ = BIT(0), RX_VHT_FLAG_160MHZ = BIT(1), @@ -1091,6 +1124,8 @@ enum mac80211_rx_vht_flags { * it but can store it and pass it back to the driver for synchronisation * @band: the active band when this frame was received * @freq: frequency the radio was tuned to when receiving this frame, in MHz + * This field must be set for management frames, but isn't strictly needed + * for data (other) frames - for those it only affects radiotap reporting. * @signal: signal strength when receiving this frame, either in dBm, in dB or * unspecified depending on the hardware capabilities flags * @IEEE80211_HW_SIGNAL_* @@ -1347,6 +1382,7 @@ enum ieee80211_vif_flags { * @csa_active: marks whether a channel switch is going on. Internally it is * write-protected by sdata_lock and local->mtx so holding either is fine * for read access. + * @mu_mimo_owner: indicates interface owns MU-MIMO capability * @driver_flags: flags/capabilities the driver has for this interface, * these need to be set (or cleared) when the interface is added * or, if supported by the driver, the interface type is changed @@ -1373,6 +1409,7 @@ struct ieee80211_vif { u8 addr[ETH_ALEN]; bool p2p; bool csa_active; + bool mu_mimo_owner; u8 cab_queue; u8 hw_queue[IEEE80211_NUM_ACS]; @@ -1486,9 +1523,8 @@ enum ieee80211_key_flags { * wants to be given when a frame is transmitted and needs to be * encrypted in hardware. * @cipher: The key's cipher suite selector. - * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver - * as well if it needs to do software PN assignment by itself - * (e.g. due to TSO) + * @tx_pn: PN used for TX keys, may be used by the driver as well if it + * needs to do software PN assignment by itself (e.g. due to TSO) * @flags: key flags, see &enum ieee80211_key_flags. 
* @keyidx: the key index (0-3) * @keylen: key material length @@ -1514,6 +1550,9 @@ struct ieee80211_key_conf { #define IEEE80211_MAX_PN_LEN 16 +#define TKIP_PN_TO_IV16(pn) ((u16)(pn & 0xffff)) +#define TKIP_PN_TO_IV32(pn) ((u32)((pn >> 16) & 0xffffffff)) + /** * struct ieee80211_key_seq - key sequence counter * @@ -1684,6 +1723,18 @@ struct ieee80211_sta_rates { * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. * @mfp: indicates whether the STA uses management frame protection or not. + * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single + * A-MSDU. Taken from the Extended Capabilities element. 0 means + * unlimited. + * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This + * field is always valid for packets with a VHT preamble. For packets + * with a HT preamble, additional limits apply: + * + If the skb is transmitted as part of a BA agreement, the + * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes. + * + If the skb is not part of a BA aggreement, the A-MSDU maximal + * size is min(max_amsdu_len, 7935) bytes. + * Both additional HT limits must be enforced by the low level driver. + * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2). * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { @@ -1702,6 +1753,8 @@ struct ieee80211_sta { bool tdls; bool tdls_initiator; bool mfp; + u8 max_amsdu_subframes; + u16 max_amsdu_len; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; @@ -1910,6 +1963,11 @@ struct ieee80211_txq { * by just its MAC address; this prevents, for example, the same station * from connecting to two virtual AP interfaces at the same time. * + * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the + * reordering buffer internally, guaranteeing mac80211 receives frames in + * order and does not need to manage its own reorder buffer or BA session + * timeout. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -1946,6 +2004,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU, IEEE80211_HW_BEACON_TX_STATUS, IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR, + IEEE80211_HW_SUPPORTS_REORDERING_BUFFER, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2167,7 +2226,7 @@ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev * @hw: the &struct ieee80211_hw to set the MAC address for * @addr: the address to set */ -static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) +static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr) { memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); } @@ -2684,6 +2743,33 @@ enum ieee80211_ampdu_mlme_action { }; /** + * struct ieee80211_ampdu_params - AMPDU action parameters + * + * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action. + * @sta: peer of this AMPDU session + * @tid: tid of the BA session + * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When + * action is set to %IEEE80211_AMPDU_RX_START the driver passes back the + * actual ssn value used to start the session and writes the value here. + * @buf_size: reorder buffer size (number of subframes). Valid only when the + * action is set to %IEEE80211_AMPDU_RX_START or + * %IEEE80211_AMPDU_TX_OPERATIONAL + * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU. 
+ * valid when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL + * @timeout: BA session timeout. Valid only when the action is set to + * %IEEE80211_AMPDU_RX_START + */ +struct ieee80211_ampdu_params { + enum ieee80211_ampdu_mlme_action action; + struct ieee80211_sta *sta; + u16 tid; + u16 ssn; + u8 buf_size; + bool amsdu; + u16 timeout; +}; + +/** * enum ieee80211_frame_release_type - frame release reason * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to @@ -3027,13 +3113,9 @@ enum ieee80211_reconfig_type { * @ampdu_action: Perform a certain A-MPDU action * The RA/TID combination determines the destination and TID we want * the ampdu action to be performed for. The action is defined through - * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) - * is the first frame we expect to perform the action on. Notice - * that TX/RX_STOP can pass NULL for this parameter. - * The @buf_size parameter is only valid when the action is set to - * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder - * buffer size (number of subframes) for this session -- the driver - * may neither send aggregates containing more subframes than this + * ieee80211_ampdu_mlme_action. + * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver + * may neither send aggregates containing more subframes than @buf_size * nor send aggregates in a way that lost frames would exceed the * buffer size. If just limiting the aggregate size, this would be * possible with a buf_size of 8: @@ -3044,9 +3126,6 @@ enum ieee80211_reconfig_type { * buffer size of 8. Correct ways to retransmit #1 would be: * - TX: 1 or 18 or 81 * Even "189" would be wrong since 1 could be lost again. - * The @amsdu parameter is valid when the action is set to - * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability - * to receive A-MSDU within A-MPDU. * * Returns a negative error code on failure. * The callback can sleep. @@ -3388,9 +3467,7 @@ struct ieee80211_ops { int (*tx_last_beacon)(struct ieee80211_hw *hw); int (*ampdu_action)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu); + struct ieee80211_ampdu_params *params); int (*get_survey)(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void (*rfkill_poll)(struct ieee80211_hw *hw); @@ -4374,21 +4451,19 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, struct sk_buff *skb, u8 *p2k); /** - * ieee80211_get_key_tx_seq - get key TX sequence counter + * ieee80211_tkip_add_iv - write TKIP IV and Ext. IV to pos * + * @pos: start of crypto header * @keyconf: the parameter passed with the set key - * @seq: buffer to receive the sequence data + * @pn: PN to add * - * This function allows a driver to retrieve the current TX IV/PN - * for the given key. It must not be called if IV generation is - * offloaded to the device. + * Returns: pointer to the octet following IVs (i.e. beginning of + * the packet payload) * - * Note that this function may only be called when no TX processing - * can be done concurrently, for example when queues are stopped - * and the stop has been synchronized. 
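Drivers now receive a single struct ieee80211_ampdu_params instead of the old unpacked argument list; a sketch of the updated callback shape follows (my_ampdu_action is illustrative and the bodies are stubs).

#include <linux/errno.h>
#include <net/mac80211.h>

static int my_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* peer/tid in params->sta / params->tid, start SSN in
		 * params->ssn, reorder buffer in params->buf_size,
		 * BA timeout in params->timeout
		 */
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* params->buf_size and params->amsdu are valid here */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}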
+ * This function writes the tkip IV value to pos (which should + * point to the crypto header) */ -void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq); +u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn); /** * ieee80211_get_key_rx_seq - get key RX sequence counter @@ -4410,23 +4485,6 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq); /** - * ieee80211_set_key_tx_seq - set key TX sequence counter - * - * @keyconf: the parameter passed with the set key - * @seq: new sequence data - * - * This function allows a driver to set the current TX IV/PNs for the - * given key. This is useful when resuming from WoWLAN sleep and the - * device may have transmitted frames using the PTK, e.g. replies to - * ARP requests. - * - * Note that this function may only be called when no TX processing - * can be done concurrently. - */ -void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq); - -/** * ieee80211_set_key_rx_seq - set key RX sequence counter * * @keyconf: the parameter passed with the set key @@ -5121,6 +5179,24 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, const u8 *addr); /** + * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered + * @pubsta: station struct + * @tid: the session's TID + * @ssn: starting sequence number of the bitmap, all frames before this are + * assumed to be out of the window after the call + * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc. + * @received_mpdus: number of received mpdus in firmware + * + * This function moves the BA window and releases all frames before @ssn, and + * marks frames marked in the bitmap as having been filtered. Afterwards, it + * checks if any frames in the window starting from @ssn can now be released + * (in case they were only waiting for frames that were filtered.) + */ +void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, + u16 ssn, u64 filtered, + u16 received_mpdus); + +/** * ieee80211_send_bar - send a BlockAckReq frame * * can be used to flush pending frames from the peer's aggregation reorder @@ -5371,6 +5447,21 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif) return ieee80211_iftype_p2p(vif->type, vif->p2p); } +/** + * ieee80211_update_mu_groups - set the VHT MU-MIMO groud data + * + * @vif: the specified virtual interface + * @membership: 64 bits array - a bit is set if station is member of the group + * @position: 2 bits per group id indicating the position in the group + * + * Note: This function assumes that the given vif is valid and the position and + * membership data is of the correct size and are in the same byte order as the + * matching GroupId management frame. + * Calls to this function need to be serialized with RX path. + */ +void ieee80211_update_mu_groups(struct ieee80211_vif *vif, + const u8 *membership, const u8 *position); + void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, int rssi_min_thold, int rssi_max_thold); @@ -5523,4 +5614,19 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); */ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); + +/** + * ieee80211_txq_get_depth - get pending frame/byte count of given txq + * + * The values are not guaranteed to be coherent with regard to each other, i.e. 
+ * txq state can change half-way of this function and the caller may end up + * with "new" frame_cnt and "old" byte_cnt or vice-versa. + * + * @txq: pointer obtained from station or virtual interface + * @frame_cnt: pointer to store frame count + * @byte_cnt: pointer to store byte count + */ +void ieee80211_txq_get_depth(struct ieee80211_txq *txq, + unsigned long *frame_cnt, + unsigned long *byte_cnt); #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h index da574bb..6cd7a70 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -16,10 +16,10 @@ #ifndef NET_MAC802154_H #define NET_MAC802154_H +#include <asm/unaligned.h> #include <net/af_ieee802154.h> #include <linux/ieee802154.h> #include <linux/skbuff.h> -#include <linux/unaligned/memmove.h> #include <net/cfg802154.h> @@ -247,13 +247,14 @@ struct ieee802154_ops { */ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) { - /* return some invalid fc on failure */ - if (unlikely(skb->len < 2)) { + /* check if we can fc at skb_mac_header of sk buffer */ + if (unlikely(!skb_mac_header_was_set(skb) || + (skb_tail_pointer(skb) - skb_mac_header(skb)) < 2)) { WARN_ON(1); return cpu_to_le16(0); } - return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb)); + return get_unaligned_le16(skb_mac_header(skb)); } /** @@ -263,7 +264,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) */ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) { - __put_unaligned_memmove64(swab64p(be64_src), le64_dst); + put_unaligned_le64(get_unaligned_be64(be64_src), le64_dst); } /** @@ -273,7 +274,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) */ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) { - __put_unaligned_memmove64(swab64p(le64_src), be64_dst); + put_unaligned_be64(get_unaligned_le64(le64_src), be64_dst); } /** @@ -283,7 +284,7 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) */ static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src) { - __put_unaligned_memmove16(swab16p(le16_src), be16_dst); + put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst); } /** diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h index e2a518b..a3f3c11 100644 --- a/include/net/netfilter/nft_masq.h +++ b/include/net/netfilter/nft_masq.h @@ -2,7 +2,9 @@ #define _NFT_MASQ_H_ struct nft_masq { - u32 flags; + u32 flags; + enum nft_registers sreg_proto_min:8; + enum nft_registers sreg_proto_max:8; }; extern const struct nla_policy nft_masq_policy[]; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2b7907a..a69cde3 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -80,9 +80,13 @@ struct netns_ipv4 { int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; + int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; + /* Shall we try to damage output packets if routing dev changes? 
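A driver-side sketch of ieee80211_txq_get_depth() (my_txq_has_backlog is an illustrative name): since the two counters are sampled independently, the result is only a scheduling hint.

#include <net/mac80211.h>

static bool my_txq_has_backlog(struct ieee80211_txq *txq)
{
	unsigned long frames, bytes;

	ieee80211_txq_get_depth(txq, &frames, &bytes);
	return frames > 0 || bytes > 0;
}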
*/ + int sysctl_ip_dynaddr; + int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; @@ -98,6 +102,21 @@ struct netns_ipv4 { int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; + int sysctl_tcp_syn_retries; + int sysctl_tcp_synack_retries; + int sysctl_tcp_syncookies; + int sysctl_tcp_reordering; + int sysctl_tcp_retries1; + int sysctl_tcp_retries2; + int sysctl_tcp_orphan_retries; + int sysctl_tcp_fin_timeout; + unsigned int sysctl_tcp_notsent_lowat; + + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_llm_reports; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; atomic_t dev_addr_genid; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index c0368db..10d0848 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -58,7 +58,10 @@ struct netns_ipv6 { struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; #ifdef CONFIG_IPV6_MULTIPLE_TABLES diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index 68e5097..039cc29 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h @@ -51,7 +51,7 @@ void pn_sock_init(void); struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa); void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb); void phonet_get_local_port_range(int *min, int *max); -void pn_sock_hash(struct sock *sk); +int pn_sock_hash(struct sock *sk); void pn_sock_unhash(struct sock *sk); int pn_sock_get_port(struct sock *sk, unsigned short sport); diff --git a/include/net/ping.h b/include/net/ping.h index ac80cb4..5fd7cc2 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -65,7 +65,7 @@ struct pingfakehdr { }; int ping_get_port(struct sock *sk, unsigned short ident); -void ping_hash(struct sock *sk); +int ping_hash(struct sock *sk); void ping_unhash(struct sock *sk); int ping_init_sock(struct sock *sk); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index bc49967..caa5e18 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -358,4 +358,69 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) } #endif /* CONFIG_NET_CLS_IND */ +struct tc_cls_u32_knode { + struct tcf_exts *exts; + struct tc_u32_sel *sel; + u32 handle; + u32 val; + u32 mask; + u32 link_handle; + u8 fshift; +}; + +struct tc_cls_u32_hnode { + u32 handle; + u32 prio; + unsigned int divisor; +}; + +enum tc_clsu32_command { + TC_CLSU32_NEW_KNODE, + TC_CLSU32_REPLACE_KNODE, + TC_CLSU32_DELETE_KNODE, + TC_CLSU32_NEW_HNODE, + TC_CLSU32_REPLACE_HNODE, + TC_CLSU32_DELETE_HNODE, +}; + +struct tc_cls_u32_offload { + /* knode values */ + enum tc_clsu32_command command; + union { + struct tc_cls_u32_knode knode; + struct tc_cls_u32_hnode hnode; + }; +}; + +/* tca flags definitions */ +#define TCA_CLS_FLAGS_SKIP_HW 1 + +static inline bool tc_should_offload(struct net_device *dev, u32 flags) +{ + if (!(dev->features & NETIF_F_HW_TC)) + return false; + + if (flags & TCA_CLS_FLAGS_SKIP_HW) + return false; + + if (!dev->netdev_ops->ndo_setup_tc) + return false; + + return true; +} + +enum tc_fl_command { + TC_CLSFLOWER_REPLACE, + TC_CLSFLOWER_DESTROY, +}; + +struct tc_cls_flower_offload { + enum tc_fl_command command; + unsigned long cookie; + struct flow_dissector *dissector; + struct fl_flow_key *mask; + 
struct fl_flow_key *key; + struct tcf_exts *exts; +}; + #endif diff --git a/include/net/raw.h b/include/net/raw.h index 6a40c65..3e78900 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -57,7 +57,7 @@ int raw_seq_open(struct inode *ino, struct file *file, #endif -void raw_hash_sk(struct sock *sk); +int raw_hash_sk(struct sock *sk); void raw_unhash_sk(struct sock *sk); struct raw_sock { diff --git a/include/net/route.h b/include/net/route.h index a3b9ef7..9b0a523 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -329,14 +329,13 @@ static inline int inet_iif(const struct sk_buff *skb) return skb->skb_iif; } -extern int sysctl_ip_default_ttl; - static inline int ip4_dst_hoplimit(const struct dst_entry *dst) { int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); + struct net *net = dev_net(dst->dev); if (hoplimit == 0) - hoplimit = sysctl_ip_default_ttl; + hoplimit = net->ipv4.sysctl_ip_default_ttl; return hoplimit; } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 636a362..46e55f0 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -345,6 +345,12 @@ extern struct Qdisc_ops pfifo_fast_ops; extern struct Qdisc_ops mq_qdisc_ops; extern struct Qdisc_ops noqueue_qdisc_ops; extern const struct Qdisc_ops *default_qdisc_ops; +static inline const struct Qdisc_ops * +get_default_qdisc_ops(const struct net_device *dev, int ntx) +{ + return ntx < dev->real_num_tx_queues ? + default_qdisc_ops : &pfifo_fast_ops; +} struct Qdisc_class_common { u32 classid; @@ -396,7 +402,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *qdisc); void qdisc_reset(struct Qdisc *qdisc); void qdisc_destroy(struct Qdisc *qdisc); -void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); +void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, + unsigned int len); struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops); struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, @@ -707,6 +714,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch) sch->qstats.backlog = 0; } +static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + struct Qdisc **pold) +{ + struct Qdisc *old; + + sch_tree_lock(sch); + old = *pold; + *pold = new; + if (old != NULL) { + qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); + qdisc_reset(old); + } + sch_tree_unlock(sch); + + return old; +} + static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch, struct sk_buff_head *list) { diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 487ef34..efc0174 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -201,7 +201,7 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *, struct sctp_chunk * sctp_make_datafrag_empty(struct sctp_association *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 flags, - __u16 ssn); + __u16 ssn, gfp_t gfp); struct sctp_chunk *sctp_make_ecne(const struct sctp_association *, const __u32); struct sctp_chunk *sctp_make_sack(const struct sctp_association *); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 5a57409..e2ac062 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -535,7 +535,6 @@ struct sctp_datamsg { struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, struct sctp_sndrcvinfo *, struct iov_iter *); -void sctp_datamsg_free(struct sctp_datamsg *); void sctp_datamsg_put(struct sctp_datamsg *); void 
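Classifiers are expected to call tc_should_offload() above (NETIF_F_HW_TC set, no TCA_CLS_FLAGS_SKIP_HW, ndo_setup_tc present) before issuing the new u32 offload commands; on the driver side the dispatch might look like the sketch below. my_setup_cls_u32() is illustrative and the ndo_setup_tc plumbing is elided.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int my_setup_cls_u32(struct net_device *dev,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		/* program cls->knode.sel, ->val, ->mask into hardware */
		return 0;
	case TC_CLSU32_DELETE_KNODE:
	case TC_CLSU32_NEW_HNODE:
	case TC_CLSU32_REPLACE_HNODE:
	case TC_CLSU32_DELETE_HNODE:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}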
sctp_chunk_fail(struct sctp_chunk *, int error); int sctp_chunk_abandoned(struct sctp_chunk *); @@ -656,7 +655,7 @@ void sctp_chunk_free(struct sctp_chunk *); void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); struct sctp_chunk *sctp_chunkify(struct sk_buff *, const struct sctp_association *, - struct sock *); + struct sock *, gfp_t gfp); void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *, union sctp_addr *); const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); @@ -718,10 +717,10 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *, __u16 sport, __u16 dport); struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int); sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *, - struct sctp_chunk *, int); + struct sctp_chunk *, int, gfp_t); sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *, struct sctp_chunk *); -int sctp_packet_transmit(struct sctp_packet *); +int sctp_packet_transmit(struct sctp_packet *, gfp_t); void sctp_packet_free(struct sctp_packet *); static inline int sctp_packet_empty(struct sctp_packet *packet) @@ -1054,7 +1053,7 @@ struct sctp_outq { void sctp_outq_init(struct sctp_association *, struct sctp_outq *); void sctp_outq_teardown(struct sctp_outq *); void sctp_outq_free(struct sctp_outq*); -int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk); +int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t); int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *); int sctp_outq_is_empty(const struct sctp_outq *); void sctp_outq_restart(struct sctp_outq *); @@ -1062,7 +1061,7 @@ void sctp_outq_restart(struct sctp_outq *); void sctp_retransmit(struct sctp_outq *, struct sctp_transport *, sctp_retransmit_reason_t); void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8); -int sctp_outq_uncork(struct sctp_outq *); +int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp); /* Uncork and flush an outqueue. */ static inline void sctp_outq_cork(struct sctp_outq *q) { diff --git a/include/net/sock.h b/include/net/sock.h index f5ea148..255d3e0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -984,7 +984,7 @@ struct proto { void (*release_cb)(struct sock *sk); /* Keeping track of sk's, looking them up, and port selection methods. */ - void (*hash)(struct sock *sk); + int (*hash)(struct sock *sk); void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); @@ -1194,10 +1194,10 @@ static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, /* With per-bucket locks this operation is not-atomic, so that * this version is not worse. 
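With proto->hash() now returning int (it can fail, for example while joining a reuseport group), callers propagate the error instead of assuming success; a minimal sketch under that assumption, with example_start_listening as an illustrative name:

#include <net/sock.h>

static int example_start_listening(struct sock *sk)
{
	/* previously: sk->sk_prot->hash(sk); had a void return */
	return sk->sk_prot->hash(sk);
}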
*/ -static inline void __sk_prot_rehash(struct sock *sk) +static inline int __sk_prot_rehash(struct sock *sk) { sk->sk_prot->unhash(sk); - sk->sk_prot->hash(sk); + return sk->sk_prot->hash(sk); } void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index 592a6bc..93c520b 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h @@ -2,6 +2,7 @@ #define __NET_TC_GACT_H #include <net/act_api.h> +#include <linux/tc_act/tc_gact.h> struct tcf_gact { struct tcf_common common; @@ -15,4 +16,19 @@ struct tcf_gact { #define to_gact(a) \ container_of(a->priv, struct tcf_gact, common) +static inline bool is_tcf_gact_shot(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + struct tcf_gact *gact; + + if (a->ops && a->ops->type != TCA_ACT_GACT) + return false; + + gact = a->priv; + if (gact->tcf_action == TC_ACT_SHOT) + return true; + +#endif + return false; +} #endif /* __NET_TC_GACT_H */ diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h new file mode 100644 index 0000000..dc9a09a --- /dev/null +++ b/include/net/tc_act/tc_ife.h @@ -0,0 +1,61 @@ +#ifndef __NET_TC_IFE_H +#define __NET_TC_IFE_H + +#include <net/act_api.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/module.h> + +#define IFE_METAHDRLEN 2 +struct tcf_ife_info { + struct tcf_common common; + u8 eth_dst[ETH_ALEN]; + u8 eth_src[ETH_ALEN]; + u16 eth_type; + u16 flags; + /* list of metaids allowed */ + struct list_head metalist; +}; +#define to_ife(a) \ + container_of(a->priv, struct tcf_ife_info, common) + +struct tcf_meta_info { + const struct tcf_meta_ops *ops; + void *metaval; + u16 metaid; + struct list_head metalist; +}; + +struct tcf_meta_ops { + u16 metaid; /*Maintainer provided ID */ + u16 metatype; /*netlink attribute type (look at net/netlink.h) */ + const char *name; + const char *synopsis; + struct list_head list; + int (*check_presence)(struct sk_buff *, struct tcf_meta_info *); + int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *); + int (*decode)(struct sk_buff *, void *, u16 len); + int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi); + int (*alloc)(struct tcf_meta_info *, void *); + void (*release)(struct tcf_meta_info *); + int (*validate)(void *val, int len); + struct module *owner; +}; + +#define MODULE_ALIAS_IFE_META(metan) MODULE_ALIAS("ifemeta" __stringify_1(metan)) + +int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi); +int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi); +int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, + const void *dval); +int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval); +int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval); +int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi); +int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi); +int ife_validate_meta_u32(void *val, int len); +int ife_validate_meta_u16(void *val, int len); +void ife_release_meta_gen(struct tcf_meta_info *mi); +int register_ife_op(struct tcf_meta_ops *mops); +int unregister_ife_op(struct tcf_meta_ops *mops); + +#endif /* __NET_TC_IFE_H */ diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index 0df9a0d..b496d5a 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -20,6 +20,7 @@ #define __NET_TC_SKBEDIT_H #include <net/act_api.h> +#include <linux/tc_act/tc_skbedit.h> struct tcf_skbedit { 
struct tcf_common common; @@ -32,4 +33,19 @@ struct tcf_skbedit { #define to_skbedit(a) \ container_of(a->priv, struct tcf_skbedit, common) +/* Return true iff action is mark */ +static inline bool is_tcf_skbedit_mark(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) + return to_skbedit(a)->flags == SKBEDIT_F_MARK; +#endif + return false; +} + +static inline u32 tcf_skbedit_mark(const struct tc_action *a) +{ + return to_skbedit(a)->mark; +} + #endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index b04bc98..b91370f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -238,13 +238,6 @@ extern struct inet_timewait_death_row tcp_death_row; extern int sysctl_tcp_timestamps; extern int sysctl_tcp_window_scaling; extern int sysctl_tcp_sack; -extern int sysctl_tcp_fin_timeout; -extern int sysctl_tcp_syn_retries; -extern int sysctl_tcp_synack_retries; -extern int sysctl_tcp_retries1; -extern int sysctl_tcp_retries2; -extern int sysctl_tcp_orphan_retries; -extern int sysctl_tcp_syncookies; extern int sysctl_tcp_fastopen; extern int sysctl_tcp_retrans_collapse; extern int sysctl_tcp_stdurg; @@ -273,7 +266,6 @@ extern int sysctl_tcp_thin_dupack; extern int sysctl_tcp_early_retrans; extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; -extern unsigned int sysctl_tcp_notsent_lowat; extern int sysctl_tcp_min_tso_segs; extern int sysctl_tcp_min_rtt_wlen; extern int sysctl_tcp_autocorking; @@ -567,6 +559,7 @@ void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); +void tcp_fin(struct sock *sk); /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); @@ -962,9 +955,11 @@ static inline void tcp_enable_fack(struct tcp_sock *tp) */ static inline void tcp_enable_early_retrans(struct tcp_sock *tp) { + struct net *net = sock_net((struct sock *)tp); + tp->do_early_retrans = sysctl_tcp_early_retrans && sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && - sysctl_tcp_reordering == 3; + net->ipv4.sysctl_tcp_reordering == 3; } static inline void tcp_disable_early_retrans(struct tcp_sock *tp) @@ -1251,7 +1246,7 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) static inline int tcp_fin_time(const struct sock *sk) { - int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout; + int fin_timeout = tcp_sk(sk)->linger2 ? 
: sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; const int rto = inet_csk(sk)->icsk_rto; if (fin_timeout < (rto << 2) - (rto >> 1)) @@ -1433,6 +1428,7 @@ void tcp_free_fastopen_req(struct tcp_sock *tp); extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; int tcp_fastopen_reset_cipher(void *key, unsigned int len); +void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct tcp_fastopen_cookie *foc, @@ -1681,7 +1677,8 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) { - return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat; + struct net *net = sock_net((struct sock *)tp); + return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; } static inline bool tcp_stream_memory_free(const struct sock *sk) @@ -1815,4 +1812,38 @@ static inline void skb_set_tcp_pure_ack(struct sk_buff *skb) skb->truesize = 2; } +static inline int tcp_inq(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + int answ; + + if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { + answ = 0; + } else if (sock_flag(sk, SOCK_URGINLINE) || + !tp->urg_data || + before(tp->urg_seq, tp->copied_seq) || + !before(tp->urg_seq, tp->rcv_nxt)) { + + answ = tp->rcv_nxt - tp->copied_seq; + + /* Subtract 1, if FIN was received */ + if (answ && sock_flag(sk, SOCK_DONE)) + answ--; + } else { + answ = tp->urg_seq - tp->copied_seq; + } + + return answ; +} + +static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) +{ + u16 segs_in; + + segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs); + tp->segs_in += segs_in; + if (skb->len > tcp_hdrlen(skb)) + tp->data_segs_in += segs_in; +} + #endif /* _TCP_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 2842541..92927f7 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -177,9 +177,10 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) } /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ -static inline void udp_lib_hash(struct sock *sk) +static inline int udp_lib_hash(struct sock *sk) { BUG(); + return 0; } void udp_lib_unhash(struct sock *sk); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index cca2ad3..b831140 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -88,8 +88,8 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, - __u8 prio, __u8 ttl, __be16 src_port, - __be16 dst_port, bool nocheck); + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck); #endif void udp_tunnel_sock_release(struct socket *sock); @@ -103,7 +103,7 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, { int type = udp_csum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - return iptunnel_handle_offloads(skb, udp_csum, type); + return iptunnel_handle_offloads(skb, type); } static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 0fb8644..a763c96 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -9,17 +9,71 @@ #include <linux/udp.h> #include <net/dst_metadata.h> +/* VXLAN protocol (RFC 7348) header: + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |R|R|R|R|I|R|R|R| Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VXLAN Network Identifier (VNI) | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * I = VXLAN Network Identifier (VNI) present. + */ +struct vxlanhdr { + __be32 vx_flags; + __be32 vx_vni; +}; + +/* VXLAN header flags. */ +#define VXLAN_HF_VNI cpu_to_be32(BIT(27)) + +#define VXLAN_N_VID (1u << 24) +#define VXLAN_VID_MASK (VXLAN_N_VID - 1) +#define VXLAN_VNI_MASK cpu_to_be32(VXLAN_VID_MASK << 8) +#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) + #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) +#define FDB_HASH_BITS 8 +#define FDB_HASH_SIZE (1<<FDB_HASH_BITS) + +/* Remote checksum offload for VXLAN (VXLAN_F_REMCSUM_[RT]X): + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |R|R|R|R|I|R|R|R|R|R|C| Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VXLAN Network Identifier (VNI) |O| Csum start | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * C = Remote checksum offload bit. When set indicates that the + * remote checksum offload data is present. + * + * O = Offset bit. Indicates the checksum offset relative to + * checksum start. + * + * Csum start = Checksum start divided by two. + * + * http://tools.ietf.org/html/draft-herbert-vxlan-rco + */ + +/* VXLAN-RCO header flags. */ +#define VXLAN_HF_RCO cpu_to_be32(BIT(21)) + +/* Remote checksum offload header option */ +#define VXLAN_RCO_MASK cpu_to_be32(0x7f) /* Last byte of vni field */ +#define VXLAN_RCO_UDP cpu_to_be32(0x80) /* Indicate UDP RCO (TCP when not set *) */ +#define VXLAN_RCO_SHIFT 1 /* Left shift of start */ +#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1) +#define VXLAN_MAX_REMCSUM_START (0x7f << VXLAN_RCO_SHIFT) /* - * VXLAN Group Based Policy Extension: + * VXLAN Group Based Policy Extension (VXLAN_F_GBP): * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R| Group Policy ID | + * |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | VXLAN Network Identifier (VNI) | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * + * G = Group Policy ID present. + * * D = Don't Learn bit. When set, this bit indicates that the egress * VTEP MUST NOT learn the source address of the encapsulated frame. * @@ -27,18 +81,18 @@ * this packet. Policies MUST NOT be applied by devices when the * A bit is set. 
* - * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy + * https://tools.ietf.org/html/draft-smith-vxlan-group-policy */ struct vxlanhdr_gbp { - __u8 vx_flags; + u8 vx_flags; #ifdef __LITTLE_ENDIAN_BITFIELD - __u8 reserved_flags1:3, + u8 reserved_flags1:3, policy_applied:1, reserved_flags2:2, dont_learn:1, reserved_flags3:1; #elif defined(__BIG_ENDIAN_BITFIELD) - __u8 reserved_flags1:1, + u8 reserved_flags1:1, dont_learn:1, reserved_flags2:2, policy_applied:1, @@ -50,7 +104,10 @@ struct vxlanhdr_gbp { __be32 vx_vni; }; -#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF) +/* VXLAN-GBP header flags. */ +#define VXLAN_HF_GBP cpu_to_be32(BIT(31)) + +#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | cpu_to_be32(0xFFFFFF)) /* skb->mark mapping * @@ -62,44 +119,6 @@ struct vxlanhdr_gbp { #define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16) #define VXLAN_GBP_ID_MASK (0xFFFF) -/* VXLAN protocol header: - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |G|R|R|R|I|R|R|C| Reserved | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | VXLAN Network Identifier (VNI) | Reserved | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * - * G = 1 Group Policy (VXLAN-GBP) - * I = 1 VXLAN Network Identifier (VNI) present - * C = 1 Remote checksum offload (RCO) - */ -struct vxlanhdr { - __be32 vx_flags; - __be32 vx_vni; -}; - -/* VXLAN header flags. */ -#define VXLAN_HF_RCO BIT(21) -#define VXLAN_HF_VNI BIT(27) -#define VXLAN_HF_GBP BIT(31) - -/* Remote checksum offload header option */ -#define VXLAN_RCO_MASK 0x7f /* Last byte of vni field */ -#define VXLAN_RCO_UDP 0x80 /* Indicate UDP RCO (TCP when not set *) */ -#define VXLAN_RCO_SHIFT 1 /* Left shift of start */ -#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1) -#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT) - -#define VXLAN_N_VID (1u << 24) -#define VXLAN_VID_MASK (VXLAN_N_VID - 1) -#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) -#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) - -#define VNI_HASH_BITS 10 -#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) -#define FDB_HASH_BITS 8 -#define FDB_HASH_SIZE (1<<FDB_HASH_BITS) - struct vxlan_metadata { u32 gbp; }; @@ -125,23 +144,25 @@ union vxlan_addr { struct vxlan_rdst { union vxlan_addr remote_ip; __be16 remote_port; - u32 remote_vni; + __be32 remote_vni; u32 remote_ifindex; struct list_head list; struct rcu_head rcu; + struct dst_cache dst_cache; }; struct vxlan_config { union vxlan_addr remote_ip; union vxlan_addr saddr; - u32 vni; + __be32 vni; int remote_ifindex; int mtu; __be16 dst_port; - __u16 port_min; - __u16 port_max; - __u8 tos; - __u8 ttl; + u16 port_min; + u16 port_max; + u8 tos; + u8 ttl; + __be32 label; u32 flags; unsigned long age_interval; unsigned int addrmax; @@ -177,7 +198,7 @@ struct vxlan_dev { #define VXLAN_F_L2MISS 0x08 #define VXLAN_F_L3MISS 0x10 #define VXLAN_F_IPV6 0x20 -#define VXLAN_F_UDP_CSUM 0x40 +#define VXLAN_F_UDP_ZERO_CSUM_TX 0x40 #define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80 #define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100 #define VXLAN_F_REMCSUM_TX 0x200 @@ -242,6 +263,68 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, /* IPv6 header + UDP + VXLAN + Ethernet header */ #define VXLAN6_HEADROOM (40 + 8 + 8 + 14) +static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb) +{ + return (struct vxlanhdr *)(udp_hdr(skb) + 1); +} + +static inline __be32 vxlan_vni(__be32 vni_field) +{ +#if defined(__BIG_ENDIAN) + return vni_field >> 8; +#else + return 
(vni_field & VXLAN_VNI_MASK) << 8; +#endif +} + +static inline __be32 vxlan_vni_field(__be32 vni) +{ +#if defined(__BIG_ENDIAN) + return vni << 8; +#else + return vni >> 8; +#endif +} + +static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id) +{ +#if defined(__BIG_ENDIAN) + return tun_id; +#else + return tun_id >> 32; +#endif +} + +static inline __be64 vxlan_vni_to_tun_id(__be32 vni) +{ +#if defined(__BIG_ENDIAN) + return (__be64)vni; +#else + return (__be64)vni << 32; +#endif +} + +static inline size_t vxlan_rco_start(__be32 vni_field) +{ + return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; +} + +static inline size_t vxlan_rco_offset(__be32 vni_field) +{ + return (vni_field & VXLAN_RCO_UDP) ? + offsetof(struct udphdr, check) : + offsetof(struct tcphdr, check); +} + +static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset) +{ + __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT); + + if (offset == offsetof(struct udphdr, check)) + vni_field |= VXLAN_RCO_UDP; + return vni_field; +} + #if IS_ENABLED(CONFIG_VXLAN) void vxlan_get_rx_port(struct net_device *netdev); #else diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index c34c900..931a47b 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -262,24 +262,22 @@ static inline enum ib_mtu iboe_get_mtu(int mtu) static inline int iboe_get_rate(struct net_device *dev) { - struct ethtool_cmd cmd; - u32 speed; + struct ethtool_link_ksettings cmd; int err; rtnl_lock(); - err = __ethtool_get_settings(dev, &cmd); + err = __ethtool_get_link_ksettings(dev, &cmd); rtnl_unlock(); if (err) return IB_RATE_PORT_CURRENT; - speed = ethtool_cmd_speed(&cmd); - if (speed >= 40000) + if (cmd.base.speed >= 40000) return IB_RATE_40_GBPS; - else if (speed >= 30000) + else if (cmd.base.speed >= 30000) return IB_RATE_30_GBPS; - else if (speed >= 20000) + else if (cmd.base.speed >= 20000) return IB_RATE_20_GBPS; - else if (speed >= 10000) + else if (cmd.base.speed >= 10000) return IB_RATE_10_GBPS; else return IB_RATE_PORT_CURRENT; diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 4dce116..9ebab3a 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -22,7 +22,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ * on-the-wire Rx packet header * - all multibyte fields should be in network byte order */ -struct rxrpc_header { +struct rxrpc_wire_header { __be32 epoch; /* client boot timestamp */ __be32 cid; /* connection and channel ID */ @@ -68,10 +68,19 @@ struct rxrpc_header { } __packed; -#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X) - extern const char *rxrpc_pkts[]; +#define RXRPC_SUPPORTED_PACKET_TYPES ( \ + (1 << RXRPC_PACKET_TYPE_DATA) | \ + (1 << RXRPC_PACKET_TYPE_ACK) | \ + (1 << RXRPC_PACKET_TYPE_BUSY) | \ + (1 << RXRPC_PACKET_TYPE_ABORT) | \ + (1 << RXRPC_PACKET_TYPE_ACKALL) | \ + (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \ + (1 << RXRPC_PACKET_TYPE_RESPONSE) | \ + /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \ + (1 << RXRPC_PACKET_TYPE_VERSION)) + /*****************************************************************************/ /* * jumbo packet secondary header diff --git a/include/trace/events/sunvnet.h b/include/trace/events/sunvnet.h new file mode 100644 index 0000000..eb080b2 --- /dev/null +++ b/include/trace/events/sunvnet.h @@ -0,0 +1,139 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sunvnet + +#if !defined(_TRACE_SUNVNET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SUNVNET_H + +#include <linux/tracepoint.h> + 
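/*
 * Illustrative aside (not part of this patch): each TRACE_EVENT() definition
 * that follows expands into a trace_<name>() helper with the TP_PROTO()
 * signature, which the sunvnet/ldmvsw drivers can call from their data paths.
 * The sketch below shows the general pattern only; the function and variable
 * names are hypothetical, and exactly one driver .c file must define
 * CREATE_TRACE_POINTS before including this header so the events are
 * instantiated.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

/* Hypothetical receive-path hook demonstrating the generated helper. */
static void example_vnet_rx_hook(int local_sid, int remote_sid,
				 int desc_index, int needs_ack)
{
	/* Emits the vnet_rx_one event defined below when tracing is enabled. */
	trace_vnet_rx_one(local_sid, remote_sid, desc_index, needs_ack);
}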
+TRACE_EVENT(vnet_rx_one, + + TP_PROTO(int lsid, int rsid, int index, int needs_ack), + + TP_ARGS(lsid, rsid, index, needs_ack), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, index) + __field(int, needs_ack) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->index = index; + __entry->needs_ack = needs_ack; + ), + + TP_printk("(%x:%x) walk_rx_one index %d; needs_ack %d", + __entry->lsid, __entry->rsid, + __entry->index, __entry->needs_ack) +); + +DECLARE_EVENT_CLASS(vnet_tx_stopped_ack_template, + + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + + TP_ARGS(lsid, rsid, ack_end, npkts), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, ack_end) + __field(int, npkts) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->ack_end = ack_end; + __entry->npkts = npkts; + ), + + TP_printk("(%x:%x) stopped ack for %d; npkts %d", + __entry->lsid, __entry->rsid, + __entry->ack_end, __entry->npkts) +); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_send_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_defer_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_pending_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); + +TRACE_EVENT(vnet_rx_stopped_ack, + + TP_PROTO(int lsid, int rsid, int end), + + TP_ARGS(lsid, rsid, end), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, end) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->end = end; + ), + + TP_printk("(%x:%x) stopped ack for index %d", + __entry->lsid, __entry->rsid, __entry->end) +); + +TRACE_EVENT(vnet_tx_trigger, + + TP_PROTO(int lsid, int rsid, int start, int err), + + TP_ARGS(lsid, rsid, start, err), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, start) + __field(int, err) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->start = start; + __entry->err = err; + ), + + TP_printk("(%x:%x) Tx trigger for %d sent with err %d %s", + __entry->lsid, __entry->rsid, __entry->start, + __entry->err, __entry->err > 0 ? "(ok)" : " ") +); + +TRACE_EVENT(vnet_skip_tx_trigger, + + TP_PROTO(int lsid, int rsid, int last), + + TP_ARGS(lsid, rsid, last), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, last) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->last = last; + ), + + TP_printk("(%x:%x) Skip Tx trigger. 
Last trigger sent was %d", + __entry->lsid, __entry->rsid, __entry->last) +); +#endif /* _TRACE_SOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index fb8a416..67d632f 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -90,4 +90,6 @@ #define SO_ATTACH_REUSEPORT_CBPF 51 #define SO_ATTACH_REUSEPORT_EBPF 52 +#define SO_CNX_ADVICE 53 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 5c9ae6a..0495884 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -174,6 +174,7 @@ header-y += if_hippi.h header-y += if_infiniband.h header-y += if_link.h header-y += if_ltalk.h +header-y += if_macsec.h header-y += if_packet.h header-y += if_phonet.h header-y += if_plip.h diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 5df4881..924f537 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -81,6 +81,9 @@ enum bpf_map_type { BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY, + BPF_MAP_TYPE_PERCPU_HASH, + BPF_MAP_TYPE_PERCPU_ARRAY, + BPF_MAP_TYPE_STACK_TRACE, }; enum bpf_prog_type { @@ -98,12 +101,15 @@ enum bpf_prog_type { #define BPF_NOEXIST 1 /* create new element if it didn't exist */ #define BPF_EXIST 2 /* update existing element */ +#define BPF_F_NO_PREALLOC (1U << 0) + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ + __u32 map_flags; /* prealloc or not */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -270,6 +276,42 @@ enum bpf_func_id { */ BPF_FUNC_perf_event_output, BPF_FUNC_skb_load_bytes, + + /** + * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id + * @ctx: struct pt_regs* + * @map: pointer to stack_trace map + * @flags: bits 0-7 - numer of stack frames to skip + * bit 8 - collect user stack instead of kernel + * bit 9 - compare stacks by hash only + * bit 10 - if two different stacks hash into the same stackid + * discard old + * other bits - reserved + * Return: >= 0 stackid on success or negative error + */ + BPF_FUNC_get_stackid, + + /** + * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff + * @from: raw from buffer + * @from_size: length of from buffer + * @to: raw to buffer + * @to_size: length of to buffer + * @seed: optional seed + * Return: csum result + */ + BPF_FUNC_csum_diff, + + /** + * bpf_skb_[gs]et_tunnel_opt(skb, opt, size) + * retrieve or populate tunnel options metadata + * @skb: pointer to skb + * @opt: pointer to raw tunnel option data + * @size: size of @opt + * Return: 0 on success for set, option size for get + */ + BPF_FUNC_skb_get_tunnel_opt, + BPF_FUNC_skb_set_tunnel_opt, __BPF_FUNC_MAX_ID, }; @@ -277,6 +319,7 @@ enum bpf_func_id { /* BPF_FUNC_skb_store_bytes flags. */ #define BPF_F_RECOMPUTE_CSUM (1ULL << 0) +#define BPF_F_INVALIDATE_HASH (1ULL << 1) /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. @@ -285,6 +328,7 @@ enum bpf_func_id { /* BPF_FUNC_l4_csum_replace flags. */ #define BPF_F_PSEUDO_HDR (1ULL << 4) +#define BPF_F_MARK_MANGLED_0 (1ULL << 5) /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. 
*/ #define BPF_F_INGRESS (1ULL << 0) @@ -292,8 +336,15 @@ enum bpf_func_id { /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ #define BPF_F_TUNINFO_IPV6 (1ULL << 0) +/* BPF_FUNC_get_stackid flags. */ +#define BPF_F_SKIP_FIELD_MASK 0xffULL +#define BPF_F_USER_STACK (1ULL << 8) +#define BPF_F_FAST_STACK_CMP (1ULL << 9) +#define BPF_F_REUSE_STACKID (1ULL << 10) + /* BPF_FUNC_skb_set_tunnel_key flags. */ #define BPF_F_ZERO_CSUM_TX (1ULL << 1) +#define BPF_F_DONT_FRAGMENT (1ULL << 2) /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure @@ -324,6 +375,7 @@ struct bpf_tunnel_key { }; __u8 tunnel_tos; __u8 tunnel_ttl; + __u32 tunnel_label; }; #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h new file mode 100644 index 0000000..c9fee57 --- /dev/null +++ b/include/uapi/linux/devlink.h @@ -0,0 +1,72 @@ +/* + * include/uapi/linux/devlink.h - Network physical device Netlink interface + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _UAPI_LINUX_DEVLINK_H_ +#define _UAPI_LINUX_DEVLINK_H_ + +#define DEVLINK_GENL_NAME "devlink" +#define DEVLINK_GENL_VERSION 0x1 +#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config" + +enum devlink_command { + /* don't change the order or add anything between, this is ABI! */ + DEVLINK_CMD_UNSPEC, + + DEVLINK_CMD_GET, /* can dump */ + DEVLINK_CMD_SET, + DEVLINK_CMD_NEW, + DEVLINK_CMD_DEL, + + DEVLINK_CMD_PORT_GET, /* can dump */ + DEVLINK_CMD_PORT_SET, + DEVLINK_CMD_PORT_NEW, + DEVLINK_CMD_PORT_DEL, + + DEVLINK_CMD_PORT_SPLIT, + DEVLINK_CMD_PORT_UNSPLIT, + + /* add new commands above here */ + + __DEVLINK_CMD_MAX, + DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 +}; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET, + DEVLINK_PORT_TYPE_AUTO, + DEVLINK_PORT_TYPE_ETH, + DEVLINK_PORT_TYPE_IB, +}; + +enum devlink_attr { + /* don't change the order or add anything between, this is ABI! */ + DEVLINK_ATTR_UNSPEC, + + /* bus name + dev name together are a handle for devlink entity */ + DEVLINK_ATTR_BUS_NAME, /* string */ + DEVLINK_ATTR_DEV_NAME, /* string */ + + DEVLINK_ATTR_PORT_INDEX, /* u32 */ + DEVLINK_ATTR_PORT_TYPE, /* u16 */ + DEVLINK_ATTR_PORT_DESIRED_TYPE, /* u16 */ + DEVLINK_ATTR_PORT_NETDEV_IFINDEX, /* u32 */ + DEVLINK_ATTR_PORT_NETDEV_NAME, /* string */ + DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */ + DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */ + DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */ + + /* add new attributes above here, update the policy in devlink.c */ + + __DEVLINK_ATTR_MAX, + DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1 +}; + +#endif /* _UAPI_LINUX_DEVLINK_H_ */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 57fa390..2835b07 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -13,15 +13,21 @@ #ifndef _UAPI_LINUX_ETHTOOL_H #define _UAPI_LINUX_ETHTOOL_H +#include <linux/kernel.h> #include <linux/types.h> #include <linux/if_ether.h> +#ifndef __KERNEL__ +#include <limits.h> /* for INT_MAX */ +#endif + /* All structures exposed to userland should be defined such that they * have the same layout for 32-bit and 64-bit userland. 
*/ /** - * struct ethtool_cmd - link control and status + * struct ethtool_cmd - DEPRECATED, link control and status + * This structure is DEPRECATED, please use struct ethtool_link_settings. * @cmd: Command number = %ETHTOOL_GSET or %ETHTOOL_SSET * @supported: Bitmask of %SUPPORTED_* flags for the link modes, * physical connectors and other link features for which the @@ -31,7 +37,7 @@ * physical connectors and other link features that are * advertised through autonegotiation or enabled for * auto-detection. - * @speed: Low bits of the speed + * @speed: Low bits of the speed, 1Mb units, 0 to INT_MAX or SPEED_UNKNOWN * @duplex: Duplex mode; one of %DUPLEX_* * @port: Physical connector type; one of %PORT_* * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not @@ -47,7 +53,7 @@ * obsoleted by &struct ethtool_coalesce. Read-only; deprecated. * @maxrxpkt: Historically used to report RX IRQ coalescing; now * obsoleted by &struct ethtool_coalesce. Read-only; deprecated. - * @speed_hi: High bits of the speed + * @speed_hi: High bits of the speed, 1Mb units, 0 to INT_MAX or SPEED_UNKNOWN * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of * %ETH_TP_MDI_*. If the status is unknown or not applicable, the * value will be %ETH_TP_MDI_INVALID. Read-only. @@ -748,6 +754,56 @@ struct ethtool_usrip4_spec { __u8 proto; }; +/** + * struct ethtool_tcpip6_spec - flow specification for TCP/IPv6 etc. + * @ip6src: Source host + * @ip6dst: Destination host + * @psrc: Source port + * @pdst: Destination port + * @tclass: Traffic Class + * + * This can be used to specify a TCP/IPv6, UDP/IPv6 or SCTP/IPv6 flow. + */ +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +/** + * struct ethtool_ah_espip6_spec - flow specification for IPsec/IPv6 + * @ip6src: Source host + * @ip6dst: Destination host + * @spi: Security parameters index + * @tclass: Traffic Class + * + * This can be used to specify an IPsec transport or tunnel over IPv6. + */ +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +/** + * struct ethtool_usrip6_spec - general flow specification for IPv6 + * @ip6src: Source host + * @ip6dst: Destination host + * @l4_4_bytes: First 4 bytes of transport (layer 4) header + * @tclass: Traffic Class + * @l4_proto: Transport protocol number (nexthdr after any Extension Headers) + */ +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; @@ -755,6 +811,12 @@ union ethtool_flow_union { struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52]; }; @@ -1146,10 +1208,29 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT) #define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT) +#define MAX_NUM_QUEUE 4096 + +/** + * struct ethtool_per_queue_op - apply sub command to the queues in mask. 
+ * @cmd: ETHTOOL_PERQUEUE + * @sub_command: the sub command which apply to each queues + * @queue_mask: Bitmap of the queues which sub command apply to + * @data: A complete command structure following for each of the queues addressed + */ +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[__KERNEL_DIV_ROUND_UP(MAX_NUM_QUEUE, 32)]; + char data[]; +}; /* CMDs currently supported */ -#define ETHTOOL_GSET 0x00000001 /* Get settings. */ -#define ETHTOOL_SSET 0x00000002 /* Set settings. */ +#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings. + * Please use ETHTOOL_GLINKSETTINGS + */ +#define ETHTOOL_SSET 0x00000002 /* DEPRECATED, Set settings. + * Please use ETHTOOL_SLINKSETTINGS + */ #define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ #define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ @@ -1229,73 +1310,141 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */ #define ETHTOOL_GPHYSTATS 0x0000004a /* get PHY-specific statistics */ +#define ETHTOOL_PERQUEUE 0x0000004b /* Set per queue options */ + +#define ETHTOOL_GLINKSETTINGS 0x0000004c /* Get ethtool_link_settings */ +#define ETHTOOL_SLINKSETTINGS 0x0000004d /* Set ethtool_link_settings */ + + /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET #define SPARC_ETH_SSET ETHTOOL_SSET -#define SUPPORTED_10baseT_Half (1 << 0) -#define SUPPORTED_10baseT_Full (1 << 1) -#define SUPPORTED_100baseT_Half (1 << 2) -#define SUPPORTED_100baseT_Full (1 << 3) -#define SUPPORTED_1000baseT_Half (1 << 4) -#define SUPPORTED_1000baseT_Full (1 << 5) -#define SUPPORTED_Autoneg (1 << 6) -#define SUPPORTED_TP (1 << 7) -#define SUPPORTED_AUI (1 << 8) -#define SUPPORTED_MII (1 << 9) -#define SUPPORTED_FIBRE (1 << 10) -#define SUPPORTED_BNC (1 << 11) -#define SUPPORTED_10000baseT_Full (1 << 12) -#define SUPPORTED_Pause (1 << 13) -#define SUPPORTED_Asym_Pause (1 << 14) -#define SUPPORTED_2500baseX_Full (1 << 15) -#define SUPPORTED_Backplane (1 << 16) -#define SUPPORTED_1000baseKX_Full (1 << 17) -#define SUPPORTED_10000baseKX4_Full (1 << 18) -#define SUPPORTED_10000baseKR_Full (1 << 19) -#define SUPPORTED_10000baseR_FEC (1 << 20) -#define SUPPORTED_20000baseMLD2_Full (1 << 21) -#define SUPPORTED_20000baseKR2_Full (1 << 22) -#define SUPPORTED_40000baseKR4_Full (1 << 23) -#define SUPPORTED_40000baseCR4_Full (1 << 24) -#define SUPPORTED_40000baseSR4_Full (1 << 25) -#define SUPPORTED_40000baseLR4_Full (1 << 26) -#define SUPPORTED_56000baseKR4_Full (1 << 27) -#define SUPPORTED_56000baseCR4_Full (1 << 28) -#define SUPPORTED_56000baseSR4_Full (1 << 29) -#define SUPPORTED_56000baseLR4_Full (1 << 30) - -#define ADVERTISED_10baseT_Half (1 << 0) -#define ADVERTISED_10baseT_Full (1 << 1) -#define ADVERTISED_100baseT_Half (1 << 2) -#define ADVERTISED_100baseT_Full (1 << 3) -#define ADVERTISED_1000baseT_Half (1 << 4) -#define ADVERTISED_1000baseT_Full (1 << 5) -#define ADVERTISED_Autoneg (1 << 6) -#define ADVERTISED_TP (1 << 7) -#define ADVERTISED_AUI (1 << 8) -#define ADVERTISED_MII (1 << 9) -#define ADVERTISED_FIBRE (1 << 10) -#define ADVERTISED_BNC (1 << 11) -#define ADVERTISED_10000baseT_Full (1 << 12) -#define ADVERTISED_Pause (1 << 13) -#define ADVERTISED_Asym_Pause (1 << 14) -#define ADVERTISED_2500baseX_Full (1 << 15) -#define ADVERTISED_Backplane (1 << 16) -#define ADVERTISED_1000baseKX_Full (1 << 17) -#define ADVERTISED_10000baseKX4_Full (1 << 18) -#define ADVERTISED_10000baseKR_Full (1 
<< 19) -#define ADVERTISED_10000baseR_FEC (1 << 20) -#define ADVERTISED_20000baseMLD2_Full (1 << 21) -#define ADVERTISED_20000baseKR2_Full (1 << 22) -#define ADVERTISED_40000baseKR4_Full (1 << 23) -#define ADVERTISED_40000baseCR4_Full (1 << 24) -#define ADVERTISED_40000baseSR4_Full (1 << 25) -#define ADVERTISED_40000baseLR4_Full (1 << 26) -#define ADVERTISED_56000baseKR4_Full (1 << 27) -#define ADVERTISED_56000baseCR4_Full (1 << 28) -#define ADVERTISED_56000baseSR4_Full (1 << 29) -#define ADVERTISED_56000baseLR4_Full (1 << 30) +/* Link mode bit indices */ +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + + /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit + * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* + * macro for bits > 31. The only way to use indices > 31 is to + * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. + */ + + __ETHTOOL_LINK_MODE_LAST + = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, +}; + +#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ + (1UL << (ETHTOOL_LINK_MODE_ ## base_name ## _BIT)) + +/* DEPRECATED macros. Please migrate to + * ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. Please do NOT + * define any new SUPPORTED_* macro for bits > 31. 
+ */ +#define SUPPORTED_10baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Half) +#define SUPPORTED_10baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Full) +#define SUPPORTED_100baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Half) +#define SUPPORTED_100baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Full) +#define SUPPORTED_1000baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Half) +#define SUPPORTED_1000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Full) +#define SUPPORTED_Autoneg __ETHTOOL_LINK_MODE_LEGACY_MASK(Autoneg) +#define SUPPORTED_TP __ETHTOOL_LINK_MODE_LEGACY_MASK(TP) +#define SUPPORTED_AUI __ETHTOOL_LINK_MODE_LEGACY_MASK(AUI) +#define SUPPORTED_MII __ETHTOOL_LINK_MODE_LEGACY_MASK(MII) +#define SUPPORTED_FIBRE __ETHTOOL_LINK_MODE_LEGACY_MASK(FIBRE) +#define SUPPORTED_BNC __ETHTOOL_LINK_MODE_LEGACY_MASK(BNC) +#define SUPPORTED_10000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseT_Full) +#define SUPPORTED_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Pause) +#define SUPPORTED_Asym_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Asym_Pause) +#define SUPPORTED_2500baseX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(2500baseX_Full) +#define SUPPORTED_Backplane __ETHTOOL_LINK_MODE_LEGACY_MASK(Backplane) +#define SUPPORTED_1000baseKX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseKX_Full) +#define SUPPORTED_10000baseKX4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKX4_Full) +#define SUPPORTED_10000baseKR_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKR_Full) +#define SUPPORTED_10000baseR_FEC __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseR_FEC) +#define SUPPORTED_20000baseMLD2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseMLD2_Full) +#define SUPPORTED_20000baseKR2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseKR2_Full) +#define SUPPORTED_40000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseKR4_Full) +#define SUPPORTED_40000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseCR4_Full) +#define SUPPORTED_40000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseSR4_Full) +#define SUPPORTED_40000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseLR4_Full) +#define SUPPORTED_56000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseKR4_Full) +#define SUPPORTED_56000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseCR4_Full) +#define SUPPORTED_56000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseSR4_Full) +#define SUPPORTED_56000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseLR4_Full) +/* Please do not define any new SUPPORTED_* macro for bits > 31, see + * notice above. + */ + +/* + * DEPRECATED macros. Please migrate to + * ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. Please do NOT + * define any new ADERTISE_* macro for bits > 31. 
+ */ +#define ADVERTISED_10baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Half) +#define ADVERTISED_10baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Full) +#define ADVERTISED_100baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Half) +#define ADVERTISED_100baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Full) +#define ADVERTISED_1000baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Half) +#define ADVERTISED_1000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Full) +#define ADVERTISED_Autoneg __ETHTOOL_LINK_MODE_LEGACY_MASK(Autoneg) +#define ADVERTISED_TP __ETHTOOL_LINK_MODE_LEGACY_MASK(TP) +#define ADVERTISED_AUI __ETHTOOL_LINK_MODE_LEGACY_MASK(AUI) +#define ADVERTISED_MII __ETHTOOL_LINK_MODE_LEGACY_MASK(MII) +#define ADVERTISED_FIBRE __ETHTOOL_LINK_MODE_LEGACY_MASK(FIBRE) +#define ADVERTISED_BNC __ETHTOOL_LINK_MODE_LEGACY_MASK(BNC) +#define ADVERTISED_10000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseT_Full) +#define ADVERTISED_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Pause) +#define ADVERTISED_Asym_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Asym_Pause) +#define ADVERTISED_2500baseX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(2500baseX_Full) +#define ADVERTISED_Backplane __ETHTOOL_LINK_MODE_LEGACY_MASK(Backplane) +#define ADVERTISED_1000baseKX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseKX_Full) +#define ADVERTISED_10000baseKX4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKX4_Full) +#define ADVERTISED_10000baseKR_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKR_Full) +#define ADVERTISED_10000baseR_FEC __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseR_FEC) +#define ADVERTISED_20000baseMLD2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseMLD2_Full) +#define ADVERTISED_20000baseKR2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseKR2_Full) +#define ADVERTISED_40000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseKR4_Full) +#define ADVERTISED_40000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseCR4_Full) +#define ADVERTISED_40000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseSR4_Full) +#define ADVERTISED_40000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseLR4_Full) +#define ADVERTISED_56000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseKR4_Full) +#define ADVERTISED_56000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseCR4_Full) +#define ADVERTISED_56000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseSR4_Full) +#define ADVERTISED_56000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseLR4_Full) +/* Please do not define any new ADVERTISED_* macro for bits > 31, see + * notice above. + */ /* The following are all involved in forcing a particular link * mode for the device for setting things. When getting the @@ -1303,7 +1452,7 @@ enum ethtool_sfeatures_retval_bits { * it was forced up into this mode or autonegotiated. */ -/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */ +/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 @@ -1319,11 +1468,28 @@ enum ethtool_sfeatures_retval_bits { #define SPEED_UNKNOWN -1 +static inline int ethtool_validate_speed(__u32 speed) +{ + return speed <= INT_MAX || speed == SPEED_UNKNOWN; +} + /* Duplex, half or full. */ #define DUPLEX_HALF 0x00 #define DUPLEX_FULL 0x01 #define DUPLEX_UNKNOWN 0xff +static inline int ethtool_validate_duplex(__u8 duplex) +{ + switch (duplex) { + case DUPLEX_HALF: + case DUPLEX_FULL: + case DUPLEX_UNKNOWN: + return 1; + } + + return 0; +} + /* Which connector port. 
*/ #define PORT_TP 0x00 #define PORT_AUI 0x01 @@ -1367,15 +1533,17 @@ enum ethtool_sfeatures_retval_bits { #define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ #define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ #define AH_ESP_V4_FLOW 0x04 /* hash only */ -#define TCP_V6_FLOW 0x05 /* hash only */ -#define UDP_V6_FLOW 0x06 /* hash only */ -#define SCTP_V6_FLOW 0x07 /* hash only */ +#define TCP_V6_FLOW 0x05 /* hash or spec (tcp_ip6_spec; nfc only) */ +#define UDP_V6_FLOW 0x06 /* hash or spec (udp_ip6_spec; nfc only) */ +#define SCTP_V6_FLOW 0x07 /* hash or spec (sctp_ip6_spec; nfc only) */ #define AH_ESP_V6_FLOW 0x08 /* hash only */ #define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ #define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ -#define AH_V6_FLOW 0x0b /* hash only */ -#define ESP_V6_FLOW 0x0c /* hash only */ -#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define AH_V6_FLOW 0x0b /* hash or spec (ah_ip6_spec; nfc only) */ +#define ESP_V6_FLOW 0x0c /* hash or spec (esp_ip6_spec; nfc only) */ +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define IP_USER_FLOW IPV4_USER_FLOW +#define IPV6_USER_FLOW 0x0e /* spec only (usr_ip6_spec; nfc only) */ #define IPV4_FLOW 0x10 /* hash only */ #define IPV6_FLOW 0x11 /* hash only */ #define ETHER_FLOW 0x12 /* spec only (ether_spec) */ @@ -1441,4 +1609,123 @@ enum ethtool_reset_flags { }; #define ETH_RESET_SHARED_SHIFT 16 + +/** + * struct ethtool_link_settings - link control and status + * + * IMPORTANT, Backward compatibility notice: When implementing new + * user-space tools, please first try %ETHTOOL_GLINKSETTINGS, and + * if it succeeds use %ETHTOOL_SLINKSETTINGS to change link + * settings; do not use %ETHTOOL_SSET if %ETHTOOL_GLINKSETTINGS + * succeeded: stick to %ETHTOOL_GLINKSETTINGS/%SLINKSETTINGS in + * that case. Conversely, if %ETHTOOL_GLINKSETTINGS fails, use + * %ETHTOOL_GSET to query and %ETHTOOL_SSET to change link + * settings; do not use %ETHTOOL_SLINKSETTINGS if + * %ETHTOOL_GLINKSETTINGS failed: stick to + * %ETHTOOL_GSET/%ETHTOOL_SSET in that case. + * + * @cmd: Command number = %ETHTOOL_GLINKSETTINGS or %ETHTOOL_SLINKSETTINGS + * @speed: Link speed (Mbps) + * @duplex: Duplex mode; one of %DUPLEX_* + * @port: Physical connector type; one of %PORT_* + * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not + * applicable. For clause 45 PHYs this is the PRTAD. + * @autoneg: Enable/disable autonegotiation and auto-detection; + * either %AUTONEG_DISABLE or %AUTONEG_ENABLE + * @mdio_support: Bitmask of %ETH_MDIO_SUPPORTS_* flags for the MDIO + * protocols supported by the interface; 0 if unknown. + * Read-only. + * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of + * %ETH_TP_MDI_*. If the status is unknown or not applicable, the + * value will be %ETH_TP_MDI_INVALID. Read-only. + * @eth_tp_mdix_ctrl: Ethernet twisted pair MDI(-X) control; one of + * %ETH_TP_MDI_*. If MDI(-X) control is not implemented, reads + * yield %ETH_TP_MDI_INVALID and writes may be ignored or rejected. + * When written successfully, the link should be renegotiated if + * necessary. + * @link_mode_masks_nwords: Number of 32-bit words for each of the + * supported, advertising, lp_advertising link mode bitmaps. 
For + * %ETHTOOL_GLINKSETTINGS: on entry, number of words passed by user + * (>= 0); on return, if handshake in progress, negative if + * request size unsupported by kernel: absolute value indicates + * kernel recommended size and cmd field is 0, as well as all the + * other fields; otherwise (handshake completed), strictly + * positive to indicate size used by kernel and cmd field is + * %ETHTOOL_GLINKSETTINGS, all other fields populated by driver. For + * %ETHTOOL_SLINKSETTINGS: must be valid on entry, ie. a positive + * value returned previously by %ETHTOOL_GLINKSETTINGS, otherwise + * refused. For drivers: ignore this field (use kernel's + * __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will + * be overwritten by kernel. + * @supported: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features for which the interface + * supports autonegotiation or auto-detection. Read-only. + * @advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features that are advertised through + * autonegotiation or enabled for auto-detection. + * @lp_advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, and other + * link features that the link partner advertised through + * autonegotiation; 0 if unknown or not applicable. Read-only. + * + * If autonegotiation is disabled, the speed and @duplex represent the + * fixed link mode and are writable if the driver supports multiple + * link modes. If it is enabled then they are read-only; if the link + * is up they represent the negotiated link mode; if the link is down, + * the speed is 0, %SPEED_UNKNOWN or the highest enabled speed and + * @duplex is %DUPLEX_UNKNOWN or the best enabled duplex mode. + * + * Some hardware interfaces may have multiple PHYs and/or physical + * connectors fitted or do not allow the driver to detect which are + * fitted. For these interfaces @port and/or @phy_address may be + * writable, possibly dependent on @autoneg being %AUTONEG_DISABLE. + * Otherwise, attempts to write different values may be ignored or + * rejected. + * + * Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt + * are not available in %ethtool_link_settings. Until all drivers are + * converted to ignore them or to the new %ethtool_link_settings API, + * for both queries and changes, users should always try + * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick + * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it + * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and + * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing + * either %ethtool_cmd or %ethtool_link_settings). + * + * Users should assume that all fields not marked read-only are + * writable and subject to validation by the driver. They should use + * %ETHTOOL_GLINKSETTINGS to get the current values before making specific + * changes and then applying them with %ETHTOOL_SLINKSETTINGS. + * + * Drivers that implement %get_link_ksettings and/or + * %set_link_ksettings should ignore the @cmd + * and @link_mode_masks_nwords fields (any change to them overwritten + * by kernel), and rely only on kernel's internal + * %__ETHTOOL_LINK_MODE_MASK_NBITS and + * %ethtool_link_mode_mask_t. 
Drivers that implement + * %set_link_ksettings() should validate all fields other than @cmd + * and @link_mode_masks_nwords that are not described as read-only or + * deprecated, and must ignore all fields described as read-only. + */ +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u32 reserved[8]; + __u32 link_mode_masks[0]; + /* layout of link_mode_masks fields: + * __u32 map_supported[link_mode_masks_nwords]; + * __u32 map_advertising[link_mode_masks_nwords]; + * __u32 map_lp_advertising[link_mode_masks_nwords]; + */ +}; #endif /* _UAPI_LINUX_ETHTOOL_H */ diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index c3363ba..5512c90 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h @@ -21,6 +21,7 @@ struct genlmsghdr { #define GENL_CMD_CAP_DO 0x02 #define GENL_CMD_CAP_DUMP 0x04 #define GENL_CMD_CAP_HASPOL 0x08 +#define GENL_UNS_ADMIN_PERM 0x10 /* * List of reserved static generic netlink identifiers: diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h index 9cf2394..f802775 100644 --- a/include/uapi/linux/if.h +++ b/include/uapi/linux/if.h @@ -37,7 +37,7 @@ * are shared for all types of net_devices. The sysfs entries are available * via /sys/class/net/<dev>/flags. Flags which can be toggled through sysfs * are annotated below, note that only a few flags can be toggled and some - * other flags are always always preserved from the original net_device flags + * other flags are always preserved from the original net_device flags * even if you try to set them via sysfs. Flags which are always preserved * are kept under the flag grouping @IFF_VOLATILE. Flags which are volatile * are annotated below as such. 
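The link_mode_masks_nwords handshake documented for struct ethtool_link_settings in the ethtool.h hunk above is easiest to see in code. The fragment below is an illustrative user-space sketch, not part of this patch: "eth0"/the ifname argument is a placeholder, error handling is minimal, and the buffer is simply sized for the worst case the __s8 nwords field allows.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Query link settings via the two-pass ETHTOOL_GLINKSETTINGS handshake. */
static int example_get_link_settings(const char *ifname)
{
	struct {
		struct ethtool_link_settings req;
		__u32 link_mode_data[3 * 127];	/* supported/advertising/lp, worst case */
	} ecmd;
	struct ifreq ifr;
	int fd, nwords;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	/* Pass 1: offer zero words; the kernel replies with cmd == 0 and the
	 * negated number of 32-bit words it wants in link_mode_masks_nwords.
	 */
	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.req.cmd = ETHTOOL_GLINKSETTINGS;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0 ||
	    ecmd.req.link_mode_masks_nwords >= 0 || ecmd.req.cmd != 0) {
		close(fd);
		return -1;
	}
	nwords = -ecmd.req.link_mode_masks_nwords;

	/* Pass 2: repeat the request at the size the kernel asked for. */
	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.req.cmd = ETHTOOL_GLINKSETTINGS;
	ecmd.req.link_mode_masks_nwords = nwords;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		return -1;
	}

	printf("%s: speed %u Mb/s, duplex %u, %d link-mode words\n",
	       ifname, ecmd.req.speed, ecmd.req.duplex, nwords);
	close(fd);
	return 0;
}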
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 18db144..0536eef 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -137,11 +137,17 @@ struct bridge_vlan_info { /* Bridge multicast database attributes * [MDBA_MDB] = { * [MDBA_MDB_ENTRY] = { - * [MDBA_MDB_ENTRY_INFO] + * [MDBA_MDB_ENTRY_INFO] { + * struct br_mdb_entry + * [MDBA_MDB_EATTR attributes] + * } * } * } * [MDBA_ROUTER] = { - * [MDBA_ROUTER_PORT] + * [MDBA_ROUTER_PORT] = { + * u32 ifindex + * [MDBA_ROUTER_PATTR attributes] + * } * } */ enum { @@ -166,6 +172,22 @@ enum { }; #define MDBA_MDB_ENTRY_MAX (__MDBA_MDB_ENTRY_MAX - 1) +/* per mdb entry additional attributes */ +enum { + MDBA_MDB_EATTR_UNSPEC, + MDBA_MDB_EATTR_TIMER, + __MDBA_MDB_EATTR_MAX +}; +#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1) + +/* multicast router types */ +enum { + MDB_RTR_TYPE_DISABLED, + MDB_RTR_TYPE_TEMP_QUERY, + MDB_RTR_TYPE_PERM, + MDB_RTR_TYPE_TEMP +}; + enum { MDBA_ROUTER_UNSPEC, MDBA_ROUTER_PORT, @@ -173,6 +195,15 @@ enum { }; #define MDBA_ROUTER_MAX (__MDBA_ROUTER_MAX - 1) +/* router port attributes */ +enum { + MDBA_ROUTER_PATTR_UNSPEC, + MDBA_ROUTER_PATTR_TIMER, + MDBA_ROUTER_PATTR_TYPE, + __MDBA_ROUTER_PATTR_MAX +}; +#define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1) + struct br_port_msg { __u8 family; __u32 ifindex; @@ -183,6 +214,8 @@ struct br_mdb_entry { #define MDB_TEMPORARY 0 #define MDB_PERMANENT 1 __u8 state; +#define MDB_FLAGS_OFFLOAD (1 << 0) + __u8 flags; __u16 vid; struct { union { diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index ea9221b..4a93051 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -83,6 +83,7 @@ #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ #define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. 
*/ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */ #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ #define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */ #define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index a30b780..8e3f88f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -35,6 +35,8 @@ struct rtnl_link_stats { /* for cslip etc */ __u32 rx_compressed; __u32 tx_compressed; + + __u32 rx_nohandler; /* dropped, no handler found */ }; /* The main device statistics structure */ @@ -68,6 +70,8 @@ struct rtnl_link_stats64 { /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; + + __u64 rx_nohandler; /* dropped, no handler found */ }; /* The struct should be in sync with struct ifmap */ @@ -401,6 +405,43 @@ enum { #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) +enum { + IFLA_VRF_PORT_UNSPEC, + IFLA_VRF_PORT_TABLE, + __IFLA_VRF_PORT_MAX +}; + +#define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1) + +/* MACSEC section */ +enum { + IFLA_MACSEC_UNSPEC, + IFLA_MACSEC_SCI, + IFLA_MACSEC_PORT, + IFLA_MACSEC_ICV_LEN, + IFLA_MACSEC_CIPHER_SUITE, + IFLA_MACSEC_WINDOW, + IFLA_MACSEC_ENCODING_SA, + IFLA_MACSEC_ENCRYPT, + IFLA_MACSEC_PROTECT, + IFLA_MACSEC_INC_SCI, + IFLA_MACSEC_ES, + IFLA_MACSEC_SCB, + IFLA_MACSEC_REPLAY_PROTECT, + IFLA_MACSEC_VALIDATION, + __IFLA_MACSEC_MAX, +}; + +#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) + +enum macsec_validation_type { + MACSEC_VALIDATE_DISABLED = 0, + MACSEC_VALIDATE_CHECK = 1, + MACSEC_VALIDATE_STRICT = 2, + __MACSEC_VALIDATE_END, + MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, +}; + /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, @@ -444,6 +485,7 @@ enum { IFLA_VXLAN_GBP, IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, + IFLA_VXLAN_LABEL, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -466,6 +508,7 @@ enum { IFLA_GENEVE_UDP_CSUM, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + IFLA_GENEVE_LABEL, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h new file mode 100644 index 0000000..26b0d1e --- /dev/null +++ b/include/uapi/linux/if_macsec.h @@ -0,0 +1,161 @@ +/* + * include/uapi/linux/if_macsec.h - MACsec device + * + * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _UAPI_MACSEC_H +#define _UAPI_MACSEC_H + +#include <linux/types.h> + +#define MACSEC_GENL_NAME "macsec" +#define MACSEC_GENL_VERSION 1 + +#define MACSEC_MAX_KEY_LEN 128 + +#define DEFAULT_CIPHER_ID 0x0080020001000001ULL +#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL + +#define MACSEC_MIN_ICV_LEN 8 +#define MACSEC_MAX_ICV_LEN 32 + +enum macsec_attrs { + MACSEC_ATTR_UNSPEC, + MACSEC_ATTR_IFINDEX, /* u32, ifindex of the MACsec netdevice */ + MACSEC_ATTR_RXSC_CONFIG, /* config, nested macsec_rxsc_attrs */ + MACSEC_ATTR_SA_CONFIG, /* config, nested macsec_sa_attrs */ + MACSEC_ATTR_SECY, /* dump, nested macsec_secy_attrs */ + MACSEC_ATTR_TXSA_LIST, /* dump, nested, macsec_sa_attrs for each TXSA */ + MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */ + MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */ + MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */ + __MACSEC_ATTR_END, + NUM_MACSEC_ATTR = __MACSEC_ATTR_END, + MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1, +}; + +enum macsec_secy_attrs { + MACSEC_SECY_ATTR_UNSPEC, + MACSEC_SECY_ATTR_SCI, + MACSEC_SECY_ATTR_ENCODING_SA, + MACSEC_SECY_ATTR_WINDOW, + MACSEC_SECY_ATTR_CIPHER_SUITE, + MACSEC_SECY_ATTR_ICV_LEN, + MACSEC_SECY_ATTR_PROTECT, + MACSEC_SECY_ATTR_REPLAY, + MACSEC_SECY_ATTR_OPER, + MACSEC_SECY_ATTR_VALIDATE, + MACSEC_SECY_ATTR_ENCRYPT, + MACSEC_SECY_ATTR_INC_SCI, + MACSEC_SECY_ATTR_ES, + MACSEC_SECY_ATTR_SCB, + __MACSEC_SECY_ATTR_END, + NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END, + MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1, +}; + +enum macsec_rxsc_attrs { + MACSEC_RXSC_ATTR_UNSPEC, + MACSEC_RXSC_ATTR_SCI, /* config/dump, u64 */ + MACSEC_RXSC_ATTR_ACTIVE, /* config/dump, u8 0..1 */ + MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */ + MACSEC_RXSC_ATTR_STATS, /* dump, nested, macsec_rxsc_stats_attr */ + __MACSEC_RXSC_ATTR_END, + NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END, + MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1, +}; + +enum macsec_sa_attrs { + MACSEC_SA_ATTR_UNSPEC, + MACSEC_SA_ATTR_AN, /* config/dump, u8 0..3 */ + MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */ + MACSEC_SA_ATTR_PN, /* config/dump, u32 */ + MACSEC_SA_ATTR_KEY, /* config, data */ + MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */ + MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */ + __MACSEC_SA_ATTR_END, + NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END, + MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1, +}; + +enum macsec_nl_commands { + MACSEC_CMD_GET_TXSC, + MACSEC_CMD_ADD_RXSC, + MACSEC_CMD_DEL_RXSC, + MACSEC_CMD_UPD_RXSC, + MACSEC_CMD_ADD_TXSA, + MACSEC_CMD_DEL_TXSA, + MACSEC_CMD_UPD_TXSA, + MACSEC_CMD_ADD_RXSA, + MACSEC_CMD_DEL_RXSA, + MACSEC_CMD_UPD_RXSA, +}; + +/* u64 per-RXSC stats */ +enum macsec_rxsc_stats_attr { + MACSEC_RXSC_STATS_ATTR_UNSPEC, + MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, + MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, + __MACSEC_RXSC_STATS_ATTR_END, + NUM_MACSEC_RXSC_STATS_ATTR = __MACSEC_RXSC_STATS_ATTR_END, + MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1, +}; + +/* u32 per-{RX,TX}SA stats */ +enum macsec_sa_stats_attr { + MACSEC_SA_STATS_ATTR_UNSPEC, + MACSEC_SA_STATS_ATTR_IN_PKTS_OK, + 
MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, + MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, + MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, + MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, + MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, + MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, + __MACSEC_SA_STATS_ATTR_END, + NUM_MACSEC_SA_STATS_ATTR = __MACSEC_SA_STATS_ATTR_END, + MACSEC_SA_STATS_ATTR_MAX = __MACSEC_SA_STATS_ATTR_END - 1, +}; + +/* u64 per-TXSC stats */ +enum macsec_txsc_stats_attr { + MACSEC_TXSC_STATS_ATTR_UNSPEC, + MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, + MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, + MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, + MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, + __MACSEC_TXSC_STATS_ATTR_END, + NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END, + MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1, +}; + +/* u64 per-SecY stats */ +enum macsec_secy_stats_attr { + MACSEC_SECY_STATS_ATTR_UNSPEC, + MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, + MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, + MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, + MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, + MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, + MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, + MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, + MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, + __MACSEC_SECY_STATS_ATTR_END, + NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END, + MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1, +}; + +#endif /* _UAPI_MACSEC_H */ diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index 08f894d..f291569 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -165,6 +165,8 @@ enum IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL, IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL, IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP, __IPV4_DEVCONF_MAX }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 38b4fef..3958760 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -174,6 +174,9 @@ enum { DEVCONF_USE_OIF_ADDRS_ONLY, DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT, DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, + DEVCONF_DROP_UNSOLICITED_NA, + DEVCONF_KEEP_ADDR_ON_DOWN, DEVCONF_MAX }; diff --git a/include/uapi/linux/kcm.h b/include/uapi/linux/kcm.h new file mode 100644 index 0000000..a5a53094 --- /dev/null +++ b/include/uapi/linux/kcm.h @@ -0,0 +1,40 @@ +/* + * Kernel Connection Multiplexor + * + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * User API to clone KCM sockets and attach transport socket to a KCM + * multiplexor. 
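For orientation, here is a minimal userspace sketch of how the KCM ioctls introduced in the new include/uapi/linux/kcm.h below might be driven. It is illustrative only and not part of this patch set: the already-connected TCP socket (tcp_fd), the loaded message-delineation BPF program (bpf_prog_fd) and the KCM socket itself (kcm_fd) are assumed to be created elsewhere.

/* Hedged sketch: attaching a transport socket to a KCM multiplexor and
 * cloning a KCM socket with the ioctls from <linux/kcm.h>. Error
 * handling is reduced to returning -1. */
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCPROTOPRIVATE, used by the SIOCKCM* ioctls */
#include <linux/kcm.h>

static int kcm_attach_tcp(int kcm_fd, int tcp_fd, int bpf_prog_fd)
{
	struct kcm_attach attach = {
		.fd = tcp_fd,		/* transport (TCP) socket to attach */
		.bpf_fd = bpf_prog_fd,	/* BPF program that finds message boundaries */
	};

	return ioctl(kcm_fd, SIOCKCMATTACH, &attach);
}

static int kcm_clone_sock(int kcm_fd)
{
	struct kcm_clone clone = { 0 };

	/* On success the kernel fills clone.fd with a new socket
	 * attached to the same multiplexor. */
	if (ioctl(kcm_fd, SIOCKCMCLONE, &clone) < 0)
		return -1;

	return clone.fd;
}

A per-thread worker would typically clone once per thread and then send and receive complete messages on its private descriptor.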
+ */ + +#ifndef KCM_KERNEL_H +#define KCM_KERNEL_H + +struct kcm_attach { + int fd; + int bpf_fd; +}; + +struct kcm_unattach { + int fd; +}; + +struct kcm_clone { + int fd; +}; + +#define SIOCKCMATTACH (SIOCPROTOPRIVATE + 0) +#define SIOCKCMUNATTACH (SIOCPROTOPRIVATE + 1) +#define SIOCKCMCLONE (SIOCPROTOPRIVATE + 2) + +#define KCMPROTO_CONNECTED 0 + +/* Socket options */ +#define KCM_RECV_DISABLE 1 + +#endif + diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h index 321e399..466073f 100644 --- a/include/uapi/linux/kernel.h +++ b/include/uapi/linux/kernel.h @@ -9,5 +9,6 @@ #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif /* _UAPI_LINUX_KERNEL_H */ diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index ce91215..5062fb5 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -1,6 +1,7 @@ #ifndef _UAPI__LINUX_MROUTE6_H #define _UAPI__LINUX_MROUTE6_H +#include <linux/kernel.h> #include <linux/types.h> #include <linux/sockios.h> @@ -46,14 +47,8 @@ typedef unsigned short mifi_t; typedef __u32 if_mask; #define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */ -#if !defined(__KERNEL__) -#if !defined(DIV_ROUND_UP) -#define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y)) -#endif -#endif - typedef struct if_set { - if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)]; + if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(IF_SETSIZE, NIFBITS)]; } if_set; #define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS))) diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h index 23cbd34..45dfad5 100644 --- a/include/uapi/linux/netconf.h +++ b/include/uapi/linux/netconf.h @@ -19,6 +19,7 @@ enum { __NETCONFA_MAX }; #define NETCONFA_MAX (__NETCONFA_MAX - 1) +#define NETCONFA_ALL -1 #define NETCONFA_IFINDEX_ALL -1 #define NETCONFA_IFINDEX_DEFAULT -2 diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 319f471..6d074d1 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -20,9 +20,15 @@ enum ip_conntrack_info { IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY, IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY, - IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY, - /* Number of distinct IP_CT types (no NEW in reply dirn). */ - IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1 + /* No NEW in reply direction. */ + + /* Number of distinct IP_CT types. 
*/ + IP_CT_NUMBER, + + /* only for userspace compatibility */ +#ifndef __KERNEL__ + IP_CT_NEW_REPLY = IP_CT_NUMBER, +#endif }; #define NF_CT_STATE_INVALID_BIT (1 << 0) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index be41ffc..eeffde1 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -681,6 +681,7 @@ enum nft_exthdr_attributes { * @NFT_META_IIFGROUP: packet input interface group * @NFT_META_OIFGROUP: packet output interface group * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) + * @NFT_META_PRANDOM: a 32bit pseudo-random number */ enum nft_meta_keys { NFT_META_LEN, @@ -707,6 +708,7 @@ enum nft_meta_keys { NFT_META_IIFGROUP, NFT_META_OIFGROUP, NFT_META_CGROUP, + NFT_META_PRANDOM, }; /** @@ -949,10 +951,14 @@ enum nft_nat_attributes { * enum nft_masq_attributes - nf_tables masquerade expression attributes * * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + * @NFTA_MASQ_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) + * @NFTA_MASQ_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) */ enum nft_masq_attributes { NFTA_MASQ_UNSPEC, NFTA_MASQ_FLAGS, + NFTA_MASQ_REG_PROTO_MIN, + NFTA_MASQ_REG_PROTO_MAX, __NFTA_MASQ_MAX }; #define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index f095155..0dba4e4 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -107,8 +107,10 @@ struct nlmsgerr { #define NETLINK_PKTINFO 3 #define NETLINK_BROADCAST_ERROR 4 #define NETLINK_NO_ENOBUFS 5 +#ifndef __KERNEL__ #define NETLINK_RX_RING 6 #define NETLINK_TX_RING 7 +#endif #define NETLINK_LISTEN_ALL_NSID 8 #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 @@ -134,6 +136,7 @@ struct nl_mmap_hdr { __u32 nm_gid; }; +#ifndef __KERNEL__ enum nl_mmap_status { NL_MMAP_STATUS_UNUSED, NL_MMAP_STATUS_RESERVED, @@ -145,6 +148,7 @@ enum nl_mmap_status { #define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO #define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT) #define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr)) +#endif #define NET_MAJOR 36 /* Major 36 is reserved for networking */ diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h index f2159d3..d793993 100644 --- a/include/uapi/linux/netlink_diag.h +++ b/include/uapi/linux/netlink_diag.h @@ -48,6 +48,8 @@ enum { #define NDIAG_SHOW_MEMINFO 0x00000001 /* show memory info of a socket */ #define NDIAG_SHOW_GROUPS 0x00000002 /* show groups of a netlink socket */ +#ifndef __KERNEL__ #define NDIAG_SHOW_RING_CFG 0x00000004 /* show ring configuration */ +#endif #endif diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5b7b5eb..5a30a75 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1727,6 +1727,8 @@ enum nl80211_commands { * underlying device supports these minimal RRM features: * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES, * %NL80211_FEATURE_QUIET, + * Or, if global RRM is supported, see: + * %NL80211_EXT_FEATURE_RRM * If this flag is used, driver must add the Power Capabilities IE to the * association request. In addition, it must also set the RRM capability * flag in the association request's Capability Info field. @@ -1789,6 +1791,10 @@ enum nl80211_commands { * thus it must not specify the number of iterations, only the interval * between scans. 
The scan plans are executed sequentially. * Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan. + * @NL80211_ATTR_PBSS: flag attribute. If set it means operate + * in a PBSS. Specified in %NL80211_CMD_CONNECT to request + * connecting to a PCP, and in %NL80211_CMD_START_AP to start + * a PCP instead of AP. Relevant for DMG networks only. * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2164,6 +2170,8 @@ enum nl80211_attrs { NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, NL80211_ATTR_SCHED_SCAN_PLANS, + NL80211_ATTR_PBSS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4396,12 +4404,18 @@ enum nl80211_feature_flags { /** * enum nl80211_ext_feature_index - bit index of extended features. * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates. + * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user can + * can request to use RRM (see %NL80211_ATTR_USE_RRM) with + * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set + * the ASSOC_REQ_USE_RRM flag in the association request even if + * NL80211_FEATURE_QUIET is not advertized. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_VHT_IBSS, + NL80211_EXT_FEATURE_RRM, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index a27222d..616d047 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -454,6 +454,14 @@ struct ovs_key_ct_labels { #define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */ #define OVS_CS_F_INVALID 0x10 /* Could not track connection. */ #define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */ +#define OVS_CS_F_SRC_NAT 0x40 /* Packet's source address/port was + * mangled by NAT. + */ +#define OVS_CS_F_DST_NAT 0x80 /* Packet's destination address/port + * was mangled by NAT. + */ + +#define OVS_CS_F_NAT_MASK (OVS_CS_F_SRC_NAT | OVS_CS_F_DST_NAT) /** * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. @@ -632,6 +640,8 @@ struct ovs_action_hash { * mask. For each bit set in the mask, the corresponding bit in the value is * copied to the connection tracking label field in the connection. * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. + * @OVS_CT_ATTR_NAT: Nested OVS_NAT_ATTR_* for performing L3 network address + * translation (NAT) on the packet. */ enum ovs_ct_attr { OVS_CT_ATTR_UNSPEC, @@ -641,12 +651,51 @@ enum ovs_ct_attr { OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */ OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of related connections. */ + OVS_CT_ATTR_NAT, /* Nested OVS_NAT_ATTR_* */ __OVS_CT_ATTR_MAX }; #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1) /** + * enum ovs_nat_attr - Attributes for %OVS_CT_ATTR_NAT. + * + * @OVS_NAT_ATTR_SRC: Flag for Source NAT (mangle source address/port). + * @OVS_NAT_ATTR_DST: Flag for Destination NAT (mangle destination + * address/port). Only one of (@OVS_NAT_ATTR_SRC, @OVS_NAT_ATTR_DST) may be + * specified. Effective only for packets for ct_state NEW connections. + * Packets of committed connections are mangled by the NAT action according to + * the committed NAT type regardless of the flags specified. 
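As a quick illustration of the new conntrack-state bits above, a consumer of the OVS flow key might test them as follows. How the 32-bit ct_state value is extracted from the OVS_KEY_ATTR_CT_STATE netlink attribute is assumed here and not shown in this patch.

#include <stdbool.h>
#include <stdint.h>
#include <linux/openvswitch.h>

/* Hedged sketch: interpreting the NAT-related ct_state flags. */
static bool ct_was_natted(uint32_t ct_state)
{
	/* Source or destination address/port was mangled by NAT. */
	return (ct_state & OVS_CS_F_NAT_MASK) != 0;
}

static bool ct_was_snatted(uint32_t ct_state)
{
	return (ct_state & OVS_CS_F_SRC_NAT) != 0;
}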
As a corollary, a + * NAT action without a NAT type flag will only mangle packets of committed + * connections. The following NAT attributes only apply for NEW + * (non-committed) connections, and they may be included only when the CT + * action has the @OVS_CT_ATTR_COMMIT flag and either @OVS_NAT_ATTR_SRC or + * @OVS_NAT_ATTR_DST is also included. + * @OVS_NAT_ATTR_IP_MIN: struct in_addr or struct in6_addr + * @OVS_NAT_ATTR_IP_MAX: struct in_addr or struct in6_addr + * @OVS_NAT_ATTR_PROTO_MIN: u16 L4 protocol specific lower boundary (port) + * @OVS_NAT_ATTR_PROTO_MAX: u16 L4 protocol specific upper boundary (port) + * @OVS_NAT_ATTR_PERSISTENT: Flag for persistent IP mapping across reboots + * @OVS_NAT_ATTR_PROTO_HASH: Flag for pseudo random L4 port mapping (MD5) + * @OVS_NAT_ATTR_PROTO_RANDOM: Flag for fully randomized L4 port mapping + */ +enum ovs_nat_attr { + OVS_NAT_ATTR_UNSPEC, + OVS_NAT_ATTR_SRC, + OVS_NAT_ATTR_DST, + OVS_NAT_ATTR_IP_MIN, + OVS_NAT_ATTR_IP_MAX, + OVS_NAT_ATTR_PROTO_MIN, + OVS_NAT_ATTR_PROTO_MAX, + OVS_NAT_ATTR_PERSISTENT, + OVS_NAT_ATTR_PROTO_HASH, + OVS_NAT_ATTR_PROTO_RANDOM, + __OVS_NAT_ATTR_MAX, +}; + +#define OVS_NAT_ATTR_MAX (__OVS_NAT_ATTR_MAX - 1) + +/** * enum ovs_action_attr - Action types. * * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 4398737..c43c5f7 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -172,6 +172,7 @@ enum { TCA_U32_INDEV, TCA_U32_PCNT, TCA_U32_MARK, + TCA_U32_FLAGS, __TCA_U32_MAX }; @@ -416,6 +417,8 @@ enum { TCA_FLOWER_KEY_TCP_DST, /* be16 */ TCA_FLOWER_KEY_UDP_SRC, /* be16 */ TCA_FLOWER_KEY_UDP_DST, /* be16 */ + + TCA_FLOWER_FLAGS, __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h index 058757f..2e00dce 100644 --- a/include/uapi/linux/rfkill.h +++ b/include/uapi/linux/rfkill.h @@ -59,6 +59,8 @@ enum rfkill_type { * @RFKILL_OP_DEL: a device was removed * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all) + * into a state, also updating the default state used for devices that + * are hot-plugged later. 
*/ enum rfkill_operation { RFKILL_OP_ADD = 0, diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h new file mode 100644 index 0000000..d648ff6 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_ife.h @@ -0,0 +1,38 @@ +#ifndef __UAPI_TC_IFE_H +#define __UAPI_TC_IFE_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +#define TCA_ACT_IFE 25 +/* Flag bits for now just encoding/decoding; mutually exclusive */ +#define IFE_ENCODE 1 +#define IFE_DECODE 0 + +struct tc_ife { + tc_gen; + __u16 flags; +}; + +/*XXX: We need to encode the total number of bytes consumed */ +enum { + TCA_IFE_UNSPEC, + TCA_IFE_PARMS, + TCA_IFE_TM, + TCA_IFE_DMAC, + TCA_IFE_SMAC, + TCA_IFE_TYPE, + TCA_IFE_METALST, + __TCA_IFE_MAX +}; +#define TCA_IFE_MAX (__TCA_IFE_MAX - 1) + +#define IFE_META_SKBMARK 1 +#define IFE_META_HASHID 2 +#define IFE_META_PRIO 3 +#define IFE_META_QMAP 4 +/*Can be overridden at runtime by module option*/ +#define __IFE_META_MAX 5 +#define IFE_META_MAX (__IFE_META_MAX - 1) + +#endif diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 65a77b0..53e8e3f 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -196,6 +196,11 @@ struct tcp_info { __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */ __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */ + + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */ + __u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */ }; /* for TCP_MD5SIG socket option */ diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h index 252ffd4..4f20dbc 100644 --- a/include/xen/interface/io/netif.h +++ b/include/xen/interface/io/netif.h @@ -1,16 +1,34 @@ /****************************************************************************** - * netif.h + * xen_netif.h * * Unified network-device I/O interface for Xen guest OSes. * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
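The tcp_info additions above (tcpi_notsent_bytes, tcpi_min_rtt and the RFC 4898 data-segment counters) are visible to userspace through the existing TCP_INFO socket option. A hedged sketch, assuming a connected TCP socket sock_fd and a kernel new enough to fill the tail of the structure:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>	/* IPPROTO_TCP */
#include <linux/tcp.h>	/* struct tcp_info with the new fields, TCP_INFO */

static void print_new_tcp_stats(int sock_fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(sock_fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0)
		return;

	/* Older kernels return a shorter structure; only trust fields
	 * that were actually filled in. */
	if (len >= sizeof(info))
		printf("notsent=%u min_rtt=%uus data_segs_in=%u data_segs_out=%u\n",
		       info.tcpi_notsent_bytes, info.tcpi_min_rtt,
		       info.tcpi_data_segs_in, info.tcpi_data_segs_out);
}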
+ * * Copyright (c) 2003-2004, Keir Fraser */ -#ifndef __XEN_PUBLIC_IO_NETIF_H__ -#define __XEN_PUBLIC_IO_NETIF_H__ +#ifndef __XEN_PUBLIC_IO_XEN_NETIF_H__ +#define __XEN_PUBLIC_IO_XEN_NETIF_H__ -#include <xen/interface/io/ring.h> -#include <xen/interface/grant_table.h> +#include "ring.h" +#include "../grant_table.h" /* * Older implementation of Xen network frontend / backend has an @@ -38,10 +56,10 @@ * that it cannot safely queue packets (as it may not be kicked to send them). */ - /* +/* * "feature-split-event-channels" is introduced to separate guest TX - * and RX notificaion. Backend either doesn't support this feature or - * advertise it via xenstore as 0 (disabled) or 1 (enabled). + * and RX notification. Backend either doesn't support this feature or + * advertises it via xenstore as 0 (disabled) or 1 (enabled). * * To make use of this feature, frontend should allocate two event * channels for TX and RX, advertise them to backend as @@ -118,151 +136,804 @@ */ /* - * This is the 'wire' format for packets: - * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags) - * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info) - * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE) - * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data - * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data + * "feature-multicast-control" and "feature-dynamic-multicast-control" + * advertise the capability to filter ethernet multicast packets in the + * backend. If the frontend wishes to take advantage of this feature then + * it may set "request-multicast-control". If the backend only advertises + * "feature-multicast-control" then "request-multicast-control" must be set + * before the frontend moves into the connected state. The backend will + * sample the value on this state transition and any subsequent change in + * value will have no effect. However, if the backend also advertises + * "feature-dynamic-multicast-control" then "request-multicast-control" + * may be set by the frontend at any time. In this case, the backend will + * watch the value and re-sample on watch events. + * + * If the sampled value of "request-multicast-control" is set then the + * backend transmit side should no longer flood multicast packets to the + * frontend, it should instead drop any multicast packet that does not + * match in a filter list. + * The list is amended by the frontend by sending dummy transmit requests + * containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as + * specified below. + * Note that the filter list may be amended even if the sampled value of + * "request-multicast-control" is not set, however the filter should only + * be applied if it is set. + */ + +/* + * Control ring + * ============ + * + * Some features, such as hashing (detailed below), require a + * significant amount of out-of-band data to be passed from frontend to + * backend. Use of xenstore is not suitable for large quantities of data + * because of quota limitations and so a dedicated 'control ring' is used. 
+ * The ability of the backend to use a control ring is advertised by + * setting: + * + * /local/domain/X/backend/<domid>/<vif>/feature-ctrl-ring = "1" + * + * The frontend provides a control ring to the backend by setting: + * + * /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref> + * /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port> + * + * where <gref> is the grant reference of the shared page used to + * implement the control ring and <port> is an event channel to be used + * as a mailbox interrupt. These keys must be set before the frontend + * moves into the connected state. + * + * The control ring uses a fixed request/response message size and is + * balanced (i.e. one request to one response), so operationally it is much + * the same as a transmit or receive ring. + * Note that there is no requirement that responses are issued in the same + * order as requests. + */ + +/* + * Hash types + * ========== + * + * For the purposes of the definitions below, 'Packet[]' is an array of + * octets containing an IP packet without options, 'Array[X..Y]' means a + * sub-array of 'Array' containing bytes X thru Y inclusive, and '+' is + * used to indicate concatenation of arrays. + */ + +/* + * A hash calculated over an IP version 4 header as follows: + * + * Buffer[0..8] = Packet[12..15] (source address) + + * Packet[16..19] (destination address) + * + * Result = Hash(Buffer, 8) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4) + +/* + * A hash calculated over an IP version 4 header and TCP header as + * follows: + * + * Buffer[0..12] = Packet[12..15] (source address) + + * Packet[16..19] (destination address) + + * Packet[20..21] (source port) + + * Packet[22..23] (destination port) + * + * Result = Hash(Buffer, 12) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP) + +/* + * A hash calculated over an IP version 6 header as follows: + * + * Buffer[0..32] = Packet[8..23] (source address ) + + * Packet[24..39] (destination address) + * + * Result = Hash(Buffer, 32) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6) + +/* + * A hash calculated over an IP version 6 header and TCP header as + * follows: + * + * Buffer[0..36] = Packet[8..23] (source address) + + * Packet[24..39] (destination address) + + * Packet[40..41] (source port) + + * Packet[42..43] (destination port) + * + * Result = Hash(Buffer, 36) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP) + +/* + * Hash algorithms + * =============== + */ + +#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0 + +/* + * Toeplitz hash: + */ + +#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1 + +/* + * This algorithm uses a 'key' as well as the data buffer itself. + * (Buffer[] and Key[] are treated as shift-registers where the MSB of + * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1] + * is the 'right-most'). + * + * Value = 0 + * For number of bits in Buffer[] + * If (left-most bit of Buffer[] is 1) + * Value ^= left-most 32 bits of Key[] + * Key[] << 1 + * Buffer[] << 1 + * + * The code below is provided for convenience where an operating system + * does not already provide an implementation. 
+ */ +#ifdef XEN_NETIF_DEFINE_TOEPLITZ +static uint32_t xen_netif_toeplitz_hash(const uint8_t *key, + unsigned int keylen, + const uint8_t *buf, unsigned int buflen) +{ + unsigned int keyi, bufi; + uint64_t prefix = 0; + uint64_t hash = 0; + + /* Pre-load prefix with the first 8 bytes of the key */ + for (keyi = 0; keyi < 8; keyi++) { + prefix <<= 8; + prefix |= (keyi < keylen) ? key[keyi] : 0; + } + + for (bufi = 0; bufi < buflen; bufi++) { + uint8_t byte = buf[bufi]; + unsigned int bit; + + for (bit = 0; bit < 8; bit++) { + if (byte & 0x80) + hash ^= prefix; + prefix <<= 1; + byte <<= 1; + } + + /* + * 'prefix' has now been left-shifted by 8, so + * OR in the next byte. + */ + prefix |= (keyi < keylen) ? key[keyi] : 0; + keyi++; + } + + /* The valid part of the hash is in the upper 32 bits. */ + return hash >> 32; +} +#endif /* XEN_NETIF_DEFINE_TOEPLITZ */ + +/* + * Control requests (struct xen_netif_ctrl_request) + * ================================================ + * + * All requests have the following format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | type | data[0] | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | data[1] | data[2] | + * +-----+-----+-----+-----+-----------------------+ + * + * id: the request identifier, echoed in response. + * type: the type of request (see below) + * data[]: any data associated with the request (determined by type) + */ + +struct xen_netif_ctrl_request { + uint16_t id; + uint16_t type; + +#define XEN_NETIF_CTRL_TYPE_INVALID 0 +#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS 1 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS 2 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY 3 +#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING 6 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM 7 + + uint32_t data[3]; +}; + +/* + * Control responses (struct xen_netif_ctrl_response) + * ================================================== + * + * All responses have the following format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | type | status | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | data | + * +-----+-----+-----+-----+ + * + * id: the corresponding request identifier + * type: the type of the corresponding request + * status: the status of request processing + * data: any data associated with the response (determined by type and + * status) + */ + +struct xen_netif_ctrl_response { + uint16_t id; + uint16_t type; + uint32_t status; + +#define XEN_NETIF_CTRL_STATUS_SUCCESS 0 +#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED 1 +#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2 +#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW 3 + + uint32_t data; +}; + +/* + * Control messages + * ================ + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM + * -------------------------------------- + * + * This is sent by the frontend to set the desired hash algorithm. 
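Before the individual messages are specified below, a hedged sketch of how a frontend might fill the request structure defined above for the SET_HASH_ALGORITHM message described next. Queuing the request on the control ring and collecting the response (via the xen_netif_ctrl ring macros) is assumed and not shown.

#include <xen/interface/io/netif.h>

/* Illustrative only: build a request selecting the Toeplitz algorithm. */
static void build_set_hash_algorithm(struct xen_netif_ctrl_request *req,
				     uint16_t id)
{
	req->id = id;	/* echoed back in the matching response */
	req->type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM;
	req->data[0] = XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
	req->data[1] = 0;
	req->data[2] = 0;
}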
+ * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM + * data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not + * supported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * + * NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables + * hashing and the backend is free to choose how it steers packets + * to queues (which is the default behaviour). + * + * XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS + * ---------------------------------- + * + * This is sent by the frontend to query the types of hash supported by + * the backend. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS + * data[0] = 0 + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = supported hash types (if operation was successful) + * + * NOTE: A valid hash algorithm must be selected before this operation can + * succeed. + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS + * ---------------------------------- + * + * This is sent by the frontend to set the types of hash that the backend + * should calculate. (See above for hash type definitions). + * Note that the 'maximal' type of hash should always be chosen. For + * example, if the frontend sets both IPV4 and IPV4_TCP hash types then + * the latter hash type should be calculated for any TCP packet and the + * former only calculated for non-TCP packets. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS + * data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag + * value is invalid or + * unsupported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: A valid hash algorithm must be selected before this operation can + * succeed. + * Also, setting data[0] to zero disables hashing and the backend + * is free to choose how it steers packets to queues. + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_KEY + * -------------------------------- + * + * This is sent by the frontend to set the key of the hash if the algorithm + * requires it. (See hash algorithms above). + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY + * data[0] = grant reference of page containing the key (assumed to + * start at beginning of grant) + * data[1] = size of key in octets + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid + * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger + * than the backend + * supports + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: Any key octets not specified are assumed to be zero (the key + * is assumed to be empty by default) and specifying a new key + * invalidates any previous key, hence specifying a key size of + * zero will clear the key (which ensures that the calculated hash + * will always be zero). + * The maximum size of key is algorithm and backend specific, but + * is also limited by the single grant reference. 
+ * The grant reference may be read-only and must remain valid until + * the response has been processed. + * + * XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE + * ----------------------------------------- + * + * This is sent by the frontend to query the maximum size of mapping + * table supported by the backend. The size is specified in terms of + * table entries. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE + * data[0] = 0 + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = maximum number of entries allowed in the mapping table + * (if operation was successful) or zero if a mapping table is + * not supported (i.e. hash mapping is done only by modular + * arithmetic). + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE + * ------------------------------------- + * + * This is sent by the frontend to set the actual size of the mapping + * table to be used by the backend. The size is specified in terms of + * table entries. + * Any previous table is invalidated by this message and any new table + * is assumed to be zero filled. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE + * data[0] = number of entries in mapping table + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: Setting data[0] to 0 means that hash mapping should be done + * using modular arithmetic. + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING + * ------------------------------------ + * + * This is sent by the frontend to set the content of the table mapping + * hash value to queue number. The backend should calculate the hash from + * the packet header, use it as an index into the table (modulo the size + * of the table) and then steer the packet to the queue number found at + * that index. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING + * data[0] = grant reference of page containing the mapping (sub-)table + * (assumed to start at beginning of grant) + * data[1] = size of (sub-)table in entries + * data[2] = offset, in entries, of sub-table within overall table + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content + * is invalid + * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger + * than the backend + * supports + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: The overall table has the following format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | mapping[0] | mapping[1] | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | . | + * | . | + * | . | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | mapping[N-2] | mapping[N-1] | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE + * message and each mapping must specifies a queue between 0 and + * "multi-queue-num-queues" (see above). + * The backend may support a mapping table larger than can be + * mapped by a single grant reference. 
Thus sub-tables within a + * larger table can be individually set by sending multiple messages + * with differing offset values. Specifying a new sub-table does not + * invalidate any table data outside that range. + * The grant reference may be read-only and must remain valid until + * the response has been processed. + */ + +DEFINE_RING_TYPES(xen_netif_ctrl, + struct xen_netif_ctrl_request, + struct xen_netif_ctrl_response); + +/* + * Guest transmit + * ============== + * + * This is the 'wire' format for transmit (frontend -> backend) packets: + * + * Fragment 1: xen_netif_tx_request_t - flags = XEN_NETTXF_* + * size = total packet size + * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include + * XEN_NETTXF_extra_info) + * ... + * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include + * XEN_NETIF_EXTRA_MORE) * ... - * Request N: xen_netif_tx_request -- 0 + * Fragment N: xen_netif_tx_request_t - (only if fragment N-1 flags include + * XEN_NETTXF_more_data - flags on preceding + * extras are not relevant here) + * flags = 0 + * size = fragment size + * + * NOTE: + * + * This format slightly is different from that used for receive + * (backend -> frontend) packets. Specifically, in a multi-fragment + * packet the actual size of fragment 1 can only be determined by + * subtracting the sizes of fragments 2..N from the total packet size. + * + * Ring slot size is 12 octets, however not all request/response + * structs use the full size. + * + * tx request data (xen_netif_tx_request_t) + * ------------------------------------ + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | grant ref | offset | flags | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | size | + * +-----+-----+-----+-----+ + * + * grant ref: Reference to buffer page. + * offset: Offset within buffer page. + * flags: XEN_NETTXF_*. + * id: request identifier, echoed in response. + * size: packet size in bytes. + * + * tx response (xen_netif_tx_response_t) + * --------------------------------- + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | status | unused | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | unused | + * +-----+-----+-----+-----+ + * + * id: reflects id in transmit request + * status: XEN_NETIF_RSP_* + * + * Guest receive + * ============= + * + * This is the 'wire' format for receive (backend -> frontend) packets: + * + * Fragment 1: xen_netif_rx_request_t - flags = XEN_NETRXF_* + * size = fragment size + * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include + * XEN_NETRXF_extra_info) + * ... + * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include + * XEN_NETIF_EXTRA_MORE) + * ... + * Fragment N: xen_netif_rx_request_t - (only if fragment N-1 flags include + * XEN_NETRXF_more_data - flags on preceding + * extras are not relevant here) + * flags = 0 + * size = fragment size + * + * NOTE: + * + * This format slightly is different from that used for transmit + * (frontend -> backend) packets. Specifically, in a multi-fragment + * packet the size of the packet can only be determined by summing the + * sizes of fragments 1..N. + * + * Ring slot size is 8 octets. 
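A hedged sketch of the size arithmetic in the two NOTEs above: on transmit, slot 0 carries the total packet size, so the first fragment's own length has to be derived by subtraction; on receive, the total is simply the sum of all fragment sizes. The fragment array used here is an assumption for illustration.

#include <xen/interface/io/netif.h>

static uint32_t tx_first_frag_len(const struct xen_netif_tx_request *frag,
				  unsigned int nr_frags)
{
	uint32_t rest = 0;
	unsigned int i;

	/* frag[0].size is the total packet size; fragments 2..N carry
	 * their own sizes, so subtract them to get fragment 1's length. */
	for (i = 1; i < nr_frags; i++)
		rest += frag[i].size;

	return frag[0].size - rest;
}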
+ * + * rx request (xen_netif_rx_request_t) + * ------------------------------- + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | pad | gref | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * id: request identifier, echoed in response. + * gref: reference to incoming granted frame. + * + * rx response (xen_netif_rx_response_t) + * --------------------------------- + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | offset | flags | status | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * id: reflects id in receive request + * offset: offset in page of start of received packet + * flags: XEN_NETRXF_* + * status: -ve: XEN_NETIF_RSP_*; +ve: Rx'ed pkt size. + * + * NOTE: Historically, to support GSO on the frontend receive side, Linux + * netfront does not make use of the rx response id (because, as + * described below, extra info structures overlay the id field). + * Instead it assumes that responses always appear in the same ring + * slot as their corresponding request. Thus, to maintain + * compatibility, backends must make sure this is the case. + * + * Extra Info + * ========== + * + * Can be present if initial request or response has NET{T,R}XF_extra_info, + * or previous extra request has XEN_NETIF_EXTRA_MORE. + * + * The struct therefore needs to fit into either a tx or rx slot and + * is therefore limited to 8 octets. + * + * NOTE: Because extra info data overlays the usual request/response + * structures, there is no id information in the opposite direction. + * So, if an extra info overlays an rx response the frontend can + * assume that it is in the same ring slot as the request that was + * consumed to make the slot available, and the backend must ensure + * this assumption is true. + * + * extra info (xen_netif_extra_info_t) + * ------------------------------- + * + * General format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags| type specific data | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | padding for tx | + * +-----+-----+-----+-----+ + * + * type: XEN_NETIF_EXTRA_TYPE_* + * flags: XEN_NETIF_EXTRA_FLAG_* + * padding for tx: present only in the tx case due to 8 octet limit + * from rx case. Not shown in type specific entries + * below. + * + * XEN_NETIF_EXTRA_TYPE_GSO: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags| size |type | pad | features | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * type: Must be XEN_NETIF_EXTRA_TYPE_GSO + * flags: XEN_NETIF_EXTRA_FLAG_* + * size: Maximum payload size of each segment. For example, + * for TCP this is just the path MSS. + * type: XEN_NETIF_GSO_TYPE_*: This determines the protocol of + * the packet and any extra features required to segment the + * packet properly. + * features: EN_XEN_NETIF_GSO_FEAT_*: This specifies any extra GSO + * features required to process this packet, such as ECN + * support for TCPv4. 
+ * + * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags| addr | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} + * flags: XEN_NETIF_EXTRA_FLAG_* + * addr: address to add/remove + * + * XEN_NETIF_EXTRA_TYPE_HASH: + * + * A backend that supports teoplitz hashing is assumed to accept + * this type of extra info in transmit packets. + * A frontend that enables hashing is assumed to accept + * this type of extra info in receive packets. + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags|htype| alg |LSB ---- value ---- MSB| + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * type: Must be XEN_NETIF_EXTRA_TYPE_HASH + * flags: XEN_NETIF_EXTRA_FLAG_* + * htype: Hash type (one of _XEN_NETIF_CTRL_HASH_TYPE_* - see above) + * alg: The algorithm used to calculate the hash (one of + * XEN_NETIF_CTRL_HASH_TYPE_ALGORITHM_* - see above) + * value: Hash value */ /* Protocol checksum field is blank in the packet (hardware offload)? */ -#define _XEN_NETTXF_csum_blank (0) -#define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank) +#define _XEN_NETTXF_csum_blank (0) +#define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ -#define _XEN_NETTXF_data_validated (1) -#define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated) +#define _XEN_NETTXF_data_validated (1) +#define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ -#define _XEN_NETTXF_more_data (2) -#define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data) +#define _XEN_NETTXF_more_data (2) +#define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ -#define _XEN_NETTXF_extra_info (3) -#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info) +#define _XEN_NETTXF_extra_info (3) +#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info) #define XEN_NETIF_MAX_TX_SIZE 0xFFFF struct xen_netif_tx_request { - grant_ref_t gref; /* Reference to buffer page */ - uint16_t offset; /* Offset within buffer page */ - uint16_t flags; /* XEN_NETTXF_* */ - uint16_t id; /* Echoed in response message. */ - uint16_t size; /* Packet size in bytes. */ + grant_ref_t gref; + uint16_t offset; + uint16_t flags; + uint16_t id; + uint16_t size; }; /* Types of xen_netif_extra_info descriptors. */ -#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ -#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ -#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ -#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ -#define XEN_NETIF_EXTRA_TYPE_MAX (4) +#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ +#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ +#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ +#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ +#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */ +#define XEN_NETIF_EXTRA_TYPE_MAX (5) -/* xen_netif_extra_info flags. */ -#define _XEN_NETIF_EXTRA_FLAG_MORE (0) -#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) +/* xen_netif_extra_info_t flags. 
*/ +#define _XEN_NETIF_EXTRA_FLAG_MORE (0) +#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types */ -#define XEN_NETIF_GSO_TYPE_NONE (0) -#define XEN_NETIF_GSO_TYPE_TCPV4 (1) -#define XEN_NETIF_GSO_TYPE_TCPV6 (2) +#define XEN_NETIF_GSO_TYPE_NONE (0) +#define XEN_NETIF_GSO_TYPE_TCPV4 (1) +#define XEN_NETIF_GSO_TYPE_TCPV6 (2) /* - * This structure needs to fit within both netif_tx_request and - * netif_rx_response for compatibility. + * This structure needs to fit within both xen_netif_tx_request_t and + * xen_netif_rx_response_t for compatibility. */ struct xen_netif_extra_info { - uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ - uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ - + uint8_t type; + uint8_t flags; union { struct { - /* - * Maximum payload size of each segment. For - * example, for TCP this is just the path MSS. - */ uint16_t size; - - /* - * GSO type. This determines the protocol of - * the packet and any extra features required - * to segment the packet properly. - */ - uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ - - /* Future expansion. */ + uint8_t type; uint8_t pad; - - /* - * GSO features. This specifies any extra GSO - * features required to process this packet, - * such as ECN support for TCPv4. - */ - uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ + uint16_t features; } gso; - struct { - uint8_t addr[6]; /* Address to add/remove. */ + uint8_t addr[6]; } mcast; - + struct { + uint8_t type; + uint8_t algorithm; + uint8_t value[4]; + } hash; uint16_t pad[3]; } u; }; struct xen_netif_tx_response { uint16_t id; - int16_t status; /* XEN_NETIF_RSP_* */ + int16_t status; }; struct xen_netif_rx_request { - uint16_t id; /* Echoed in response message. */ - grant_ref_t gref; /* Reference to incoming granted frame */ + uint16_t id; /* Echoed in response message. */ + uint16_t pad; + grant_ref_t gref; }; /* Packet data has been validated against protocol checksum. */ -#define _XEN_NETRXF_data_validated (0) -#define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated) +#define _XEN_NETRXF_data_validated (0) +#define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ -#define _XEN_NETRXF_csum_blank (1) -#define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank) +#define _XEN_NETRXF_csum_blank (1) +#define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ -#define _XEN_NETRXF_more_data (2) -#define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data) +#define _XEN_NETRXF_more_data (2) +#define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ -#define _XEN_NETRXF_extra_info (3) -#define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info) +#define _XEN_NETRXF_extra_info (3) +#define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info) -/* GSO Prefix descriptor. */ -#define _XEN_NETRXF_gso_prefix (4) -#define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix) +/* Packet has GSO prefix. Deprecated but included for compatibility */ +#define _XEN_NETRXF_gso_prefix (4) +#define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix) struct xen_netif_rx_response { - uint16_t id; - uint16_t offset; /* Offset in page of start of received packet */ - uint16_t flags; /* XEN_NETRXF_* */ - int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ + uint16_t id; + uint16_t offset; + uint16_t flags; + int16_t status; }; /* - * Generate netif ring structures and types. 
+ * Generate xen_netif ring structures and types. */ -DEFINE_RING_TYPES(xen_netif_tx, - struct xen_netif_tx_request, +DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request, struct xen_netif_tx_response); -DEFINE_RING_TYPES(xen_netif_rx, - struct xen_netif_rx_request, +DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request, struct xen_netif_rx_response); -#define XEN_NETIF_RSP_DROPPED -2 -#define XEN_NETIF_RSP_ERROR -1 -#define XEN_NETIF_RSP_OKAY 0 -/* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */ -#define XEN_NETIF_RSP_NULL 1 +#define XEN_NETIF_RSP_DROPPED -2 +#define XEN_NETIF_RSP_ERROR -1 +#define XEN_NETIF_RSP_OKAY 0 +/* No response: used for auxiliary requests (e.g., xen_netif_extra_info_t). */ +#define XEN_NETIF_RSP_NULL 1 #endif |
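Finally, a hedged usage sketch tying the hash-type definitions to the Toeplitz helper above: computing the XEN_NETIF_CTRL_HASH_TYPE_IPV4 hash for one packet. The key, the raw IPv4 header pointer and the availability of this header in the build environment are assumptions for illustration.

#define XEN_NETIF_DEFINE_TOEPLITZ	/* pull in xen_netif_toeplitz_hash() */
#include <xen/interface/io/netif.h>
#include <string.h>
#include <stdint.h>

static uint32_t ipv4_flow_hash(const uint8_t *ip_hdr,	/* start of the IPv4 header */
			       const uint8_t *key, unsigned int keylen)
{
	uint8_t buf[8];

	/* Buffer = Packet[12..15] (source) + Packet[16..19] (destination),
	 * as specified for XEN_NETIF_CTRL_HASH_TYPE_IPV4. */
	memcpy(buf, ip_hdr + 12, 4);
	memcpy(buf + 4, ip_hdr + 16, 4);

	return xen_netif_toeplitz_hash(key, keylen, buf, sizeof(buf));
}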