author     Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 14:31:10 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 14:31:10 -0800
commit     b2fe5fa68642860e7de76167c3111623aa0d5de1 (patch)
tree       b7f9b89b7039ecefbc35fe3c8e73a6ff972641dd /drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
parent     a103950e0dd2058df5e8a8d4a915707bdcf205f0 (diff)
parent     a54667f6728c2714a400f3c884727da74b6d1717 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Significantly shrink the core networking routing structures. Result
of http://vger.kernel.org/~davem/seoul2017_netdev_keynote.pdf
2) Add netdevsim driver for testing various offloads, from Jakub
Kicinski.
3) Support cross-chip FDB operations in DSA, from Vivien Didelot.
4) Add a 2nd listener hash table for TCP, similar to what was done for
UDP. From Martin KaFai Lau.
5) Add eBPF based queue selection to tun, from Jason Wang.
6) Lockless qdisc support, from John Fastabend.
7) SCTP stream interleave support, from Xin Long.
8) Smoother TCP receive autotuning, from Eric Dumazet.
9) Lots of erspan tunneling enhancements, from William Tu.
10) Add true function call support to BPF, from Alexei Starovoitov (a
   brief sketch follows this list).
11) Add explicit support for GRO HW offloading, from Michael Chan.
12) Support extack generation in more netlink subsystems. From Alexander
Aring, Quentin Monnet, and Jakub Kicinski.
13) Add 1000BaseX, flow control, and EEE support to mvneta driver. From
Russell King.
14) Add flow table abstraction to netfilter, from Pablo Neira Ayuso.
15) Many improvements and simplifications to the NFP driver bpf JIT,
from Jakub Kicinski.
16) Support for ipv6 non-equal cost multipath routing, from Ido
Schimmel.
17) Add resource abstraction to devlink, from Arkadi Sharshevsky.
18) Packet scheduler classifier shared filter block support, from Jiri
Pirko.
19) Avoid locking in act_csum, from Davide Caratti.
20) devinet_ioctl() simplifications from Al Viro.
21) More TCP bpf improvements from Lawrence Brakmo.
22) Add support for onlink ipv6 route flag, similar to ipv4, from David
Ahern.
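
Item 10 above (bpf-to-bpf function calls) means the verifier can now follow
real call instructions, so helper logic in restricted C no longer has to be
forced __always_inline. The fragment below is a minimal, hypothetical sketch
of such a program, not code from the merged series; the SEC() macro is
defined locally, the section names follow the usual libbpf conventions, and
building it assumes an LLVM BPF backend new enough to emit BPF-to-BPF calls.

/* Hypothetical sketch of a BPF program using a bpf-to-bpf call.
 * The callee is a real (non-inlined) function that the verifier
 * walks via a call instruction instead of requiring inlining.
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* noinline keeps clang from folding this back into the caller. */
static __attribute__((noinline)) int clamp_len(int len)
{
	return len > 1500 ? 1500 : len;
}

SEC("socket")
int truncate_to_mtu(struct __sk_buff *skb)
{
	/* A socket filter returns the number of bytes to keep. */
	return clamp_len(skb->len);
}

char _license[] SEC("license") = "GPL";

Before this series, logic like clamp_len() would have had to be inlined into
every caller, since the verifier could not follow calls between BPF functions.
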
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1925 commits)
tls: Add support for encryption using async offload accelerator
ip6mr: fix stale iterator
net/sched: kconfig: Remove blank help texts
openvswitch: meter: Use 64-bit arithmetic instead of 32-bit
tcp_nv: fix potential integer overflow in tcpnv_acked
r8169: fix RTL8168EP take too long to complete driver initialization.
qmi_wwan: Add support for Quectel EP06
rtnetlink: enable IFLA_IF_NETNSID for RTM_NEWLINK
ipmr: Fix ptrdiff_t print formatting
ibmvnic: Wait for device response when changing MAC
qlcnic: fix deadlock bug
tcp: release sk_frag.page in tcp_disconnect
ipv4: Get the address of interface correctly.
net_sched: gen_estimator: fix lockdep splat
net: macb: Handle HRESP error
net/mlx5e: IPoIB, Fix copy-paste bug in flow steering refactoring
ipv6: addrconf: break critical section in addrconf_verify_rtnl()
ipv6: change route cache aging logic
i40e/i40evf: Update DESC_NEEDED value to reflect larger value
bnxt_en: cleanup DIM work on device shutdown
...
Diffstat (limited to 'drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c')
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 309
1 file changed, 305 insertions, 4 deletions
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c75..c74a6c56 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 #include "rmnet_config.h"
 #include "rmnet_map.h"
 #include "rmnet_private.h"
@@ -21,6 +24,233 @@
 #define RMNET_MAP_DEAGGR_SPACING  64
 #define RMNET_MAP_DEAGGR_HEADROOM  (RMNET_MAP_DEAGGR_SPACING / 2)
 
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+					 const void *txporthdr)
+{
+	__sum16 *check = NULL;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		check = &(((struct tcphdr *)txporthdr)->check);
+		break;
+
+	case IPPROTO_UDP:
+		check = &(((struct udphdr *)txporthdr)->check);
+		break;
+
+	default:
+		check = NULL;
+		break;
+	}
+
+	return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+	u16 csum_value, csum_value_final;
+	struct iphdr *ip4h;
+	void *txporthdr;
+	__be16 addend;
+
+	ip4h = (struct iphdr *)(skb->data);
+	if ((ntohs(ip4h->frag_off) & IP_MF) ||
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+		return -EOPNOTSUPP;
+
+	txporthdr = skb->data + ip4h->ihl * 4;
+
+	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+		return 0;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+				     (__force __be16)hdr_csum);
+
+	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+					 ip4h->protocol, 0);
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip4h->protocol) {
+		case IPPROTO_UDP:
+			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL4 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+	u16 csum_value, csum_value_final;
+	__be16 ip6_hdr_csum, addend;
+	struct ipv6hdr *ip6h;
+	void *txporthdr;
+	u32 length;
+
+	ip6h = (struct ipv6hdr *)(skb->data);
+
+	txporthdr = skb->data + sizeof(struct ipv6hdr);
+	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	ip6_hdr_csum = (__force __be16)
+			~ntohs((__force __be16)ip_compute_csum(ip6h,
+			       (int)(txporthdr - (void *)(skb->data))));
+	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+				      ip6_hdr_csum);
+
+	length = (ip6h->nexthdr == IPPROTO_UDP) ?
+		 ntohs(((struct udphdr *)txporthdr)->len) :
+		 ntohs(ip6h->payload_len);
+	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+			     length, ip6h->nexthdr, 0));
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip6h->nexthdr) {
+		case IPPROTO_UDP:
+			/* RFC 2460 section 8.1
+			 * DL6 One's complement rule for UDP checksum 0
+			 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL6 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = iphdr + ip4h->ihl * 4;
+
+	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)iphdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	if (ip4h->protocol == IPPROTO_UDP)
+		ul_header->udp_ip4_ind = 1;
+	else
+		ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)ip6hdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
 /* Adds MAP header to front of skb->data
  * Padding is calculated and set appropriately in MAP header. Mux ID is
  * initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 	u32 padding, map_datalen;
 	u8 *padbytes;
 
-	if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
-		return NULL;
-
 	map_datalen = skb->len - hdrlen;
 	map_header = (struct rmnet_map_header *)
 			skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
  * returned, indicating that there are no more packets to deaggregate. Caller
  * is responsible for freeing the original skb.
  */
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port)
 {
 	struct rmnet_map_header *maph;
 	struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 	maph = (struct rmnet_map_header *)skb->data;
 	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
 
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
 
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 
 	return skbn;
 }
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+	struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+		return -EOPNOTSUPP;
+
+	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+	if (!csum_trailer->valid)
+		return -EINVAL;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+		return -EPROTONOSUPPORT;
+#endif
+
+	return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev)
+{
+	struct rmnet_map_ul_csum_header *ul_header;
+	void *iphdr;
+
+	ul_header = (struct rmnet_map_ul_csum_header *)
+		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+	if (unlikely(!(orig_dev->features &
+		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+		goto sw_csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		iphdr = (char *)ul_header +
+			sizeof(struct rmnet_map_ul_csum_header);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+			return;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+			return;
+#else
+			goto sw_csum;
+#endif
+		}
+	}
+
+sw_csum:
+	ul_header->csum_start_offset = 0;
+	ul_header->csum_insert_offset = 0;
+	ul_header->csum_enabled = 0;
+	ul_header->udp_ip4_ind = 0;
+}
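
The downlink trailer validation above is pure 16-bit one's-complement
arithmetic (RFC 1071): the hardware reports a checksum over the whole IP
packet in the trailer, and the driver subtracts the IP header sum, adds the
pseudo-header sum, and backs the stored transport checksum out before
comparing. The stand-alone user-space sketch below illustrates only that
arithmetic; csum16_add()/csum16_sub() mirror the kernel helpers of the same
name, while check16() and the sample buffer are illustrative assumptions and
not part of the patch.

/* Stand-alone sketch of the 16-bit one's-complement arithmetic used by the
 * MAP checksum trailer validation.  csum16_add()/csum16_sub() mirror the
 * kernel helpers of the same name; check16() and the sample data are
 * illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Add two one's-complement sums, folding the end-around carry back in. */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint32_t res = (uint32_t)csum + addend;

	return (uint16_t)((res & 0xffff) + (res >> 16));
}

/* Subtraction is addition of the complement. */
static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, (uint16_t)~addend);
}

/* RFC 1071 style fold of a byte buffer into a 16-bit one's-complement sum. */
static uint16_t check16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* Arbitrary "IP header" (even length) followed by a "payload". */
	static const uint8_t pkt[] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46,	/* header part  */
		0xde, 0xad, 0xbe, 0xef, 0x01		/* payload part */
	};
	const size_t hdr_len = 6;
	uint16_t whole_sum, hdr_sum, payload_sum;

	/* What the trailer reports: a sum over the whole packet. */
	whole_sum = check16(pkt, sizeof(pkt));
	hdr_sum = check16(pkt, hdr_len);
	payload_sum = check16(pkt + hdr_len, sizeof(pkt) - hdr_len);

	/* Subtracting the header sum recovers the payload sum, which is the
	 * step rmnet_map_ipv4_dl_csum_trailer() performs with csum16_sub().
	 */
	printf("payload sum 0x%04x, recovered 0x%04x\n",
	       payload_sum, csum16_sub(whole_sum, hdr_sum));
	return 0;
}

The uplink helpers rely on the same arithmetic when they store the bitwise
complement of the transport checksum field before handing the packet and the
MAP UL checksum header to the hardware.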