| author | Jeff Garzik <jgarzik@pretzel.yyz.us> | 2005-06-26 23:38:58 -0400 |
|---|---|---|
| committer | Jeff Garzik <jgarzik@pobox.com> | 2005-06-26 23:38:58 -0400 |
| commit | 5696c1944a33b4434a9a1ebb6383b906afd43a10 (patch) | |
| tree | 16fbe6ba431bcf949ee8645510b0c2fd39b5810f /net/ipv4 | |
| parent | 66b04a80eea60cabf9d89fd34deb3234a740052f (diff) | |
| parent | 020f46a39eb7b99a575b9f4d105fce2b142acdf1 (diff) | |
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'net/ipv4')
73 files changed, 5328 insertions, 1553 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 6d3e8b1..3e63123 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -53,6 +53,44 @@ config IP_ADVANCED_ROUTER
 	  If unsure, say N here.
 
+choice
+	prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
+	depends on IP_ADVANCED_ROUTER
+	default IP_FIB_HASH
+
+config IP_FIB_HASH
+	bool "FIB_HASH"
+	---help---
+	  Current FIB is very proven and good enough for most users.
+
+config IP_FIB_TRIE
+	bool "FIB_TRIE"
+	---help---
+	  Use new experimental LC-trie as FIB lookup algorithm.
+	  This improves lookup performance if you have a large
+	  number of routes.
+
+	  LC-trie is a longest matching prefix lookup algorithm which
+	  performs better than FIB_HASH for large routing tables.
+	  But, it consumes more memory and is more complex.
+
+	  LC-trie is described in:
+
+	  IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
+	  IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
+	  An experimental study of compression methods for dynamic tries
+	  Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
+	  http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
+
+endchoice
+
+# If the user does not enable advanced routing, he gets the safe
+# default of the fib-hash algorithm.
+config IP_FIB_HASH
+	bool
+	depends on !IP_ADVANCED_ROUTER
+	default y
+
 config IP_MULTIPLE_TABLES
 	bool "IP: policy routing"
 	depends on IP_ADVANCED_ROUTER
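The operation both FIB options implement is longest-matching-prefix lookup. As a reading aid, here is a minimal, self-contained sketch of that operation over a plain (uncompressed) binary trie; the LC-trie this merge adds layers path compression and level compression on top of the same idea (see the Nilsson/Karlsson paper cited in the help text). All names below are illustrative, not kernel APIs, and memory cleanup is skipped for brevity.

```c
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct bt_node {
	struct bt_node *child[2];
	int have_route;		/* a route terminates at this depth */
	uint32_t nexthop;	/* illustrative payload */
};

static struct bt_node *bt_new(void)
{
	return calloc(1, sizeof(struct bt_node));
}

/* Insert prefix/plen; bits are consumed most-significant first. */
static void bt_insert(struct bt_node *root, uint32_t prefix, int plen,
		      uint32_t nexthop)
{
	struct bt_node *n = root;
	for (int i = 0; i < plen; i++) {
		int bit = (prefix >> (31 - i)) & 1;
		if (!n->child[bit])
			n->child[bit] = bt_new();
		n = n->child[bit];
	}
	n->have_route = 1;
	n->nexthop = nexthop;
}

/* Walk toward dst, remembering the deepest node that carried a route. */
static int bt_lookup(struct bt_node *root, uint32_t dst, uint32_t *nexthop)
{
	struct bt_node *n = root;
	int found = 0;
	for (int i = 0; n && i < 32; i++) {
		if (n->have_route) { *nexthop = n->nexthop; found = 1; }
		n = n->child[(dst >> (31 - i)) & 1];
	}
	if (n && n->have_route) { *nexthop = n->nexthop; found = 1; }
	return found;
}

int main(void)
{
	struct bt_node *root = bt_new();
	bt_insert(root, 0x0a000000, 8, 1);	/* 10.0.0.0/8  -> nh 1 */
	bt_insert(root, 0x0a010000, 16, 2);	/* 10.1.0.0/16 -> nh 2 */

	uint32_t nh;
	if (bt_lookup(root, 0x0a010203, &nh))	/* 10.1.2.3 */
		printf("nexthop %u\n", nh);	/* prints 2: /16 beats /8 */
	return 0;
}
```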
@@ -407,5 +445,112 @@ config IP_TCPDIAG
 config IP_TCPDIAG_IPV6
 	def_bool (IP_TCPDIAG=y && IPV6=y) || (IP_TCPDIAG=m && IPV6)
 
+config TCP_CONG_ADVANCED
+	bool "TCP: advanced congestion control"
+	depends on INET
+	---help---
+	  Support for selection of various TCP congestion control
+	  modules.
+
+	  Nearly all users can safely say no here, and a safe default
+	  selection will be made (BIC-TCP with new Reno as a fallback).
+
+	  If unsure, say N.
+
+# TCP Reno is builtin (required as fallback)
+menu "TCP congestion control"
+	depends on TCP_CONG_ADVANCED
+
+config TCP_CONG_BIC
+	tristate "Binary Increase Congestion (BIC) control"
+	depends on INET
+	default y
+	---help---
+	  BIC-TCP is a sender-side only change that ensures a linear RTT
+	  fairness under large windows while offering both scalability and
+	  bounded TCP-friendliness. The protocol combines two schemes
+	  called additive increase and binary search increase. When the
+	  congestion window is large, additive increase with a large
+	  increment ensures linear RTT fairness as well as good
+	  scalability. Under small congestion windows, binary search
+	  increase provides TCP friendliness.
+	  See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/
+
+config TCP_CONG_WESTWOOD
+	tristate "TCP Westwood+"
+	depends on INET
+	default m
+	---help---
+	  TCP Westwood+ is a sender-side only modification of the TCP Reno
+	  protocol stack that optimizes the performance of TCP congestion
+	  control. It is based on end-to-end bandwidth estimation to set
+	  congestion window and slow start threshold after a congestion
+	  episode. Using this estimation, TCP Westwood+ adaptively sets a
+	  slow start threshold and a congestion window which takes into
+	  account the bandwidth used at the time congestion is experienced.
+	  TCP Westwood+ significantly increases fairness wrt TCP Reno in
+	  wired networks and throughput over wireless links.
+
+config TCP_CONG_HTCP
+	tristate "H-TCP"
+	depends on INET
+	default m
+	---help---
+	  H-TCP is a sender-side only modification of the TCP Reno
+	  protocol stack that optimizes the performance of TCP
+	  congestion control for high speed network links. It uses a
+	  modeswitch to change the alpha and beta parameters of TCP Reno
+	  based on network conditions and in a way so as to be fair with
+	  other Reno and H-TCP flows.
+
+config TCP_CONG_HSTCP
+	tristate "High Speed TCP"
+	depends on INET && EXPERIMENTAL
+	default n
+	---help---
+	  Sally Floyd's High Speed TCP (RFC 3649) congestion control.
+	  A modification to TCP's congestion control mechanism for use
+	  with large congestion windows. A table indicates how much to
+	  increase the congestion window by when an ACK is received.
+	  For more detail see http://www.icir.org/floyd/hstcp.html
+
+config TCP_CONG_HYBLA
+	tristate "TCP-Hybla congestion control algorithm"
+	depends on INET && EXPERIMENTAL
+	default n
+	---help---
+	  TCP-Hybla is a sender-side only change that eliminates penalization of
+	  long-RTT, large-bandwidth connections, like when satellite legs are
+	  involved, especially when sharing a common bottleneck with normal
+	  terrestrial connections.
+
+config TCP_CONG_VEGAS
+	tristate "TCP Vegas"
+	depends on INET && EXPERIMENTAL
+	default n
+	---help---
+	  TCP Vegas is a sender-side only change to TCP that anticipates
+	  the onset of congestion by estimating the bandwidth. TCP Vegas
+	  adjusts the sending rate by modifying the congestion
+	  window. TCP Vegas should provide less packet loss, but it is
+	  not as aggressive as TCP Reno.
+
+config TCP_CONG_SCALABLE
+	tristate "Scalable TCP"
+	depends on INET && EXPERIMENTAL
+	default n
+	---help---
+	  Scalable TCP is a sender-side only change to TCP which uses a
+	  MIMD congestion control algorithm which has some nice scaling
+	  properties, though is known to have fairness issues.
+	  See http://www-lce.eng.cam.ac.uk/~ctk21/scalable/
+
+endmenu
+
+config TCP_CONG_BIC
+	tristate
+	depends on !TCP_CONG_ADVANCED
+	default y
+
 source "net/ipv4/ipvs/Kconfig"
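This menu is backed by the pluggable congestion-control framework the merge brings in (note tcp_cong.o in the Makefile below). As a reading aid for the per-algorithm files, here is a skeleton module; the hook signatures are an assumption based on the 2.6.13-era struct tcp_congestion_ops and should be verified against this tree, and "demo" with its Reno-style placeholder logic is made up.

```c
#include <linux/module.h>
#include <net/tcp.h>

/* Slow start threshold after loss: standard Reno halving. */
static u32 demo_ssthresh(struct tcp_sock *tp)
{
	return max(tp->snd_cwnd >> 1U, 2U);
}

/* Lower bound on cwnd during recovery, mirroring Reno's choice. */
static u32 demo_min_cwnd(struct tcp_sock *tp)
{
	return tp->snd_ssthresh / 2;
}

/* Grow cwnd: exponential in slow start, linear in congestion avoidance.
 * Signature (ack/rtt/in_flight/flag) is assumed, see lead-in. */
static void demo_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
			    u32 in_flight, int flag)
{
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tp->snd_cwnd++;
	else if (++tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}
}

static struct tcp_congestion_ops demo = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.ssthresh	= demo_ssthresh,
	.min_cwnd	= demo_min_cwnd,
	.cong_avoid	= demo_cong_avoid,
};

static int __init demo_register(void)
{
	return tcp_register_congestion_control(&demo);
}

static void __exit demo_unregister(void)
{
	tcp_unregister_congestion_control(&demo);
}

module_init(demo_register);
module_exit(demo_unregister);
MODULE_LICENSE("GPL");
```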
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 8b379627..5718cdb 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -5,10 +5,13 @@
 obj-y := utils.o route.o inetpeer.o protocol.o \
 	     ip_input.o ip_fragment.o ip_forward.o ip_options.o \
 	     ip_output.o ip_sockglue.o \
-	     tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o tcp_minisocks.o \
+	     tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
+	     tcp_minisocks.o tcp_cong.o \
 	     datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
-	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o fib_hash.o
+	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o
 
+obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
+obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
@@ -28,6 +31,13 @@ obj-$(CONFIG_NETFILTER) += netfilter/
 obj-$(CONFIG_IP_VS) += ipvs/
 obj-$(CONFIG_IP_TCPDIAG) += tcp_diag.o
 obj-$(CONFIG_IP_ROUTE_MULTIPATH_CACHED) += multipath.o
+obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
+obj-$(CONFIG_TCP_CONG_WESTWOOD) += tcp_westwood.o
+obj-$(CONFIG_TCP_CONG_HSTCP) += tcp_highspeed.o
+obj-$(CONFIG_TCP_CONG_HYBLA) += tcp_hybla.o
+obj-$(CONFIG_TCP_CONG_HTCP) += tcp_htcp.o
+obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o
+obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
 		      xfrm4_output.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index b3cb49c..658e797 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1119,6 +1119,10 @@ module_init(inet_init);
 #ifdef CONFIG_PROC_FS
 extern int fib_proc_init(void);
 extern void fib_proc_exit(void);
+#ifdef CONFIG_IP_FIB_TRIE
+extern int fib_stat_proc_init(void);
+extern void fib_stat_proc_exit(void);
+#endif
 extern int ip_misc_proc_init(void);
 extern int raw_proc_init(void);
 extern void raw_proc_exit(void);
@@ -1139,11 +1143,19 @@ static int __init ipv4_proc_init(void)
 		goto out_udp;
 	if (fib_proc_init())
 		goto out_fib;
+#ifdef CONFIG_IP_FIB_TRIE
+	if (fib_stat_proc_init())
+		goto out_fib_stat;
+#endif
 	if (ip_misc_proc_init())
 		goto out_misc;
 out:
 	return rc;
 out_misc:
+#ifdef CONFIG_IP_FIB_TRIE
+	fib_stat_proc_exit();
+out_fib_stat:
+#endif
 	fib_proc_exit();
 out_fib:
 	udp4_proc_exit();
@@ -1181,6 +1193,7 @@ EXPORT_SYMBOL(inet_stream_connect);
 EXPORT_SYMBOL(inet_stream_ops);
 EXPORT_SYMBOL(inet_unregister_protosw);
 EXPORT_SYMBOL(net_statistics);
+EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
 
 #ifdef INET_REFCNT_DEBUG
 EXPORT_SYMBOL(inet_sock_nr);
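The ipv4_proc_init() hunk above threads the new fib_stat_proc_init() step into an existing goto-based unwind chain. A compact standalone sketch of that error-handling pattern, with made-up step names:

```c
#include <stdio.h>

static int setup_a(void) { return 0; }	/* 0 on success, like the       */
static int setup_b(void) { return 0; }	/* *_proc_init() calls above    */
static int setup_c(void) { return -1; }	/* pretend the last step fails  */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int init_all(void)
{
	int rc = -1;

	if (setup_a())
		goto out;
	if (setup_b())
		goto out_a;
	if (setup_c())
		goto out_b;
	rc = 0;
out:
	return rc;
out_b:			/* unwind strictly in reverse order of setup */
	undo_b();
out_a:
	undo_a();
	goto out;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
```

Adding a step means adding one check, one label, and one undo call, which is exactly the shape of the #ifdef'd lines the patch inserts.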
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 0e98f22..514c85b 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -200,7 +200,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
 	xfrm_state_put(x);
 }
 
-static int ah_init_state(struct xfrm_state *x, void *args)
+static int ah_init_state(struct xfrm_state *x)
 {
 	struct ah_data *ahp = NULL;
 	struct xfrm_algo_desc *aalg_desc;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 3cc9673..d8a10e3 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -233,11 +233,14 @@ int inet_addr_onlink(struct in_device *in_dev, u32 a, u32 b)
 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			 int destroy)
 {
+	struct in_ifaddr *promote = NULL;
 	struct in_ifaddr *ifa1 = *ifap;
 
 	ASSERT_RTNL();
 
-	/* 1. Deleting primary ifaddr forces deletion all secondaries */
+	/* 1. Deleting primary ifaddr forces deletion all secondaries
+	 * unless alias promotion is set
+	 **/
 	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
 		struct in_ifaddr *ifa;
 
@@ -251,11 +254,16 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			continue;
 		}
 
-		*ifap1 = ifa->ifa_next;
+		if (!IN_DEV_PROMOTE_SECONDARIES(in_dev)) {
+			*ifap1 = ifa->ifa_next;
 
-		rtmsg_ifa(RTM_DELADDR, ifa);
-		notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
-		inet_free_ifa(ifa);
+			rtmsg_ifa(RTM_DELADDR, ifa);
+			notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
+			inet_free_ifa(ifa);
+		} else {
+			promote = ifa;
+			break;
+		}
 	}
 }
 
@@ -281,6 +289,13 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 		if (!in_dev->ifa_list)
 			inetdev_destroy(in_dev);
 	}
+
+	if (promote && IN_DEV_PROMOTE_SECONDARIES(in_dev)) {
+		/* not sure if we should send a delete notify first? */
+		promote->ifa_flags &= ~IFA_F_SECONDARY;
+		rtmsg_ifa(RTM_NEWADDR, promote);
+		notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote);
+	}
 }
 
 static int inet_insert_ifa(struct in_ifaddr *ifa)
@@ -1015,14 +1030,13 @@ static struct notifier_block ip_netdev_notifier = {
 };
 
 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
-			    u32 pid, u32 seq, int event)
+			    u32 pid, u32 seq, int event, unsigned int flags)
 {
 	struct ifaddrmsg *ifm;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb->tail;
 
-	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ifm));
-	if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags);
 	ifm = NLMSG_DATA(nlh);
 	ifm->ifa_family = AF_INET;
 	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
@@ -1075,7 +1089,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 				continue;
 			if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq,
-					     RTM_NEWADDR) <= 0) {
+					     RTM_NEWADDR, NLM_F_MULTI) <= 0) {
 				rcu_read_unlock();
 				goto done;
 			}
@@ -1098,7 +1112,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
 
 	if (!skb)
 		netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, ENOBUFS);
-	else if (inet_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
+	else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
 		kfree_skb(skb);
 		netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, EINVAL);
 	} else {
@@ -1384,6 +1398,15 @@ static struct devinet_sysctl_table {
 			.proc_handler	= &ipv4_doint_and_flush,
 			.strategy	= &ipv4_doint_and_flush_strategy,
 		},
+		{
+			.ctl_name	= NET_IPV4_CONF_PROMOTE_SECONDARIES,
+			.procname	= "promote_secondaries",
+			.data		= &ipv4_devconf.promote_secondaries,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= &ipv4_doint_and_flush,
+			.strategy	= &ipv4_doint_and_flush_strategy,
+		},
 	},
 	.devinet_dev = {
 		{
@@ -1448,7 +1471,7 @@ static void devinet_sysctl_register(struct in_device *in_dev,
 	 * by sysctl and we wouldn't want anyone to change it under our feet
 	 * (see SIOCSIFNAME).
 	 */
-	dev_name = net_sysctl_strdup(dev_name);
+	dev_name = kstrdup(dev_name, GFP_KERNEL);
 	if (!dev_name)
 		goto free;
 
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 053a883..ba57446 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -362,7 +362,7 @@ static void esp_destroy(struct xfrm_state *x)
 	kfree(esp);
 }
 
-static int esp_init_state(struct xfrm_state *x, void *args)
+static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
 
@@ -478,7 +478,7 @@ static int __init esp4_init(void)
 {
 	struct xfrm_decap_state decap;
 
-	if (sizeof(struct esp_decap_data) <
+	if (sizeof(struct esp_decap_data) >
 	    sizeof(decap.decap_data)) {
 		extern void decap_data_too_small(void);
 
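The esp4_init() hunk above corrects an inverted compile-time size check. The idiom it uses references a function that is defined nowhere, so the build only fails, at link time and with a descriptive symbol name, when the condition is actually true. A standalone sketch of the idiom, assuming an optimizing compiler that drops the provably dead branch (the kernel builds with -O2):

```c
#include <stdio.h>

struct payload   { char data[16]; };
struct container { char decap_data[20]; };

int main(void)
{
	/* Never defined anywhere. Only referenced on the error path, so
	 * the linker complains iff the payload outgrows the container. */
	extern void payload_too_big_for_container(void);

	if (sizeof(struct payload) > sizeof(struct container))
		payload_too_big_for_container();

	puts("payload fits");
	return 0;
}
```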
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 563e7d6..cd8e45a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -516,6 +516,60 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
 #undef BRD1_OK
 }
 
+static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
+{
+	struct fib_result res;
+	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = frn->fl_addr,
+						 .fwmark = frn->fl_fwmark,
+						 .tos = frn->fl_tos,
+						 .scope = frn->fl_scope } } };
+	if (tb) {
+		local_bh_disable();
+
+		frn->tb_id = tb->tb_id;
+		frn->err = tb->tb_lookup(tb, &fl, &res);
+
+		if (!frn->err) {
+			frn->prefixlen = res.prefixlen;
+			frn->nh_sel = res.nh_sel;
+			frn->type = res.type;
+			frn->scope = res.scope;
+		}
+		local_bh_enable();
+	}
+}
+
+static void nl_fib_input(struct sock *sk, int len)
+{
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh = NULL;
+	struct fib_result_nl *frn;
+	int err;
+	u32 pid;
+	struct fib_table *tb;
+
+	skb = skb_recv_datagram(sk, 0, 0, &err);
+	nlh = (struct nlmsghdr *)skb->data;
+
+	frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
+	tb = fib_get_table(frn->tb_id_in);
+
+	nl_fib_lookup(frn, tb);
+
+	pid = nlh->nlmsg_pid;		/* pid of sending process */
+	NETLINK_CB(skb).groups = 0;	/* not in mcast group */
+	NETLINK_CB(skb).pid = 0;	/* from kernel */
+	NETLINK_CB(skb).dst_pid = pid;
+	NETLINK_CB(skb).dst_groups = 0;	/* unicast */
+	netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
+}
+
+static void nl_fib_lookup_init(void)
+{
+	netlink_kernel_create(NETLINK_FIB_LOOKUP, nl_fib_input);
+}
+
 static void fib_disable_ip(struct net_device *dev, int force)
 {
 	if (fib_sync_down(0, dev, force))
@@ -604,6 +658,7 @@ void __init ip_fib_init(void)
 
 	register_netdevice_notifier(&fib_netdev_notifier);
 	register_inetaddr_notifier(&fib_inetaddr_notifier);
+	nl_fib_lookup_init();
 }
 
 EXPORT_SYMBOL(inet_addr_type);
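nl_fib_input() above gives userspace a way to ask the kernel for a raw FIB lookup over the new NETLINK_FIB_LOOKUP family. A hedged userspace sketch of such a query follows; the fib_result_nl layout is hand-copied from this patch series' include/net/ip_fib.h and must be checked against the actual headers, and error handling is minimal.

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_FIB_LOOKUP
#define NETLINK_FIB_LOOKUP 10	/* value reserved in linux/netlink.h */
#endif

/* Assumed layout, mirroring struct fib_result_nl in this patch series. */
struct fib_result_nl {
	unsigned int	fl_addr;	/* input: address to look up */
	unsigned int	fl_fwmark;
	unsigned char	fl_tos;
	unsigned char	fl_scope;
	unsigned char	tb_id_in;	/* input: table to search */
	unsigned char	tb_id;		/* outputs from here on */
	unsigned char	prefixlen;
	unsigned char	nh_sel;
	unsigned char	type;
	unsigned char	scope;
	int		err;
};

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct fib_result_nl frn;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_FIB_LOOKUP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	sa.nl_pid = getpid();
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("bind");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_pid = getpid();	/* nl_fib_input() unicasts the reply here */
	req.frn.fl_addr = htonl(0x0a010203);	/* 10.1.2.3 */
	req.frn.tb_id_in = 254;			/* RT_TABLE_MAIN */

	if (send(fd, &req, sizeof(req), 0) < 0 ||
	    recv(fd, &req, sizeof(req), 0) < 0) {
		perror("lookup");
		return 1;
	}
	printf("err=%d table=%u prefixlen=%u type=%u scope=%u\n",
	       req.frn.err, req.frn.tb_id, req.frn.prefixlen,
	       req.frn.type, req.frn.scope);
	close(fd);
	return 0;
}
```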
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 6506dcc..b10d6bb 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -703,7 +703,8 @@ fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
 				  &f->fn_key,
 				  fz->fz_order,
 				  fa->fa_tos,
-				  fa->fa_info) < 0) {
+				  fa->fa_info,
+				  NLM_F_MULTI) < 0) {
 			cb->args[3] = i;
 			return -1;
 		}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index ac4485f..b729d97 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -30,7 +30,8 @@ extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *,
 			struct kern_rta *rta, struct fib_info *fi);
 extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 			 u8 tb_id, u8 type, u8 scope, void *dst,
-			 int dst_len, u8 tos, struct fib_info *fi);
+			 int dst_len, u8 tos, struct fib_info *fi,
+			 unsigned int);
 extern void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
 		      int z, int tb_id,
 		      struct nlmsghdr *n, struct netlink_skb_parms *req);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 39d0aad..0b298bb 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -367,13 +367,14 @@ static struct notifier_block fib_rules_notifier = {
 
 static __inline__ int inet_fill_rule(struct sk_buff *skb,
 				     struct fib_rule *r,
-				     struct netlink_callback *cb)
+				     struct netlink_callback *cb,
+				     unsigned int flags)
 {
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb->tail;
 
-	nlh = NLMSG_PUT(skb, NETLINK_CREDS(cb->skb)->pid, cb->nlh->nlmsg_seq, RTM_NEWRULE, sizeof(*rtm));
+	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags);
 	rtm = NLMSG_DATA(nlh);
 	rtm->rtm_family = AF_INET;
 	rtm->rtm_dst_len = r->r_dst_len;
@@ -422,7 +423,7 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 	for (r=fib_rules, idx=0; r; r = r->r_next, idx++) {
 		if (idx < s_idx)
 			continue;
-		if (inet_fill_rule(skb, r, cb) < 0)
+		if (inet_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
 			break;
 	}
 	read_unlock(&fib_rules_lock);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 029362d..c886b28 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -276,7 +276,7 @@ void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
 	       struct nlmsghdr *n, struct netlink_skb_parms *req)
 {
 	struct sk_buff *skb;
-	u32 pid = req ? req->pid : 0;
+	u32 pid = req ? req->pid : n->nlmsg_pid;
 	int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
 
 	skb = alloc_skb(size, GFP_KERNEL);
@@ -286,7 +286,7 @@ void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
 	if (fib_dump_info(skb, pid, n->nlmsg_seq, event, tb_id,
 			  fa->fa_type, fa->fa_scope, &key, z,
 			  fa->fa_tos,
-			  fa->fa_info) < 0) {
+			  fa->fa_info, 0) < 0) {
 		kfree_skb(skb);
 		return;
 	}
@@ -932,13 +932,13 @@ u32 __fib_res_prefsrc(struct fib_result *res)
 int
 fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	      u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, u8 tos,
-	      struct fib_info *fi)
+	      struct fib_info *fi, unsigned int flags)
 {
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb->tail;
 
-	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*rtm));
+	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
 	rtm = NLMSG_DATA(nlh);
 	rtm->rtm_family = AF_INET;
 	rtm->rtm_dst_len = dst_len;
@@ -1035,7 +1035,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
 	}
 
 	nl->nlmsg_flags = NLM_F_REQUEST;
-	nl->nlmsg_pid = 0;
+	nl->nlmsg_pid = current->pid;
 	nl->nlmsg_seq = 0;
 	nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
 	if (cmd == SIOCDELRT) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
new file mode 100644
index 0000000..0671569
--- /dev/null
+++ b/net/ipv4/fib_trie.c
@@ -0,0 +1,2454 @@
+/*
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation; either version
+ *   2 of the License, or (at your option) any later version.
+ *
+ *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
+ *     & Swedish University of Agricultural Sciences.
+ *
+ *   Jens Laas <jens.laas@data.slu.se> Swedish University of
+ *     Agricultural Sciences.
+ *
+ *   Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
+ *
+ * This work is based on the LPC-trie which is originally described in:
+ *
+ * An experimental study of compression methods for dynamic tries
+ * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
+ * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
+ *
+ *
+ * IP-address lookup using LC-tries.
Stefan Nilsson and Gunnar Karlsson + * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 + * + * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $ + * + * + * Code from fib_hash has been reused which includes the following header: + * + * + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * IPv4 FIB: lookup engine and maintenance routines. + * + * + * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define VERSION "0.323" + +#include <linux/config.h> +#include <asm/uaccess.h> +#include <asm/system.h> +#include <asm/bitops.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/socket.h> +#include <linux/sockios.h> +#include <linux/errno.h> +#include <linux/in.h> +#include <linux/inet.h> +#include <linux/netdevice.h> +#include <linux/if_arp.h> +#include <linux/proc_fs.h> +#include <linux/skbuff.h> +#include <linux/netlink.h> +#include <linux/init.h> +#include <linux/list.h> +#include <net/ip.h> +#include <net/protocol.h> +#include <net/route.h> +#include <net/tcp.h> +#include <net/sock.h> +#include <net/ip_fib.h> +#include "fib_lookup.h" + +#undef CONFIG_IP_FIB_TRIE_STATS +#define MAX_CHILDS 16384 + +#define EXTRACT(p, n, str) ((str)<<(p)>>(32-(n))) +#define KEYLENGTH (8*sizeof(t_key)) +#define MASK_PFX(k, l) (((l)==0)?0:(k >> (KEYLENGTH-l)) << (KEYLENGTH-l)) +#define TKEY_GET_MASK(offset, bits) (((bits)==0)?0:((t_key)(-1) << (KEYLENGTH - bits) >> offset)) + +static DEFINE_RWLOCK(fib_lock); + +typedef unsigned int t_key; + +#define T_TNODE 0 +#define T_LEAF 1 +#define NODE_TYPE_MASK 0x1UL +#define NODE_PARENT(_node) \ +((struct tnode *)((_node)->_parent & ~NODE_TYPE_MASK)) +#define NODE_SET_PARENT(_node, _ptr) \ +((_node)->_parent = (((unsigned long)(_ptr)) | \ + ((_node)->_parent & NODE_TYPE_MASK))) +#define NODE_INIT_PARENT(_node, _type) \ +((_node)->_parent = (_type)) +#define NODE_TYPE(_node) \ +((_node)->_parent & NODE_TYPE_MASK) + +#define IS_TNODE(n) (!(n->_parent & T_LEAF)) +#define IS_LEAF(n) (n->_parent & T_LEAF) + +struct node { + t_key key; + unsigned long _parent; +}; + +struct leaf { + t_key key; + unsigned long _parent; + struct hlist_head list; +}; + +struct leaf_info { + struct hlist_node hlist; + int plen; + struct list_head falh; +}; + +struct tnode { + t_key key; + unsigned long _parent; + unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */ + unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */ + unsigned short full_children; /* KEYLENGTH bits needed */ + unsigned short empty_children; /* KEYLENGTH bits needed */ + struct node *child[0]; +}; + +#ifdef CONFIG_IP_FIB_TRIE_STATS +struct trie_use_stats { + unsigned int gets; + unsigned int backtrack; + unsigned int semantic_match_passed; + unsigned int semantic_match_miss; + unsigned int null_node_hit; +}; +#endif + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int nodesizes[MAX_CHILDS]; +}; + +struct trie { + struct node *trie; +#ifdef CONFIG_IP_FIB_TRIE_STATS + 
struct trie_use_stats stats; +#endif + int size; + unsigned int revision; +}; + +static int trie_debug = 0; + +static int tnode_full(struct tnode *tn, struct node *n); +static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n); +static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull); +static int tnode_child_length(struct tnode *tn); +static struct node *resize(struct trie *t, struct tnode *tn); +static struct tnode *inflate(struct trie *t, struct tnode *tn); +static struct tnode *halve(struct trie *t, struct tnode *tn); +static void tnode_free(struct tnode *tn); +static void trie_dump_seq(struct seq_file *seq, struct trie *t); +extern struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio); +extern int fib_detect_death(struct fib_info *fi, int order, + struct fib_info **last_resort, int *last_idx, int *dflt); + +extern void rtmsg_fib(int event, u32 key, struct fib_alias *fa, int z, int tb_id, + struct nlmsghdr *n, struct netlink_skb_parms *req); + +static kmem_cache_t *fn_alias_kmem; +static struct trie *trie_local = NULL, *trie_main = NULL; + +static void trie_bug(char *err) +{ + printk("Trie Bug: %s\n", err); + BUG(); +} + +static inline struct node *tnode_get_child(struct tnode *tn, int i) +{ + if (i >= 1<<tn->bits) + trie_bug("tnode_get_child"); + + return tn->child[i]; +} + +static inline int tnode_child_length(struct tnode *tn) +{ + return 1<<tn->bits; +} + +/* + _________________________________________________________________ + | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C | + ---------------------------------------------------------------- + 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + + _________________________________________________________________ + | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u | + ----------------------------------------------------------------- + 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + + tp->pos = 7 + tp->bits = 3 + n->pos = 15 + n->bits=4 + KEYLENGTH=32 +*/ + +static inline t_key tkey_extract_bits(t_key a, int offset, int bits) +{ + if (offset < KEYLENGTH) + return ((t_key)(a << offset)) >> (KEYLENGTH - bits); + else + return 0; +} + +static inline int tkey_equals(t_key a, t_key b) +{ + return a == b; +} + +static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b) +{ + if (bits == 0 || offset >= KEYLENGTH) + return 1; + bits = bits > KEYLENGTH ? KEYLENGTH : bits; + return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0; +} + +static inline int tkey_mismatch(t_key a, int offset, t_key b) +{ + t_key diff = a ^ b; + int i = offset; + + if(!diff) + return 0; + while((diff << i) >> (KEYLENGTH-1) == 0) + i++; + return i; +} + +/* Candiate for fib_semantics */ + +static void fn_free_alias(struct fib_alias *fa) +{ + fib_release_info(fa->fa_info); + kmem_cache_free(fn_alias_kmem, fa); +} + +/* + To understand this stuff, an understanding of keys and all their bits is + necessary. Every node in the trie has a key associated with it, but not + all of the bits in that key are significant. + + Consider a node 'n' and its parent 'tp'. + + If n is a leaf, every bit in its key is significant. Its presence is + necessitaded by path compression, since during a tree traversal (when + searching for a leaf - unless we are doing an insertion) we will completely + ignore all skipped bits we encounter. Thus we need to verify, at the end of + a potentially successful search, that we have indeed been walking the + correct key path. 
+ + Note that we can never "miss" the correct key in the tree if present by + following the wrong path. Path compression ensures that segments of the key + that are the same for all keys with a given prefix are skipped, but the + skipped part *is* identical for each node in the subtrie below the skipped + bit! trie_insert() in this implementation takes care of that - note the + call to tkey_sub_equals() in trie_insert(). + + if n is an internal node - a 'tnode' here, the various parts of its key + have many different meanings. + + Example: + _________________________________________________________________ + | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C | + ----------------------------------------------------------------- + 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + + _________________________________________________________________ + | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u | + ----------------------------------------------------------------- + 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + + tp->pos = 7 + tp->bits = 3 + n->pos = 15 + n->bits=4 + + First, let's just ignore the bits that come before the parent tp, that is + the bits from 0 to (tp->pos-1). They are *known* but at this point we do + not use them for anything. + + The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the + index into the parent's child array. That is, they will be used to find + 'n' among tp's children. + + The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits + for the node n. + + All the bits we have seen so far are significant to the node n. The rest + of the bits are really not needed or indeed known in n->key. + + The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into + n's child array, and will of course be different for each child. + + The rest of the bits, from (n->pos + n->bits) onward, are completely unknown + at this point. 
+ +*/ + +static void check_tnode(struct tnode *tn) +{ + if(tn && tn->pos+tn->bits > 32) { + printk("TNODE ERROR tn=%p, pos=%d, bits=%d\n", tn, tn->pos, tn->bits); + } +} + +static int halve_threshold = 25; +static int inflate_threshold = 50; + +static struct leaf *leaf_new(void) +{ + struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL); + if(l) { + NODE_INIT_PARENT(l, T_LEAF); + INIT_HLIST_HEAD(&l->list); + } + return l; +} + +static struct leaf_info *leaf_info_new(int plen) +{ + struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL); + li->plen = plen; + INIT_LIST_HEAD(&li->falh); + return li; +} + +static inline void free_leaf(struct leaf *l) +{ + kfree(l); +} + +static inline void free_leaf_info(struct leaf_info *li) +{ + kfree(li); +} + +static struct tnode* tnode_new(t_key key, int pos, int bits) +{ + int nchildren = 1<<bits; + int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *); + struct tnode *tn = kmalloc(sz, GFP_KERNEL); + + if(tn) { + memset(tn, 0, sz); + NODE_INIT_PARENT(tn, T_TNODE); + tn->pos = pos; + tn->bits = bits; + tn->key = key; + tn->full_children = 0; + tn->empty_children = 1<<bits; + } + if(trie_debug > 0) + printk("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode), + (unsigned int) (sizeof(struct node) * 1<<bits)); + return tn; +} + +static void tnode_free(struct tnode *tn) +{ + if(!tn) { + trie_bug("tnode_free\n"); + } + if(IS_LEAF(tn)) { + free_leaf((struct leaf *)tn); + if(trie_debug > 0 ) + printk("FL %p \n", tn); + } + else if(IS_TNODE(tn)) { + kfree(tn); + if(trie_debug > 0 ) + printk("FT %p \n", tn); + } + else { + trie_bug("tnode_free\n"); + } +} + +/* + * Check whether a tnode 'n' is "full", i.e. it is an internal node + * and no bits are skipped. See discussion in dyntree paper p. 6 + */ + +static inline int tnode_full(struct tnode *tn, struct node *n) +{ + if(n == NULL || IS_LEAF(n)) + return 0; + + return ((struct tnode *) n)->pos == tn->pos + tn->bits; +} + +static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n) +{ + tnode_put_child_reorg(tn, i, n, -1); +} + + /* + * Add a child at position i overwriting the old value. + * Update the value of full_children and empty_children. 
+ */ + +static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull) +{ + struct node *chi; + int isfull; + + if(i >= 1<<tn->bits) { + printk("bits=%d, i=%d\n", tn->bits, i); + trie_bug("tnode_put_child_reorg bits"); + } + write_lock_bh(&fib_lock); + chi = tn->child[i]; + + /* update emptyChildren */ + if (n == NULL && chi != NULL) + tn->empty_children++; + else if (n != NULL && chi == NULL) + tn->empty_children--; + + /* update fullChildren */ + if (wasfull == -1) + wasfull = tnode_full(tn, chi); + + isfull = tnode_full(tn, n); + if (wasfull && !isfull) + tn->full_children--; + + else if (!wasfull && isfull) + tn->full_children++; + if(n) + NODE_SET_PARENT(n, tn); + + tn->child[i] = n; + write_unlock_bh(&fib_lock); +} + +static struct node *resize(struct trie *t, struct tnode *tn) +{ + int i; + + if (!tn) + return NULL; + + if(trie_debug) + printk("In tnode_resize %p inflate_threshold=%d threshold=%d\n", + tn, inflate_threshold, halve_threshold); + + /* No children */ + if (tn->empty_children == tnode_child_length(tn)) { + tnode_free(tn); + return NULL; + } + /* One child */ + if (tn->empty_children == tnode_child_length(tn) - 1) + for (i = 0; i < tnode_child_length(tn); i++) { + + write_lock_bh(&fib_lock); + if (tn->child[i] != NULL) { + + /* compress one level */ + struct node *n = tn->child[i]; + if(n) + NODE_INIT_PARENT(n, NODE_TYPE(n)); + + write_unlock_bh(&fib_lock); + tnode_free(tn); + return n; + } + write_unlock_bh(&fib_lock); + } + /* + * Double as long as the resulting node has a number of + * nonempty nodes that are above the threshold. + */ + + /* + * From "Implementing a dynamic compressed trie" by Stefan Nilsson of + * the Helsinki University of Technology and Matti Tikkanen of Nokia + * Telecommunications, page 6: + * "A node is doubled if the ratio of non-empty children to all + * children in the *doubled* node is at least 'high'." + * + * 'high' in this instance is the variable 'inflate_threshold'. It + * is expressed as a percentage, so we multiply it with + * tnode_child_length() and instead of multiplying by 2 (since the + * child array will be doubled by inflate()) and multiplying + * the left-hand side by 100 (to handle the percentage thing) we + * multiply the left-hand side by 50. + * + * The left-hand side may look a bit weird: tnode_child_length(tn) + * - tn->empty_children is of course the number of non-null children + * in the current node. tn->full_children is the number of "full" + * children, that is non-null tnodes with a skip value of 0. + * All of those will be doubled in the resulting inflated tnode, so + * we just count them one extra time here. + * + * A clearer way to write this would be: + * + * to_be_doubled = tn->full_children; + * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children - + * tn->full_children; + * + * new_child_length = tnode_child_length(tn) * 2; + * + * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) / + * new_child_length; + * if (new_fill_factor >= inflate_threshold) + * + * ...and so on, tho it would mess up the while() loop. 
+ * + * anyway, + * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >= + * inflate_threshold + * + * avoid a division: + * 100 * (not_to_be_doubled + 2*to_be_doubled) >= + * inflate_threshold * new_child_length + * + * expand not_to_be_doubled and to_be_doubled, and shorten: + * 100 * (tnode_child_length(tn) - tn->empty_children + + * tn->full_children ) >= inflate_threshold * new_child_length + * + * expand new_child_length: + * 100 * (tnode_child_length(tn) - tn->empty_children + + * tn->full_children ) >= + * inflate_threshold * tnode_child_length(tn) * 2 + * + * shorten again: + * 50 * (tn->full_children + tnode_child_length(tn) - + * tn->empty_children ) >= inflate_threshold * + * tnode_child_length(tn) + * + */ + + check_tnode(tn); + + while ((tn->full_children > 0 && + 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >= + inflate_threshold * tnode_child_length(tn))) { + + tn = inflate(t, tn); + } + + check_tnode(tn); + + /* + * Halve as long as the number of empty children in this + * node is above threshold. + */ + while (tn->bits > 1 && + 100 * (tnode_child_length(tn) - tn->empty_children) < + halve_threshold * tnode_child_length(tn)) + + tn = halve(t, tn); + + /* Only one child remains */ + + if (tn->empty_children == tnode_child_length(tn) - 1) + for (i = 0; i < tnode_child_length(tn); i++) { + + write_lock_bh(&fib_lock); + if (tn->child[i] != NULL) { + /* compress one level */ + struct node *n = tn->child[i]; + + if(n) + NODE_INIT_PARENT(n, NODE_TYPE(n)); + + write_unlock_bh(&fib_lock); + tnode_free(tn); + return n; + } + write_unlock_bh(&fib_lock); + } + + return (struct node *) tn; +} + +static struct tnode *inflate(struct trie *t, struct tnode *tn) +{ + struct tnode *inode; + struct tnode *oldtnode = tn; + int olen = tnode_child_length(tn); + int i; + + if(trie_debug) + printk("In inflate\n"); + + tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1); + + if (!tn) + trie_bug("tnode_new failed"); + + for(i = 0; i < olen; i++) { + struct node *node = tnode_get_child(oldtnode, i); + + /* An empty child */ + if (node == NULL) + continue; + + /* A leaf or an internal node with skipped bits */ + + if(IS_LEAF(node) || ((struct tnode *) node)->pos > + tn->pos + tn->bits - 1) { + if(tkey_extract_bits(node->key, tn->pos + tn->bits - 1, + 1) == 0) + put_child(t, tn, 2*i, node); + else + put_child(t, tn, 2*i+1, node); + continue; + } + + /* An internal node with two children */ + inode = (struct tnode *) node; + + if (inode->bits == 1) { + put_child(t, tn, 2*i, inode->child[0]); + put_child(t, tn, 2*i+1, inode->child[1]); + + tnode_free(inode); + } + + /* An internal node with more than two children */ + else { + struct tnode *left, *right; + int size, j; + + /* We will replace this node 'inode' with two new + * ones, 'left' and 'right', each with half of the + * original children. The two new nodes will have + * a position one bit further down the key and this + * means that the "significant" part of their keys + * (see the discussion near the top of this file) + * will differ by one bit, which will be "0" in + * left's key and "1" in right's key. Since we are + * moving the key position by one step, the bit that + * we are moving away from - the bit at position + * (inode->pos) - is the one that will differ between + * left and right. So... we synthesize that bit in the + * two new keys. 
+ * The mask 'm' below will be a single "one" bit at + * the position (inode->pos) + */ + + t_key m = TKEY_GET_MASK(inode->pos, 1); + + /* Use the old key, but set the new significant + * bit to zero. + */ + left = tnode_new(inode->key&(~m), inode->pos + 1, + inode->bits - 1); + + if(!left) + trie_bug("tnode_new failed"); + + + /* Use the old key, but set the new significant + * bit to one. + */ + right = tnode_new(inode->key|m, inode->pos + 1, + inode->bits - 1); + + if(!right) + trie_bug("tnode_new failed"); + + size = tnode_child_length(left); + for(j = 0; j < size; j++) { + put_child(t, left, j, inode->child[j]); + put_child(t, right, j, inode->child[j + size]); + } + put_child(t, tn, 2*i, resize(t, left)); + put_child(t, tn, 2*i+1, resize(t, right)); + + tnode_free(inode); + } + } + tnode_free(oldtnode); + return tn; +} + +static struct tnode *halve(struct trie *t, struct tnode *tn) +{ + struct tnode *oldtnode = tn; + struct node *left, *right; + int i; + int olen = tnode_child_length(tn); + + if(trie_debug) printk("In halve\n"); + + tn=tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1); + + if(!tn) + trie_bug("tnode_new failed"); + + for(i = 0; i < olen; i += 2) { + left = tnode_get_child(oldtnode, i); + right = tnode_get_child(oldtnode, i+1); + + /* At least one of the children is empty */ + if (left == NULL) { + if (right == NULL) /* Both are empty */ + continue; + put_child(t, tn, i/2, right); + } else if (right == NULL) + put_child(t, tn, i/2, left); + + /* Two nonempty children */ + else { + struct tnode *newBinNode = + tnode_new(left->key, tn->pos + tn->bits, 1); + + if(!newBinNode) + trie_bug("tnode_new failed"); + + put_child(t, newBinNode, 0, left); + put_child(t, newBinNode, 1, right); + put_child(t, tn, i/2, resize(t, newBinNode)); + } + } + tnode_free(oldtnode); + return tn; +} + +static void *trie_init(struct trie *t) +{ + if(t) { + t->size = 0; + t->trie = NULL; + t->revision = 0; +#ifdef CONFIG_IP_FIB_TRIE_STATS + memset(&t->stats, 0, sizeof(struct trie_use_stats)); +#endif + } + return t; +} + +static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen) +{ + struct hlist_node *node; + struct leaf_info *li; + + hlist_for_each_entry(li, node, head, hlist) { + + if ( li->plen == plen ) + return li; + } + return NULL; +} + +static inline struct list_head * get_fa_head(struct leaf *l, int plen) +{ + struct list_head *fa_head=NULL; + struct leaf_info *li = find_leaf_info(&l->list, plen); + + if(li) + fa_head = &li->falh; + + return fa_head; +} + +static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) +{ + struct leaf_info *li=NULL, *last=NULL; + struct hlist_node *node, *tmp; + + write_lock_bh(&fib_lock); + + if(hlist_empty(head)) + hlist_add_head(&new->hlist, head); + else { + hlist_for_each_entry_safe(li, node, tmp, head, hlist) { + + if (new->plen > li->plen) + break; + + last = li; + } + if(last) + hlist_add_after(&last->hlist, &new->hlist); + else + hlist_add_before(&new->hlist, &li->hlist); + } + write_unlock_bh(&fib_lock); +} + +static struct leaf * +fib_find_node(struct trie *t, u32 key) +{ + int pos; + struct tnode *tn; + struct node *n; + + pos = 0; + n=t->trie; + + while (n != NULL && NODE_TYPE(n) == T_TNODE) { + tn = (struct tnode *) n; + + check_tnode(tn); + + if(tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) { + pos=tn->pos + tn->bits; + n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits)); + } + else + break; + } + /* Case we have found a leaf. 
Compare prefixes */ + + if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) { + struct leaf *l = (struct leaf *) n; + return l; + } + return NULL; +} + +static struct node *trie_rebalance(struct trie *t, struct tnode *tn) +{ + int i = 0; + int wasfull; + t_key cindex, key; + struct tnode *tp = NULL; + + if(!tn) + BUG(); + + key = tn->key; + i = 0; + + while (tn != NULL && NODE_PARENT(tn) != NULL) { + + if( i > 10 ) { + printk("Rebalance tn=%p \n", tn); + if(tn) printk("tn->parent=%p \n", NODE_PARENT(tn)); + + printk("Rebalance tp=%p \n", tp); + if(tp) printk("tp->parent=%p \n", NODE_PARENT(tp)); + } + + if( i > 12 ) BUG(); + i++; + + tp = NODE_PARENT(tn); + cindex = tkey_extract_bits(key, tp->pos, tp->bits); + wasfull = tnode_full(tp, tnode_get_child(tp, cindex)); + tn = (struct tnode *) resize (t, (struct tnode *)tn); + tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull); + + if(!NODE_PARENT(tn)) + break; + + tn = NODE_PARENT(tn); + } + /* Handle last (top) tnode */ + if (IS_TNODE(tn)) + tn = (struct tnode*) resize(t, (struct tnode *)tn); + + return (struct node*) tn; +} + +static struct list_head * +fib_insert_node(struct trie *t, u32 key, int plen) +{ + int pos, newpos; + struct tnode *tp = NULL, *tn = NULL; + struct node *n; + struct leaf *l; + int missbit; + struct list_head *fa_head=NULL; + struct leaf_info *li; + t_key cindex; + + pos = 0; + n=t->trie; + + /* If we point to NULL, stop. Either the tree is empty and we should + * just put a new leaf in if, or we have reached an empty child slot, + * and we should just put our new leaf in that. + * If we point to a T_TNODE, check if it matches our key. Note that + * a T_TNODE might be skipping any number of bits - its 'pos' need + * not be the parent's 'pos'+'bits'! + * + * If it does match the current key, get pos/bits from it, extract + * the index from our key, push the T_TNODE and walk the tree. + * + * If it doesn't, we have to replace it with a new T_TNODE. + * + * If we point to a T_LEAF, it might or might not have the same key + * as we do. If it does, just change the value, update the T_LEAF's + * value, and return it. + * If it doesn't, we need to replace it with a T_TNODE. + */ + + while (n != NULL && NODE_TYPE(n) == T_TNODE) { + tn = (struct tnode *) n; + + check_tnode(tn); + + if(tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) { + tp = tn; + pos=tn->pos + tn->bits; + n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits)); + + if(n && NODE_PARENT(n) != tn) { + printk("BUG tn=%p, n->parent=%p\n", tn, NODE_PARENT(n)); + BUG(); + } + } + else + break; + } + + /* + * n ----> NULL, LEAF or TNODE + * + * tp is n's (parent) ----> NULL or TNODE + */ + + if(tp && IS_LEAF(tp)) + BUG(); + + t->revision++; + + /* Case 1: n is a leaf. Compare prefixes */ + + if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) { + struct leaf *l = ( struct leaf *) n; + + li = leaf_info_new(plen); + + if(! li) + BUG(); + + fa_head = &li->falh; + insert_leaf_info(&l->list, li); + goto done; + } + t->size++; + l = leaf_new(); + + if(! l) + BUG(); + + l->key = key; + li = leaf_info_new(plen); + + if(! li) + BUG(); + + fa_head = &li->falh; + insert_leaf_info(&l->list, li); + + /* Case 2: n is NULL, and will just insert a new leaf */ + if (t->trie && n == NULL) { + + NODE_SET_PARENT(l, tp); + + if (!tp) + BUG(); + + else { + cindex = tkey_extract_bits(key, tp->pos, tp->bits); + put_child(t, (struct tnode *)tp, cindex, (struct node *)l); + } + } + /* Case 3: n is a LEAF or a TNODE and the key doesn't match. 
*/ + else { + /* + * Add a new tnode here + * first tnode need some special handling + */ + + if (tp) + pos=tp->pos+tp->bits; + else + pos=0; + if(n) { + newpos = tkey_mismatch(key, pos, n->key); + tn = tnode_new(n->key, newpos, 1); + } + else { + newpos = 0; + tn = tnode_new(key, newpos, 1); /* First tnode */ + } + if(!tn) + trie_bug("tnode_pfx_new failed"); + + NODE_SET_PARENT(tn, tp); + + missbit=tkey_extract_bits(key, newpos, 1); + put_child(t, tn, missbit, (struct node *)l); + put_child(t, tn, 1-missbit, n); + + if(tp) { + cindex = tkey_extract_bits(key, tp->pos, tp->bits); + put_child(t, (struct tnode *)tp, cindex, (struct node *)tn); + } + else { + t->trie = (struct node*) tn; /* First tnode */ + tp = tn; + } + } + if(tp && tp->pos+tp->bits > 32) { + printk("ERROR tp=%p pos=%d, bits=%d, key=%0x plen=%d\n", + tp, tp->pos, tp->bits, key, plen); + } + /* Rebalance the trie */ + t->trie = trie_rebalance(t, tp); +done:; + return fa_head; +} + +static int +fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta, + struct nlmsghdr *nlhdr, struct netlink_skb_parms *req) +{ + struct trie *t = (struct trie *) tb->tb_data; + struct fib_alias *fa, *new_fa; + struct list_head *fa_head=NULL; + struct fib_info *fi; + int plen = r->rtm_dst_len; + int type = r->rtm_type; + u8 tos = r->rtm_tos; + u32 key, mask; + int err; + struct leaf *l; + + if (plen > 32) + return -EINVAL; + + key = 0; + if (rta->rta_dst) + memcpy(&key, rta->rta_dst, 4); + + key = ntohl(key); + + if(trie_debug) + printk("Insert table=%d %08x/%d\n", tb->tb_id, key, plen); + + mask = ntohl( inet_make_mask(plen) ); + + if(key & ~mask) + return -EINVAL; + + key = key & mask; + + if ((fi = fib_create_info(r, rta, nlhdr, &err)) == NULL) + goto err; + + l = fib_find_node(t, key); + fa = NULL; + + if(l) { + fa_head = get_fa_head(l, plen); + fa = fib_find_alias(fa_head, tos, fi->fib_priority); + } + + /* Now fa, if non-NULL, points to the first fib alias + * with the same keys [prefix,tos,priority], if such key already + * exists or to the node before which we will insert new one. + * + * If fa is NULL, we will need to allocate a new one and + * insert to the head of f. + * + * If f is NULL, no fib node matched the destination key + * and we need to allocate a new one of those as well. + */ + + if (fa && + fa->fa_info->fib_priority == fi->fib_priority) { + struct fib_alias *fa_orig; + + err = -EEXIST; + if (nlhdr->nlmsg_flags & NLM_F_EXCL) + goto out; + + if (nlhdr->nlmsg_flags & NLM_F_REPLACE) { + struct fib_info *fi_drop; + u8 state; + + write_lock_bh(&fib_lock); + + fi_drop = fa->fa_info; + fa->fa_info = fi; + fa->fa_type = type; + fa->fa_scope = r->rtm_scope; + state = fa->fa_state; + fa->fa_state &= ~FA_S_ACCESSED; + + write_unlock_bh(&fib_lock); + + fib_release_info(fi_drop); + if (state & FA_S_ACCESSED) + rt_cache_flush(-1); + + goto succeeded; + } + /* Error if we find a perfect match which + * uses the same scope, type, and nexthop + * information. 
+ */ + fa_orig = fa; + list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) { + if (fa->fa_tos != tos) + break; + if (fa->fa_info->fib_priority != fi->fib_priority) + break; + if (fa->fa_type == type && + fa->fa_scope == r->rtm_scope && + fa->fa_info == fi) { + goto out; + } + } + if (!(nlhdr->nlmsg_flags & NLM_F_APPEND)) + fa = fa_orig; + } + err = -ENOENT; + if (!(nlhdr->nlmsg_flags&NLM_F_CREATE)) + goto out; + + err = -ENOBUFS; + new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); + if (new_fa == NULL) + goto out; + + new_fa->fa_info = fi; + new_fa->fa_tos = tos; + new_fa->fa_type = type; + new_fa->fa_scope = r->rtm_scope; + new_fa->fa_state = 0; +#if 0 + new_fa->dst = NULL; +#endif + /* + * Insert new entry to the list. + */ + + if(!fa_head) + fa_head = fib_insert_node(t, key, plen); + + write_lock_bh(&fib_lock); + + list_add_tail(&new_fa->fa_list, + (fa ? &fa->fa_list : fa_head)); + + write_unlock_bh(&fib_lock); + + rt_cache_flush(-1); + rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req); +succeeded: + return 0; +out: + fib_release_info(fi); +err:; + return err; +} + +static inline int check_leaf(struct trie *t, struct leaf *l, t_key key, int *plen, const struct flowi *flp, + struct fib_result *res, int *err) +{ + int i; + t_key mask; + struct leaf_info *li; + struct hlist_head *hhead = &l->list; + struct hlist_node *node; + + hlist_for_each_entry(li, node, hhead, hlist) { + + i = li->plen; + mask = ntohl(inet_make_mask(i)); + if (l->key != (key & mask)) + continue; + + if (((*err) = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) == 0) { + *plen = i; +#ifdef CONFIG_IP_FIB_TRIE_STATS + t->stats.semantic_match_passed++; +#endif + return 1; + } +#ifdef CONFIG_IP_FIB_TRIE_STATS + t->stats.semantic_match_miss++; +#endif + } + return 0; +} + +static int +fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) +{ + struct trie *t = (struct trie *) tb->tb_data; + int plen, ret = 0; + struct node *n; + struct tnode *pn; + int pos, bits; + t_key key=ntohl(flp->fl4_dst); + int chopped_off; + t_key cindex = 0; + int current_prefix_length = KEYLENGTH; + n = t->trie; + + read_lock(&fib_lock); + if(!n) + goto failed; + +#ifdef CONFIG_IP_FIB_TRIE_STATS + t->stats.gets++; +#endif + + /* Just a leaf? */ + if (IS_LEAF(n)) { + if( check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret) ) + goto found; + goto failed; + } + pn = (struct tnode *) n; + chopped_off = 0; + + while (pn) { + + pos = pn->pos; + bits = pn->bits; + + if(!chopped_off) + cindex = tkey_extract_bits(MASK_PFX(key, current_prefix_length), pos, bits); + + n = tnode_get_child(pn, cindex); + + if (n == NULL) { +#ifdef CONFIG_IP_FIB_TRIE_STATS + t->stats.null_node_hit++; +#endif + goto backtrace; + } + + if (IS_TNODE(n)) { +#define HL_OPTIMIZE +#ifdef HL_OPTIMIZE + struct tnode *cn = (struct tnode *)n; + t_key node_prefix, key_prefix, pref_mismatch; + int mp; + + /* + * It's a tnode, and we can do some extra checks here if we + * like, to avoid descending into a dead-end branch. + * This tnode is in the parent's child array at index + * key[p_pos..p_pos+p_bits] but potentially with some bits + * chopped off, so in reality the index may be just a + * subprefix, padded with zero at the end. + * We can also take a look at any skipped bits in this + * tnode - everything up to p_pos is supposed to be ok, + * and the non-chopped bits of the index (se previous + * paragraph) are also guaranteed ok, but the rest is + * considered unknown. 
+ * + * The skipped bits are key[pos+bits..cn->pos]. + */ + + /* If current_prefix_length < pos+bits, we are already doing + * actual prefix matching, which means everything from + * pos+(bits-chopped_off) onward must be zero along some + * branch of this subtree - otherwise there is *no* valid + * prefix present. Here we can only check the skipped + * bits. Remember, since we have already indexed into the + * parent's child array, we know that the bits we chopped of + * *are* zero. + */ + + /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */ + + if (current_prefix_length < pos+bits) { + if (tkey_extract_bits(cn->key, current_prefix_length, + cn->pos - current_prefix_length) != 0 || + !(cn->child[0])) + goto backtrace; + } + + /* + * If chopped_off=0, the index is fully validated and we + * only need to look at the skipped bits for this, the new, + * tnode. What we actually want to do is to find out if + * these skipped bits match our key perfectly, or if we will + * have to count on finding a matching prefix further down, + * because if we do, we would like to have some way of + * verifying the existence of such a prefix at this point. + */ + + /* The only thing we can do at this point is to verify that + * any such matching prefix can indeed be a prefix to our + * key, and if the bits in the node we are inspecting that + * do not match our key are not ZERO, this cannot be true. + * Thus, find out where there is a mismatch (before cn->pos) + * and verify that all the mismatching bits are zero in the + * new tnode's key. + */ + + /* Note: We aren't very concerned about the piece of the key + * that precede pn->pos+pn->bits, since these have already been + * checked. The bits after cn->pos aren't checked since these are + * by definition "unknown" at this point. Thus, what we want to + * see is if we are about to enter the "prefix matching" state, + * and in that case verify that the skipped bits that will prevail + * throughout this subtree are zero, as they have to be if we are + * to find a matching prefix. + */ + + node_prefix = MASK_PFX(cn->key, cn->pos); + key_prefix = MASK_PFX(key, cn->pos); + pref_mismatch = key_prefix^node_prefix; + mp = 0; + + /* In short: If skipped bits in this node do not match the search + * key, enter the "prefix matching" state.directly. + */ + if (pref_mismatch) { + while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) { + mp++; + pref_mismatch = pref_mismatch <<1; + } + key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp); + + if (key_prefix != 0) + goto backtrace; + + if (current_prefix_length >= cn->pos) + current_prefix_length=mp; + } +#endif + pn = (struct tnode *)n; /* Descend */ + chopped_off = 0; + continue; + } + if (IS_LEAF(n)) { + if( check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret)) + goto found; + } +backtrace: + chopped_off++; + + /* As zero don't change the child key (cindex) */ + while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1)))) { + chopped_off++; + } + + /* Decrease current_... with bits chopped off */ + if (current_prefix_length > pn->pos + pn->bits - chopped_off) + current_prefix_length = pn->pos + pn->bits - chopped_off; + + /* + * Either we do the actual chop off according or if we have + * chopped off all bits in this tnode walk up to our parent. 
+ */ + + if(chopped_off <= pn->bits) + cindex &= ~(1 << (chopped_off-1)); + else { + if( NODE_PARENT(pn) == NULL) + goto failed; + + /* Get Child's index */ + cindex = tkey_extract_bits(pn->key, NODE_PARENT(pn)->pos, NODE_PARENT(pn)->bits); + pn = NODE_PARENT(pn); + chopped_off = 0; + +#ifdef CONFIG_IP_FIB_TRIE_STATS + t->stats.backtrack++; +#endif + goto backtrace; + } + } +failed: + ret = 1; +found: + read_unlock(&fib_lock); + return ret; +} + +static int trie_leaf_remove(struct trie *t, t_key key) +{ + t_key cindex; + struct tnode *tp = NULL; + struct node *n = t->trie; + struct leaf *l; + + if(trie_debug) + printk("entering trie_leaf_remove(%p)\n", n); + + /* Note that in the case skipped bits, those bits are *not* checked! + * When we finish this, we will have NULL or a T_LEAF, and the + * T_LEAF may or may not match our key. + */ + + while (n != NULL && IS_TNODE(n)) { + struct tnode *tn = (struct tnode *) n; + check_tnode(tn); + n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits)); + + if(n && NODE_PARENT(n) != tn) { + printk("BUG tn=%p, n->parent=%p\n", tn, NODE_PARENT(n)); + BUG(); + } + } + l = (struct leaf *) n; + + if(!n || !tkey_equals(l->key, key)) + return 0; + + /* + * Key found. + * Remove the leaf and rebalance the tree + */ + + t->revision++; + t->size--; + + tp = NODE_PARENT(n); + tnode_free((struct tnode *) n); + + if(tp) { + cindex = tkey_extract_bits(key, tp->pos, tp->bits); + put_child(t, (struct tnode *)tp, cindex, NULL); + t->trie = trie_rebalance(t, tp); + } + else + t->trie = NULL; + + return 1; +} + +static int +fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta, + struct nlmsghdr *nlhdr, struct netlink_skb_parms *req) +{ + struct trie *t = (struct trie *) tb->tb_data; + u32 key, mask; + int plen = r->rtm_dst_len; + u8 tos = r->rtm_tos; + struct fib_alias *fa, *fa_to_delete; + struct list_head *fa_head; + struct leaf *l; + + if (plen > 32) + return -EINVAL; + + key = 0; + if (rta->rta_dst) + memcpy(&key, rta->rta_dst, 4); + + key = ntohl(key); + mask = ntohl( inet_make_mask(plen) ); + + if(key & ~mask) + return -EINVAL; + + key = key & mask; + l = fib_find_node(t, key); + + if(!l) + return -ESRCH; + + fa_head = get_fa_head(l, plen); + fa = fib_find_alias(fa_head, tos, 0); + + if (!fa) + return -ESRCH; + + if (trie_debug) + printk("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t); + + fa_to_delete = NULL; + fa_head = fa->fa_list.prev; + list_for_each_entry(fa, fa_head, fa_list) { + struct fib_info *fi = fa->fa_info; + + if (fa->fa_tos != tos) + break; + + if ((!r->rtm_type || + fa->fa_type == r->rtm_type) && + (r->rtm_scope == RT_SCOPE_NOWHERE || + fa->fa_scope == r->rtm_scope) && + (!r->rtm_protocol || + fi->fib_protocol == r->rtm_protocol) && + fib_nh_match(r, nlhdr, rta, fi) == 0) { + fa_to_delete = fa; + break; + } + } + + if (fa_to_delete) { + int kill_li = 0; + struct leaf_info *li; + + fa = fa_to_delete; + rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, nlhdr, req); + + l = fib_find_node(t, key); + li = find_leaf_info(&l->list, plen); + + write_lock_bh(&fib_lock); + + list_del(&fa->fa_list); + + if(list_empty(fa_head)) { + hlist_del(&li->hlist); + kill_li = 1; + } + write_unlock_bh(&fib_lock); + + if(kill_li) + free_leaf_info(li); + + if(hlist_empty(&l->list)) + trie_leaf_remove(t, key); + + if (fa->fa_state & FA_S_ACCESSED) + rt_cache_flush(-1); + + fn_free_alias(fa); + return 0; + } + return -ESRCH; +} + +static int trie_flush_list(struct trie *t, struct list_head *head) +{ + struct fib_alias *fa, 
+	int found = 0;
+
+	list_for_each_entry_safe(fa, fa_node, head, fa_list) {
+		struct fib_info *fi = fa->fa_info;
+
+		if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
+
+			write_lock_bh(&fib_lock);
+			list_del(&fa->fa_list);
+			write_unlock_bh(&fib_lock);
+
+			fn_free_alias(fa);
+			found++;
+		}
+	}
+	return found;
+}
+
+static int trie_flush_leaf(struct trie *t, struct leaf *l)
+{
+	int found = 0;
+	struct hlist_head *lih = &l->list;
+	struct hlist_node *node, *tmp;
+	struct leaf_info *li = NULL;
+
+	hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
+
+		found += trie_flush_list(t, &li->falh);
+
+		if (list_empty(&li->falh)) {
+
+			write_lock_bh(&fib_lock);
+			hlist_del(&li->hlist);
+			write_unlock_bh(&fib_lock);
+
+			free_leaf_info(li);
+		}
+	}
+	return found;
+}
+
+static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
+{
+	struct node *c = (struct node *) thisleaf;
+	struct tnode *p;
+	int idx;
+
+	if (c == NULL) {
+		if (t->trie == NULL)
+			return NULL;
+
+		if (IS_LEAF(t->trie))          /* trie w. just a leaf */
+			return (struct leaf *) t->trie;
+
+		p = (struct tnode *) t->trie;  /* Start */
+	}
+	else
+		p = (struct tnode *) NODE_PARENT(c);
+
+	while (p) {
+		int pos, last;
+
+		/* Find the next child of the parent */
+		if (c)
+			pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
+		else
+			pos = 0;
+
+		last = 1 << p->bits;
+		for (idx = pos; idx < last; idx++) {
+			if (p->child[idx]) {
+
+				/* Descend if tnode */
+
+				while (IS_TNODE(p->child[idx])) {
+					p = (struct tnode *) p->child[idx];
+					idx = 0;
+
+					/* Leftmost non-NULL branch */
+					if (p && IS_TNODE(p))
+						while (idx < (1 << p->bits) && p->child[idx] == NULL)
+							idx++;
+
+					/* Done with this tnode? */
+					if (idx >= (1 << p->bits) || p->child[idx] == NULL)
+						goto up;
+				}
+				return (struct leaf *) p->child[idx];
+			}
+		}
+up:
+		/* No more children, go up one step */
+		c = (struct node *) p;
+		p = (struct tnode *) NODE_PARENT(p);
+	}
+	return NULL; /* Ready. 
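nextleaf() resumes from the previous leaf, scans the parent's remaining child slots, and descends through the leftmost non-empty children until it reaches a leaf, popping upward when a tnode's slots are exhausted. The same walk over a deliberately simplified node type (the struct, helper, and names below are illustrative stand-ins, assuming every live tnode has at least one child, which the trie maintains):

#include <stddef.h>

enum kind { LEAF, TNODE };

struct xnode {
	enum kind kind;
	struct xnode *parent;
	int nslots;                 /* 1 << bits for a tnode, 0 for a leaf */
	struct xnode **child;       /* nslots entries, NULL where empty    */
};

static int slot_of(const struct xnode *p, const struct xnode *c)
{
	int i = 0;

	while (p->child[i] != c)    /* c is known to be a child of p */
		i++;
	return i;
}

/* Next leaf in slot order after 'from'; NULL 'from' starts the walk. */
static struct xnode *next_leaf(struct xnode *root, struct xnode *from)
{
	struct xnode *p;
	int idx;

	if (from == NULL) {
		if (root == NULL)
			return NULL;
		if (root->kind == LEAF)     /* trie with just a leaf */
			return root;
		p = root;
		idx = 0;
	} else {
		if (from->parent == NULL)
			return NULL;        /* 'from' was the only node */
		p = from->parent;
		idx = slot_of(p, from) + 1;
	}

	while (p != NULL) {
		for (; idx < p->nslots; idx++) {
			struct xnode *c = p->child[idx];

			if (c == NULL)
				continue;
			while (c->kind == TNODE) {
				int j = 0;

				while (c->child[j] == NULL)  /* leftmost child */
					j++;
				c = c->child[j];
			}
			return c;           /* found the next leaf */
		}
		/* Slots exhausted: go up and resume just past this tnode. */
		if (p->parent == NULL)
			return NULL;
		idx = slot_of(p->parent, p) + 1;
		p = p->parent;
	}
	return NULL;
}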
Root of trie */ +} + +static int fn_trie_flush(struct fib_table *tb) +{ + struct trie *t = (struct trie *) tb->tb_data; + struct leaf *ll = NULL, *l = NULL; + int found = 0, h; + + t->revision++; + + for (h=0; (l = nextleaf(t, l)) != NULL; h++) { + found += trie_flush_leaf(t, l); + + if (ll && hlist_empty(&ll->list)) + trie_leaf_remove(t, ll->key); + ll = l; + } + + if (ll && hlist_empty(&ll->list)) + trie_leaf_remove(t, ll->key); + + if(trie_debug) + printk("trie_flush found=%d\n", found); + return found; +} + +static int trie_last_dflt=-1; + +static void +fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) +{ + struct trie *t = (struct trie *) tb->tb_data; + int order, last_idx; + struct fib_info *fi = NULL; + struct fib_info *last_resort; + struct fib_alias *fa = NULL; + struct list_head *fa_head; + struct leaf *l; + + last_idx = -1; + last_resort = NULL; + order = -1; + + read_lock(&fib_lock); + + l = fib_find_node(t, 0); + if(!l) + goto out; + + fa_head = get_fa_head(l, 0); + if(!fa_head) + goto out; + + if (list_empty(fa_head)) + goto out; + + list_for_each_entry(fa, fa_head, fa_list) { + struct fib_info *next_fi = fa->fa_info; + + if (fa->fa_scope != res->scope || + fa->fa_type != RTN_UNICAST) + continue; + + if (next_fi->fib_priority > res->fi->fib_priority) + break; + if (!next_fi->fib_nh[0].nh_gw || + next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK) + continue; + fa->fa_state |= FA_S_ACCESSED; + + if (fi == NULL) { + if (next_fi != res->fi) + break; + } else if (!fib_detect_death(fi, order, &last_resort, + &last_idx, &trie_last_dflt)) { + if (res->fi) + fib_info_put(res->fi); + res->fi = fi; + atomic_inc(&fi->fib_clntref); + trie_last_dflt = order; + goto out; + } + fi = next_fi; + order++; + } + if (order <= 0 || fi == NULL) { + trie_last_dflt = -1; + goto out; + } + + if (!fib_detect_death(fi, order, &last_resort, &last_idx, &trie_last_dflt)) { + if (res->fi) + fib_info_put(res->fi); + res->fi = fi; + atomic_inc(&fi->fib_clntref); + trie_last_dflt = order; + goto out; + } + if (last_idx >= 0) { + if (res->fi) + fib_info_put(res->fi); + res->fi = last_resort; + if (last_resort) + atomic_inc(&last_resort->fib_clntref); + } + trie_last_dflt = last_idx; + out:; + read_unlock(&fib_lock); +} + +static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb, + struct sk_buff *skb, struct netlink_callback *cb) +{ + int i, s_i; + struct fib_alias *fa; + + u32 xkey=htonl(key); + + s_i=cb->args[3]; + i = 0; + + list_for_each_entry(fa, fah, fa_list) { + if (i < s_i) { + i++; + continue; + } + if (fa->fa_info->fib_nh == NULL) { + printk("Trie error _fib_nh=NULL in fa[%d] k=%08x plen=%d\n", i, key, plen); + i++; + continue; + } + if (fa->fa_info == NULL) { + printk("Trie error fa_info=NULL in fa[%d] k=%08x plen=%d\n", i, key, plen); + i++; + continue; + } + + if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_NEWROUTE, + tb->tb_id, + fa->fa_type, + fa->fa_scope, + &xkey, + plen, + fa->fa_tos, + fa->fa_info, 0) < 0) { + cb->args[3] = i; + return -1; + } + i++; + } + cb->args[3]=i; + return skb->len; +} + +static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb, + struct netlink_callback *cb) +{ + int h, s_h; + struct list_head *fa_head; + struct leaf *l = NULL; + s_h=cb->args[2]; + + for (h=0; (l = nextleaf(t, l)) != NULL; h++) { + + if (h < s_h) + continue; + if (h > s_h) + memset(&cb->args[3], 0, + sizeof(cb->args) - 3*sizeof(cb->args[0])); + + fa_head = 
get_fa_head(l, plen); + + if(!fa_head) + continue; + + if(list_empty(fa_head)) + continue; + + if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) { + cb->args[2]=h; + return -1; + } + } + cb->args[2]=h; + return skb->len; +} + +static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb) +{ + int m, s_m; + struct trie *t = (struct trie *) tb->tb_data; + + s_m = cb->args[1]; + + read_lock(&fib_lock); + for (m=0; m<=32; m++) { + + if (m < s_m) + continue; + if (m > s_m) + memset(&cb->args[2], 0, + sizeof(cb->args) - 2*sizeof(cb->args[0])); + + if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) { + cb->args[1] = m; + goto out; + } + } + read_unlock(&fib_lock); + cb->args[1] = m; + return skb->len; + out: + read_unlock(&fib_lock); + return -1; +} + +/* Fix more generic FIB names for init later */ + +#ifdef CONFIG_IP_MULTIPLE_TABLES +struct fib_table * fib_hash_init(int id) +#else +struct fib_table * __init fib_hash_init(int id) +#endif +{ + struct fib_table *tb; + struct trie *t; + + if (fn_alias_kmem == NULL) + fn_alias_kmem = kmem_cache_create("ip_fib_alias", + sizeof(struct fib_alias), + 0, SLAB_HWCACHE_ALIGN, + NULL, NULL); + + tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie), + GFP_KERNEL); + if (tb == NULL) + return NULL; + + tb->tb_id = id; + tb->tb_lookup = fn_trie_lookup; + tb->tb_insert = fn_trie_insert; + tb->tb_delete = fn_trie_delete; + tb->tb_flush = fn_trie_flush; + tb->tb_select_default = fn_trie_select_default; + tb->tb_dump = fn_trie_dump; + memset(tb->tb_data, 0, sizeof(struct trie)); + + t = (struct trie *) tb->tb_data; + + trie_init(t); + + if (id == RT_TABLE_LOCAL) + trie_local=t; + else if (id == RT_TABLE_MAIN) + trie_main=t; + + if (id == RT_TABLE_LOCAL) + printk("IPv4 FIB: Using LC-trie version %s\n", VERSION); + + return tb; +} + +/* Trie dump functions */ + +static void putspace_seq(struct seq_file *seq, int n) +{ + while (n--) seq_printf(seq, " "); +} + +static void printbin_seq(struct seq_file *seq, unsigned int v, int bits) +{ + while (bits--) + seq_printf(seq, "%s", (v & (1<<bits))?"1":"0"); +} + +static void printnode_seq(struct seq_file *seq, int indent, struct node *n, + int pend, int cindex, int bits) +{ + putspace_seq(seq, indent); + if (IS_LEAF(n)) + seq_printf(seq, "|"); + else + seq_printf(seq, "+"); + if (bits) { + seq_printf(seq, "%d/", cindex); + printbin_seq(seq, cindex, bits); + seq_printf(seq, ": "); + } + else + seq_printf(seq, "<root>: "); + seq_printf(seq, "%s:%p ", IS_LEAF(n)?"Leaf":"Internal node", n); + + if (IS_LEAF(n)) + seq_printf(seq, "key=%d.%d.%d.%d\n", + n->key >> 24, (n->key >> 16) % 256, (n->key >> 8) % 256, n->key % 256); + else { + int plen=((struct tnode *)n)->pos; + t_key prf=MASK_PFX(n->key, plen); + seq_printf(seq, "key=%d.%d.%d.%d/%d\n", + prf >> 24, (prf >> 16) % 256, (prf >> 8) % 256, prf % 256, plen); + } + if (IS_LEAF(n)) { + struct leaf *l=(struct leaf *)n; + struct fib_alias *fa; + int i; + for (i=32; i>=0; i--) + if(find_leaf_info(&l->list, i)) { + + struct list_head *fa_head = get_fa_head(l, i); + + if(!fa_head) + continue; + + if(list_empty(fa_head)) + continue; + + putspace_seq(seq, indent+2); + seq_printf(seq, "{/%d...dumping}\n", i); + + + list_for_each_entry(fa, fa_head, fa_list) { + putspace_seq(seq, indent+2); + if (fa->fa_info->fib_nh == NULL) { + seq_printf(seq, "Error _fib_nh=NULL\n"); + continue; + } + if (fa->fa_info == NULL) { + seq_printf(seq, "Error fa_info=NULL\n"); + continue; + } + + seq_printf(seq, "{type=%d scope=%d TOS=%d}\n", + fa->fa_type, + 
fa->fa_scope, + fa->fa_tos); + } + } + } + else if (IS_TNODE(n)) { + struct tnode *tn=(struct tnode *)n; + putspace_seq(seq, indent); seq_printf(seq, "| "); + seq_printf(seq, "{key prefix=%08x/", tn->key&TKEY_GET_MASK(0, tn->pos)); + printbin_seq(seq, tkey_extract_bits(tn->key, 0, tn->pos), tn->pos); + seq_printf(seq, "}\n"); + putspace_seq(seq, indent); seq_printf(seq, "| "); + seq_printf(seq, "{pos=%d", tn->pos); + seq_printf(seq, " (skip=%d bits)", tn->pos - pend); + seq_printf(seq, " bits=%d (%u children)}\n", tn->bits, (1 << tn->bits)); + putspace_seq(seq, indent); seq_printf(seq, "| "); + seq_printf(seq, "{empty=%d full=%d}\n", tn->empty_children, tn->full_children); + } +} + +static void trie_dump_seq(struct seq_file *seq, struct trie *t) +{ + struct node *n=t->trie; + int cindex=0; + int indent=1; + int pend=0; + int depth = 0; + + read_lock(&fib_lock); + + seq_printf(seq, "------ trie_dump of t=%p ------\n", t); + if (n) { + printnode_seq(seq, indent, n, pend, cindex, 0); + if (IS_TNODE(n)) { + struct tnode *tn=(struct tnode *)n; + pend = tn->pos+tn->bits; + putspace_seq(seq, indent); seq_printf(seq, "\\--\n"); + indent += 3; + depth++; + + while (tn && cindex < (1 << tn->bits)) { + if (tn->child[cindex]) { + + /* Got a child */ + + printnode_seq(seq, indent, tn->child[cindex], pend, cindex, tn->bits); + if (IS_LEAF(tn->child[cindex])) { + cindex++; + + } + else { + /* + * New tnode. Decend one level + */ + + depth++; + n=tn->child[cindex]; + tn=(struct tnode *)n; + pend=tn->pos+tn->bits; + putspace_seq(seq, indent); seq_printf(seq, "\\--\n"); + indent+=3; + cindex=0; + } + } + else + cindex++; + + /* + * Test if we are done + */ + + while (cindex >= (1 << tn->bits)) { + + /* + * Move upwards and test for root + * pop off all traversed nodes + */ + + if (NODE_PARENT(tn) == NULL) { + tn = NULL; + n = NULL; + break; + } + else { + cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits); + tn = NODE_PARENT(tn); + cindex++; + n=(struct node *)tn; + pend=tn->pos+tn->bits; + indent-=3; + depth--; + } + } + } + } + else n = NULL; + } + else seq_printf(seq, "------ trie is empty\n"); + + read_unlock(&fib_lock); +} + +static struct trie_stat *trie_stat_new(void) +{ + struct trie_stat *s = kmalloc(sizeof(struct trie_stat), GFP_KERNEL); + int i; + + if(s) { + s->totdepth = 0; + s->maxdepth = 0; + s->tnodes = 0; + s->leaves = 0; + s->nullpointers = 0; + + for(i=0; i< MAX_CHILDS; i++) + s->nodesizes[i] = 0; + } + return s; +} + +static struct trie_stat *trie_collect_stats(struct trie *t) +{ + struct node *n=t->trie; + struct trie_stat *s = trie_stat_new(); + int cindex = 0; + int indent = 1; + int pend = 0; + int depth = 0; + + read_lock(&fib_lock); + + if (s) { + if (n) { + if (IS_TNODE(n)) { + struct tnode *tn = (struct tnode *)n; + pend=tn->pos+tn->bits; + indent += 3; + s->nodesizes[tn->bits]++; + depth++; + + while (tn && cindex < (1 << tn->bits)) { + if (tn->child[cindex]) { + /* Got a child */ + + if (IS_LEAF(tn->child[cindex])) { + cindex++; + + /* stats */ + if (depth > s->maxdepth) + s->maxdepth = depth; + s->totdepth += depth; + s->leaves++; + } + + else { + /* + * New tnode. 
Decend one level + */ + + s->tnodes++; + s->nodesizes[tn->bits]++; + depth++; + + n = tn->child[cindex]; + tn = (struct tnode *)n; + pend = tn->pos+tn->bits; + + indent += 3; + cindex = 0; + } + } + else { + cindex++; + s->nullpointers++; + } + + /* + * Test if we are done + */ + + while (cindex >= (1 << tn->bits)) { + + /* + * Move upwards and test for root + * pop off all traversed nodes + */ + + + if (NODE_PARENT(tn) == NULL) { + tn = NULL; + n = NULL; + break; + } + else { + cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits); + tn = NODE_PARENT(tn); + cindex++; + n = (struct node *)tn; + pend=tn->pos+tn->bits; + indent -= 3; + depth--; + } + } + } + } + else n = NULL; + } + } + + read_unlock(&fib_lock); + return s; +} + +#ifdef CONFIG_PROC_FS + +static struct fib_alias *fib_triestat_get_first(struct seq_file *seq) +{ + return NULL; +} + +static struct fib_alias *fib_triestat_get_next(struct seq_file *seq) +{ + return NULL; +} + +static void *fib_triestat_seq_start(struct seq_file *seq, loff_t *pos) +{ + void *v = NULL; + + if (ip_fib_main_table) + v = *pos ? fib_triestat_get_next(seq) : SEQ_START_TOKEN; + return v; +} + +static void *fib_triestat_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return v == SEQ_START_TOKEN ? fib_triestat_get_first(seq) : fib_triestat_get_next(seq); +} + +static void fib_triestat_seq_stop(struct seq_file *seq, void *v) +{ + +} + +/* + * This outputs /proc/net/fib_triestats + * + * It always works in backward compatibility mode. + * The format of the file is not supposed to be changed. + */ + +static void collect_and_show(struct trie *t, struct seq_file *seq) +{ + int bytes = 0; /* How many bytes are used, a ref is 4 bytes */ + int i, max, pointers; + struct trie_stat *stat; + int avdepth; + + stat = trie_collect_stats(t); + + bytes=0; + seq_printf(seq, "trie=%p\n", t); + + if (stat) { + if (stat->leaves) + avdepth=stat->totdepth*100 / stat->leaves; + else + avdepth=0; + seq_printf(seq, "Aver depth: %d.%02d\n", avdepth / 100, avdepth % 100 ); + seq_printf(seq, "Max depth: %4d\n", stat->maxdepth); + + seq_printf(seq, "Leaves: %d\n", stat->leaves); + bytes += sizeof(struct leaf) * stat->leaves; + seq_printf(seq, "Internal nodes: %d\n", stat->tnodes); + bytes += sizeof(struct tnode) * stat->tnodes; + + max = MAX_CHILDS-1; + + while (max >= 0 && stat->nodesizes[max] == 0) + max--; + pointers = 0; + + for (i = 1; i <= max; i++) + if (stat->nodesizes[i] != 0) { + seq_printf(seq, " %d: %d", i, stat->nodesizes[i]); + pointers += (1<<i) * stat->nodesizes[i]; + } + seq_printf(seq, "\n"); + seq_printf(seq, "Pointers: %d\n", pointers); + bytes += sizeof(struct node *) * pointers; + seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers); + seq_printf(seq, "Total size: %d kB\n", bytes / 1024); + + kfree(stat); + } + +#ifdef CONFIG_IP_FIB_TRIE_STATS + seq_printf(seq, "Counters:\n---------\n"); + seq_printf(seq,"gets = %d\n", t->stats.gets); + seq_printf(seq,"backtracks = %d\n", t->stats.backtrack); + seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed); + seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss); + seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit); +#ifdef CLEAR_STATS + memset(&(t->stats), 0, sizeof(t->stats)); +#endif +#endif /* CONFIG_IP_FIB_TRIE_STATS */ +} + +static int fib_triestat_seq_show(struct seq_file *seq, void *v) +{ + char bf[128]; + + if (v == SEQ_START_TOKEN) { + seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd 
bytes.\n", + sizeof(struct leaf), sizeof(struct tnode)); + if (trie_local) + collect_and_show(trie_local, seq); + + if (trie_main) + collect_and_show(trie_main, seq); + } + else { + snprintf(bf, sizeof(bf), + "*\t%08X\t%08X", 200, 400); + + seq_printf(seq, "%-127s\n", bf); + } + return 0; +} + +static struct seq_operations fib_triestat_seq_ops = { + .start = fib_triestat_seq_start, + .next = fib_triestat_seq_next, + .stop = fib_triestat_seq_stop, + .show = fib_triestat_seq_show, +}; + +static int fib_triestat_seq_open(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int rc = -ENOMEM; + + rc = seq_open(file, &fib_triestat_seq_ops); + if (rc) + goto out_kfree; + + seq = file->private_data; +out: + return rc; +out_kfree: + goto out; +} + +static struct file_operations fib_triestat_seq_fops = { + .owner = THIS_MODULE, + .open = fib_triestat_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +int __init fib_stat_proc_init(void) +{ + if (!proc_net_fops_create("fib_triestat", S_IRUGO, &fib_triestat_seq_fops)) + return -ENOMEM; + return 0; +} + +void __init fib_stat_proc_exit(void) +{ + proc_net_remove("fib_triestat"); +} + +static struct fib_alias *fib_trie_get_first(struct seq_file *seq) +{ + return NULL; +} + +static struct fib_alias *fib_trie_get_next(struct seq_file *seq) +{ + return NULL; +} + +static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos) +{ + void *v = NULL; + + if (ip_fib_main_table) + v = *pos ? fib_trie_get_next(seq) : SEQ_START_TOKEN; + return v; +} + +static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return v == SEQ_START_TOKEN ? fib_trie_get_first(seq) : fib_trie_get_next(seq); +} + +static void fib_trie_seq_stop(struct seq_file *seq, void *v) +{ + +} + +/* + * This outputs /proc/net/fib_trie. + * + * It always works in backward compatibility mode. + * The format of the file is not supposed to be changed. + */ + +static int fib_trie_seq_show(struct seq_file *seq, void *v) +{ + char bf[128]; + + if (v == SEQ_START_TOKEN) { + if (trie_local) + trie_dump_seq(seq, trie_local); + + if (trie_main) + trie_dump_seq(seq, trie_main); + } + + else { + snprintf(bf, sizeof(bf), + "*\t%08X\t%08X", 200, 400); + seq_printf(seq, "%-127s\n", bf); + } + + return 0; +} + +static struct seq_operations fib_trie_seq_ops = { + .start = fib_trie_seq_start, + .next = fib_trie_seq_next, + .stop = fib_trie_seq_stop, + .show = fib_trie_seq_show, +}; + +static int fib_trie_seq_open(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int rc = -ENOMEM; + + rc = seq_open(file, &fib_trie_seq_ops); + if (rc) + goto out_kfree; + + seq = file->private_data; +out: + return rc; +out_kfree: + goto out; +} + +static struct file_operations fib_trie_seq_fops = { + .owner = THIS_MODULE, + .open = fib_trie_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +int __init fib_proc_init(void) +{ + if (!proc_net_fops_create("fib_trie", S_IRUGO, &fib_trie_seq_fops)) + return -ENOMEM; + return 0; +} + +void __init fib_proc_exit(void) +{ + proc_net_remove("fib_trie"); +} + +#endif /* CONFIG_PROC_FS */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 85bf0d3..cb75948 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -207,6 +207,7 @@ int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit = 1 * HZ; int sysctl_icmp_ratemask = 0x1818; +int sysctl_icmp_errors_use_inbound_ifaddr; /* * ICMP control array. 
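The "Aver depth" line printed by collect_and_show() above is fixed-point arithmetic with two decimal places: avdepth carries the average depth multiplied by 100. A standalone illustration of the same computation:

#include <stdio.h>

static void print_avg_depth(int totdepth, int leaves)
{
	/* avdepth is the average depth scaled by 100, so the integer
	 * and fractional parts fall out of / and %. */
	int avdepth = leaves ? totdepth * 100 / leaves : 0;

	printf("Aver depth: %d.%02d\n", avdepth / 100, avdepth % 100);
}

/* print_avg_depth(523, 200) prints "Aver depth: 2.61". */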
This specifies what to do with each ICMP. @@ -511,8 +512,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) */ saddr = iph->daddr; - if (!(rt->rt_flags & RTCF_LOCAL)) - saddr = 0; + if (!(rt->rt_flags & RTCF_LOCAL)) { + if (sysctl_icmp_errors_use_inbound_ifaddr) + saddr = inet_select_addr(skb_in->dev, 0, RT_SCOPE_LINK); + else + saddr = 0; + } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 4e47a26..af2ec88 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -184,6 +184,7 @@ int ip_call_ra_chain(struct sk_buff *skb) raw_rcv(last, skb2); } last = sk; + nf_reset(skb); } } @@ -200,10 +201,6 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) { int ihl = skb->nh.iph->ihl*4; -#ifdef CONFIG_NETFILTER_DEBUG - nf_debug_ip_local_deliver(skb); -#endif /*CONFIG_NETFILTER_DEBUG*/ - __skb_pull(skb, ihl); /* Free reference early: we don't need it any more, and it may diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 760dc82..ee07aec 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -107,10 +107,6 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; BUG_TRAP(newskb->dst); - -#ifdef CONFIG_NETFILTER_DEBUG - nf_debug_ip_loopback_xmit(newskb); -#endif nf_reset(newskb); netif_rx(newskb); return 0; @@ -192,10 +188,6 @@ static inline int ip_finish_output2(struct sk_buff *skb) skb = skb2; } -#ifdef CONFIG_NETFILTER_DEBUG - nf_debug_ip_finish_output2(skb); -#endif /*CONFIG_NETFILTER_DEBUG*/ - nf_reset(skb); if (hh) { @@ -415,9 +407,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->nf_bridge = from->nf_bridge; nf_bridge_get(to->nf_bridge); #endif -#ifdef CONFIG_NETFILTER_DEBUG - to->nf_debug = from->nf_debug; -#endif #endif } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 47012b9..f8b172f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -360,14 +360,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) err = copied; /* Reset and regenerate socket error */ - spin_lock_irq(&sk->sk_error_queue.lock); + spin_lock_bh(&sk->sk_error_queue.lock); sk->sk_err = 0; if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_irq(&sk->sk_error_queue.lock); + spin_unlock_bh(&sk->sk_error_queue.lock); sk->sk_error_report(sk); } else - spin_unlock_irq(&sk->sk_error_queue.lock); + spin_unlock_bh(&sk->sk_error_queue.lock); out_free_skb: kfree_skb(skb); diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 1a23c52..2065944 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -236,15 +236,10 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) t->props.mode = 1; t->props.saddr.a4 = x->props.saddr.a4; t->props.flags = x->props.flags; - - t->type = xfrm_get_type(IPPROTO_IPIP, t->props.family); - if (t->type == NULL) - goto error; - - if (t->type->init_state(t, NULL)) + + if (xfrm_init_state(t)) goto error; - t->km.state = XFRM_STATE_VALID; atomic_set(&t->tunnel_users, 1); out: return t; @@ -422,7 +417,7 @@ static void ipcomp_destroy(struct xfrm_state *x) kfree(ipcd); } -static int ipcomp_init_state(struct xfrm_state *x, void *args) +static int ipcomp_init_state(struct xfrm_state *x) { int err; struct ipcomp_data *ipcd; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index e21c049..e4f809a 100644 --- 
a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1350,6 +1350,7 @@ int ip_mr_input(struct sk_buff *skb) */ read_lock(&mrt_lock); if (mroute_socket) { + nf_reset(skb); raw_rcv(mroute_socket, skb); read_unlock(&mrt_lock); return 0; diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile index a788461..30e85de 100644 --- a/net/ipv4/ipvs/Makefile +++ b/net/ipv4/ipvs/Makefile @@ -11,7 +11,7 @@ ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ - ip_vs_est.o ip_vs_proto.o ip_vs_proto_icmp.o \ + ip_vs_est.o ip_vs_proto.o \ $(ip_vs_proto-objs-y) diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 218d970..12a82e9 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -2059,7 +2059,7 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) dst->addr = src->addr; dst->port = src->port; dst->fwmark = src->fwmark; - strcpy(dst->sched_name, src->scheduler->name); + strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); dst->flags = src->flags; dst->timeout = src->timeout / HZ; dst->netmask = src->netmask; @@ -2080,6 +2080,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { if (count >= get->num_services) goto out; + memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { @@ -2094,6 +2095,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { if (count >= get->num_services) goto out; + memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { @@ -2304,12 +2306,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) memset(&d, 0, sizeof(d)); if (ip_vs_sync_state & IP_VS_STATE_MASTER) { d[0].state = IP_VS_STATE_MASTER; - strcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn); + strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn)); d[0].syncid = ip_vs_master_syncid; } if (ip_vs_sync_state & IP_VS_STATE_BACKUP) { d[1].state = IP_VS_STATE_BACKUP; - strcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn); + strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn)); d[1].syncid = ip_vs_backup_syncid; } if (copy_to_user(user, &d, sizeof(d)) != 0) diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c index 253c462..867d4e9 100644 --- a/net/ipv4/ipvs/ip_vs_proto.c +++ b/net/ipv4/ipvs/ip_vs_proto.c @@ -216,9 +216,6 @@ int ip_vs_protocol_init(void) #ifdef CONFIG_IP_VS_PROTO_UDP REGISTER_PROTOCOL(&ip_vs_protocol_udp); #endif -#ifdef CONFIG_IP_VS_PROTO_ICMP - REGISTER_PROTOCOL(&ip_vs_protocol_icmp); -#endif #ifdef CONFIG_IP_VS_PROTO_AH REGISTER_PROTOCOL(&ip_vs_protocol_ah); #endif diff --git a/net/ipv4/ipvs/ip_vs_proto_icmp.c b/net/ipv4/ipvs/ip_vs_proto_icmp.c deleted file mode 100644 index 191e94a..0000000 --- a/net/ipv4/ipvs/ip_vs_proto_icmp.c +++ /dev/null @@ -1,182 +0,0 @@ -/* - * ip_vs_proto_icmp.c: ICMP load balancing support for IP Virtual Server - * - * Authors: Julian Anastasov <ja@ssi.bg>, March 2002 - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation; - * - */ - -#include <linux/module.h> -#include 
<linux/kernel.h> -#include <linux/icmp.h> -#include <linux/netfilter.h> -#include <linux/netfilter_ipv4.h> - -#include <net/ip_vs.h> - - -static int icmp_timeouts[1] = { 1*60*HZ }; - -static char * icmp_state_name_table[1] = { "ICMP" }; - -static struct ip_vs_conn * -icmp_conn_in_get(const struct sk_buff *skb, - struct ip_vs_protocol *pp, - const struct iphdr *iph, - unsigned int proto_off, - int inverse) -{ -#if 0 - struct ip_vs_conn *cp; - - if (likely(!inverse)) { - cp = ip_vs_conn_in_get(iph->protocol, - iph->saddr, 0, - iph->daddr, 0); - } else { - cp = ip_vs_conn_in_get(iph->protocol, - iph->daddr, 0, - iph->saddr, 0); - } - - return cp; - -#else - return NULL; -#endif -} - -static struct ip_vs_conn * -icmp_conn_out_get(const struct sk_buff *skb, - struct ip_vs_protocol *pp, - const struct iphdr *iph, - unsigned int proto_off, - int inverse) -{ -#if 0 - struct ip_vs_conn *cp; - - if (likely(!inverse)) { - cp = ip_vs_conn_out_get(iph->protocol, - iph->saddr, 0, - iph->daddr, 0); - } else { - cp = ip_vs_conn_out_get(IPPROTO_UDP, - iph->daddr, 0, - iph->saddr, 0); - } - - return cp; -#else - return NULL; -#endif -} - -static int -icmp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, - int *verdict, struct ip_vs_conn **cpp) -{ - *verdict = NF_ACCEPT; - return 0; -} - -static int -icmp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) -{ - if (!(skb->nh.iph->frag_off & __constant_htons(IP_OFFSET))) { - if (skb->ip_summed != CHECKSUM_UNNECESSARY) { - if (ip_vs_checksum_complete(skb, skb->nh.iph->ihl * 4)) { - IP_VS_DBG_RL_PKT(0, pp, skb, 0, "Failed checksum for"); - return 0; - } - } - } - return 1; -} - -static void -icmp_debug_packet(struct ip_vs_protocol *pp, - const struct sk_buff *skb, - int offset, - const char *msg) -{ - char buf[256]; - struct iphdr _iph, *ih; - - ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); - if (ih == NULL) - sprintf(buf, "%s TRUNCATED", pp->name); - else if (ih->frag_off & __constant_htons(IP_OFFSET)) - sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u frag", - pp->name, NIPQUAD(ih->saddr), - NIPQUAD(ih->daddr)); - else { - struct icmphdr _icmph, *ic; - - ic = skb_header_pointer(skb, offset + ih->ihl*4, - sizeof(_icmph), &_icmph); - if (ic == NULL) - sprintf(buf, "%s TRUNCATED to %u bytes\n", - pp->name, skb->len - offset); - else - sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u T:%d C:%d", - pp->name, NIPQUAD(ih->saddr), - NIPQUAD(ih->daddr), - ic->type, ic->code); - } - printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); -} - -static int -icmp_state_transition(struct ip_vs_conn *cp, int direction, - const struct sk_buff *skb, - struct ip_vs_protocol *pp) -{ - cp->timeout = pp->timeout_table[IP_VS_ICMP_S_NORMAL]; - return 1; -} - -static int -icmp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to) -{ - int num; - char **names; - - num = IP_VS_ICMP_S_LAST; - names = icmp_state_name_table; - return ip_vs_set_state_timeout(pp->timeout_table, num, names, sname, to); -} - - -static void icmp_init(struct ip_vs_protocol *pp) -{ - pp->timeout_table = icmp_timeouts; -} - -static void icmp_exit(struct ip_vs_protocol *pp) -{ -} - -struct ip_vs_protocol ip_vs_protocol_icmp = { - .name = "ICMP", - .protocol = IPPROTO_ICMP, - .dont_defrag = 0, - .init = icmp_init, - .exit = icmp_exit, - .conn_schedule = icmp_conn_schedule, - .conn_in_get = icmp_conn_in_get, - .conn_out_get = icmp_conn_out_get, - .snat_handler = NULL, - .dnat_handler = NULL, - .csum_check = icmp_csum_check, - .state_transition = icmp_state_transition, - .register_app = NULL, - 
.unregister_app = NULL, - .app_conn_bind = NULL, - .debug_packet = icmp_debug_packet, - .timeout_change = NULL, - .set_state_timeout = icmp_set_state_timeout, -}; diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index 25c4795..574d1f5 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c @@ -839,10 +839,10 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) ip_vs_sync_state |= state; if (state == IP_VS_STATE_MASTER) { - strcpy(ip_vs_master_mcast_ifn, mcast_ifn); + strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, sizeof(ip_vs_master_mcast_ifn)); ip_vs_master_syncid = syncid; } else { - strcpy(ip_vs_backup_mcast_ifn, mcast_ifn); + strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, sizeof(ip_vs_backup_mcast_ifn)); ip_vs_backup_syncid = syncid; } diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index de21da0..a8512a3 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c @@ -127,7 +127,6 @@ ip_vs_dst_reset(struct ip_vs_dest *dest) #define IP_VS_XMIT(skb, rt) \ do { \ - nf_reset_debug(skb); \ (skb)->nfcache |= NFC_IPVS_PROPERTY; \ (skb)->ip_summed = CHECKSUM_NONE; \ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL, \ diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c index 9349686..c9cf872 100644 --- a/net/ipv4/multipath_drr.c +++ b/net/ipv4/multipath_drr.c @@ -31,6 +31,7 @@ #include <linux/igmp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/module.h> #include <linux/mroute.h> #include <linux/init.h> #include <net/ip.h> @@ -57,7 +58,6 @@ struct multipath_device { static struct multipath_device state[MULTIPATH_MAX_DEVICECANDIDATES]; static DEFINE_SPINLOCK(state_lock); -static struct rtable *last_selection = NULL; static int inline __multipath_findslot(void) { @@ -111,11 +111,6 @@ struct notifier_block drr_dev_notifier = { .notifier_call = drr_dev_event, }; -static void drr_remove(struct rtable *rt) -{ - if (last_selection == rt) - last_selection = NULL; -} static void drr_safe_inc(atomic_t *usecount) { @@ -144,14 +139,6 @@ static void drr_select_route(const struct flowi *flp, int devidx = -1; int cur_min_devidx = -1; - /* if necessary and possible utilize the old alternative */ - if ((flp->flags & FLOWI_FLAG_MULTIPATHOLDROUTE) != 0 && - last_selection != NULL) { - result = last_selection; - *rp = result; - return; - } - /* 1. make sure all alt. nexthops have the same GC related data */ /* 2. 
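The strcpy()-to-strlcpy() conversions in the ip_vs hunks above all follow one rule: the destination is a fixed-size array, so the copy must be bounded by sizeof(). A minimal sketch of the idiom (the struct and names below are illustrative, not from the patch):

#include <linux/string.h>

struct ifn_cfg {
	char ifn[16];               /* fixed-size, like mcast_ifn above */
};

static void set_ifn(struct ifn_cfg *cfg, const char *name)
{
	/* Unlike strcpy(), strlcpy() writes at most sizeof(cfg->ifn)
	 * bytes and always NUL-terminates, truncating an over-long
	 * 'name' instead of overflowing the array. */
	strlcpy(cfg->ifn, name, sizeof(cfg->ifn));
}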
determine the new candidate to be returned */ result = NULL; @@ -229,12 +216,10 @@ static void drr_select_route(const struct flowi *flp, } *rp = result; - last_selection = result; } static struct ip_mp_alg_ops drr_ops = { .mp_alg_select_route = drr_select_route, - .mp_alg_remove = drr_remove, }; static int __init drr_init(void) @@ -244,7 +229,7 @@ static int __init drr_init(void) if (err) return err; - err = multipath_alg_register(&drr_ops, IP_MP_ALG_RR); + err = multipath_alg_register(&drr_ops, IP_MP_ALG_DRR); if (err) goto fail; @@ -263,3 +248,4 @@ static void __exit drr_exit(void) module_init(drr_init); module_exit(drr_exit); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/multipath_random.c b/net/ipv4/multipath_random.c index 805a16e..5249dbe 100644 --- a/net/ipv4/multipath_random.c +++ b/net/ipv4/multipath_random.c @@ -31,6 +31,7 @@ #include <linux/igmp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/module.h> #include <linux/mroute.h> #include <linux/init.h> #include <net/ip.h> @@ -126,3 +127,4 @@ static void __exit random_exit(void) module_init(random_init); module_exit(random_exit); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/multipath_rr.c b/net/ipv4/multipath_rr.c index 554a8256..b6cd287 100644 --- a/net/ipv4/multipath_rr.c +++ b/net/ipv4/multipath_rr.c @@ -31,6 +31,7 @@ #include <linux/igmp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/module.h> #include <linux/mroute.h> #include <linux/init.h> #include <net/ip.h> @@ -47,29 +48,12 @@ #include <net/checksum.h> #include <net/ip_mp_alg.h> -#define MULTIPATH_MAX_CANDIDATES 40 - -static struct rtable* last_used = NULL; - -static void rr_remove(struct rtable *rt) -{ - if (last_used == rt) - last_used = NULL; -} - static void rr_select_route(const struct flowi *flp, struct rtable *first, struct rtable **rp) { struct rtable *nh, *result, *min_use_cand = NULL; int min_use = -1; - /* if necessary and possible utilize the old alternative */ - if ((flp->flags & FLOWI_FLAG_MULTIPATHOLDROUTE) != 0 && - last_used != NULL) { - result = last_used; - goto out; - } - /* 1. make sure all alt. nexthops have the same GC related data * 2. 
determine the new candidate to be returned */ @@ -90,15 +74,12 @@ static void rr_select_route(const struct flowi *flp, if (!result) result = first; -out: - last_used = result; result->u.dst.__use++; *rp = result; } static struct ip_mp_alg_ops rr_ops = { .mp_alg_select_route = rr_select_route, - .mp_alg_remove = rr_remove, }; static int __init rr_init(void) @@ -113,3 +94,4 @@ static void __exit rr_exit(void) module_init(rr_init); module_exit(rr_exit); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c index c3d2ca1..bd7d75b 100644 --- a/net/ipv4/multipath_wrandom.c +++ b/net/ipv4/multipath_wrandom.c @@ -31,6 +31,7 @@ #include <linux/igmp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/module.h> #include <linux/mroute.h> #include <linux/init.h> #include <net/ip.h> @@ -342,3 +343,4 @@ static void __exit wrandom_exit(void) module_init(wrandom_init); module_exit(wrandom_exit); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index df79f5e..fa16342 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -60,7 +60,6 @@ static DECLARE_MUTEX(arpt_mutex); #define ASSERT_READ_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0) #define ASSERT_WRITE_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0) -#include <linux/netfilter_ipv4/lockhelp.h> #include <linux/netfilter_ipv4/listhelp.h> struct arpt_table_info { diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c index 3dbddd0..a78a320 100644 --- a/net/ipv4/netfilter/ip_conntrack_amanda.c +++ b/net/ipv4/netfilter/ip_conntrack_amanda.c @@ -26,7 +26,6 @@ #include <net/checksum.h> #include <net/udp.h> -#include <linux/netfilter_ipv4/lockhelp.h> #include <linux/netfilter_ipv4/ip_conntrack_helper.h> #include <linux/netfilter_ipv4/ip_conntrack_amanda.h> @@ -42,7 +41,7 @@ static char *conns[] = { "DATA ", "MESG ", "INDEX " }; /* This is slow, but it's simple. --RR */ static char amanda_buffer[65536]; -static DECLARE_LOCK(amanda_buffer_lock); +static DEFINE_SPINLOCK(amanda_buffer_lock); unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, @@ -76,7 +75,7 @@ static int help(struct sk_buff **pskb, return NF_ACCEPT; } - LOCK_BH(&amanda_buffer_lock); + spin_lock_bh(&amanda_buffer_lock); skb_copy_bits(*pskb, dataoff, amanda_buffer, (*pskb)->len - dataoff); data = amanda_buffer; data_limit = amanda_buffer + (*pskb)->len - dataoff; @@ -134,7 +133,7 @@ static int help(struct sk_buff **pskb, } out: - UNLOCK_BH(&amanda_buffer_lock); + spin_unlock_bh(&amanda_buffer_lock); return ret; } diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 09e8246..4b78ebe 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -38,10 +38,10 @@ #include <linux/percpu.h> #include <linux/moduleparam.h> -/* This rwlock protects the main hash table, protocol/helper/expected +/* ip_conntrack_lock protects the main hash table, protocol/helper/expected registrations, conntrack timers*/ -#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock) -#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock) +#define ASSERT_READ_LOCK(x) +#define ASSERT_WRITE_LOCK(x) #include <linux/netfilter_ipv4/ip_conntrack.h> #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> @@ -57,7 +57,7 @@ #define DEBUGP(format, args...) 
#endif -DECLARE_RWLOCK(ip_conntrack_lock); +DEFINE_RWLOCK(ip_conntrack_lock); /* ip_conntrack_standalone needs this */ atomic_t ip_conntrack_count = ATOMIC_INIT(0); @@ -147,7 +147,7 @@ static void destroy_expect(struct ip_conntrack_expect *exp) static void unlink_expect(struct ip_conntrack_expect *exp) { - MUST_BE_WRITE_LOCKED(&ip_conntrack_lock); + ASSERT_WRITE_LOCK(&ip_conntrack_lock); list_del(&exp->list); /* Logically in destroy_expect, but we hold the lock here. */ exp->master->expecting--; @@ -157,9 +157,9 @@ static void expectation_timed_out(unsigned long ul_expect) { struct ip_conntrack_expect *exp = (void *)ul_expect; - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); unlink_expect(exp); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); destroy_expect(exp); } @@ -209,7 +209,7 @@ clean_from_lists(struct ip_conntrack *ct) unsigned int ho, hr; DEBUGP("clean_from_lists(%p)\n", ct); - MUST_BE_WRITE_LOCKED(&ip_conntrack_lock); + ASSERT_WRITE_LOCK(&ip_conntrack_lock); ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); @@ -240,7 +240,7 @@ destroy_conntrack(struct nf_conntrack *nfct) if (ip_conntrack_destroyed) ip_conntrack_destroyed(ct); - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, @@ -254,7 +254,7 @@ destroy_conntrack(struct nf_conntrack *nfct) } CONNTRACK_STAT_INC(delete); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); if (ct->master) ip_conntrack_put(ct->master); @@ -268,12 +268,12 @@ static void death_by_timeout(unsigned long ul_conntrack) { struct ip_conntrack *ct = (void *)ul_conntrack; - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); /* Inside lock so preempt is disabled on module removal path. * Otherwise we can get spurious warnings. 
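The bulk of the conntrack and NAT changes from here on are one mechanical conversion: the private lockhelp macros give way to the generic spinlock and rwlock primitives, keeping the _bh flavour because this code runs in both process and softirq context. A sketch of the replacement idiom, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* was DECLARE_LOCK()   */
static DEFINE_RWLOCK(example_rwlock);   /* was DECLARE_RWLOCK() */

static void example_paths(void)
{
	/* was LOCK_BH()/UNLOCK_BH() */
	spin_lock_bh(&example_lock);
	/* ... touch the shared buffer ... */
	spin_unlock_bh(&example_lock);

	/* was READ_LOCK()/READ_UNLOCK() */
	read_lock_bh(&example_rwlock);
	/* ... read the shared tables ... */
	read_unlock_bh(&example_rwlock);

	/* was WRITE_LOCK()/WRITE_UNLOCK() */
	write_lock_bh(&example_rwlock);
	/* ... modify the shared tables ... */
	write_unlock_bh(&example_rwlock);
}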
*/ CONNTRACK_STAT_INC(delete_list); clean_from_lists(ct); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); ip_conntrack_put(ct); } @@ -282,7 +282,7 @@ conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i, const struct ip_conntrack_tuple *tuple, const struct ip_conntrack *ignored_conntrack) { - MUST_BE_READ_LOCKED(&ip_conntrack_lock); + ASSERT_READ_LOCK(&ip_conntrack_lock); return tuplehash_to_ctrack(i) != ignored_conntrack && ip_ct_tuple_equal(tuple, &i->tuple); } @@ -294,7 +294,7 @@ __ip_conntrack_find(const struct ip_conntrack_tuple *tuple, struct ip_conntrack_tuple_hash *h; unsigned int hash = hash_conntrack(tuple); - MUST_BE_READ_LOCKED(&ip_conntrack_lock); + ASSERT_READ_LOCK(&ip_conntrack_lock); list_for_each_entry(h, &ip_conntrack_hash[hash], list) { if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) { CONNTRACK_STAT_INC(found); @@ -313,11 +313,11 @@ ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple, { struct ip_conntrack_tuple_hash *h; - READ_LOCK(&ip_conntrack_lock); + read_lock_bh(&ip_conntrack_lock); h = __ip_conntrack_find(tuple, ignored_conntrack); if (h) atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use); - READ_UNLOCK(&ip_conntrack_lock); + read_unlock_bh(&ip_conntrack_lock); return h; } @@ -352,7 +352,7 @@ __ip_conntrack_confirm(struct sk_buff **pskb) IP_NF_ASSERT(!is_confirmed(ct)); DEBUGP("Confirming conntrack %p\n", ct); - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're @@ -380,12 +380,12 @@ __ip_conntrack_confirm(struct sk_buff **pskb) atomic_inc(&ct->ct_general.use); set_bit(IPS_CONFIRMED_BIT, &ct->status); CONNTRACK_STAT_INC(insert); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); return NF_ACCEPT; } CONNTRACK_STAT_INC(insert_failed); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); return NF_DROP; } @@ -398,9 +398,9 @@ ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple, { struct ip_conntrack_tuple_hash *h; - READ_LOCK(&ip_conntrack_lock); + read_lock_bh(&ip_conntrack_lock); h = __ip_conntrack_find(tuple, ignored_conntrack); - READ_UNLOCK(&ip_conntrack_lock); + read_unlock_bh(&ip_conntrack_lock); return h != NULL; } @@ -419,13 +419,13 @@ static int early_drop(struct list_head *chain) struct ip_conntrack *ct = NULL; int dropped = 0; - READ_LOCK(&ip_conntrack_lock); + read_lock_bh(&ip_conntrack_lock); h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *); if (h) { ct = tuplehash_to_ctrack(h); atomic_inc(&ct->ct_general.use); } - READ_UNLOCK(&ip_conntrack_lock); + read_unlock_bh(&ip_conntrack_lock); if (!ct) return dropped; @@ -508,7 +508,7 @@ init_conntrack(const struct ip_conntrack_tuple *tuple, conntrack->timeout.data = (unsigned long)conntrack; conntrack->timeout.function = death_by_timeout; - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); exp = find_expectation(tuple); if (exp) { @@ -532,7 +532,7 @@ init_conntrack(const struct ip_conntrack_tuple *tuple, list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed); atomic_inc(&ip_conntrack_count); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); if (exp) { if (exp->expectfn) @@ -723,17 +723,17 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp) { struct ip_conntrack_expect *i; - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); /* choose the the oldest 
expectation to evict */ list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) { if (expect_matches(i, exp) && del_timer(&i->timeout)) { unlink_expect(i); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); destroy_expect(i); return; } } - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); } struct ip_conntrack_expect *ip_conntrack_expect_alloc(void) @@ -760,15 +760,11 @@ static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp) exp->master->expecting++; list_add(&exp->list, &ip_conntrack_expect_list); - if (exp->master->helper->timeout) { - init_timer(&exp->timeout); - exp->timeout.data = (unsigned long)exp; - exp->timeout.function = expectation_timed_out; - exp->timeout.expires - = jiffies + exp->master->helper->timeout * HZ; - add_timer(&exp->timeout); - } else - exp->timeout.function = NULL; + init_timer(&exp->timeout); + exp->timeout.data = (unsigned long)exp; + exp->timeout.function = expectation_timed_out; + exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ; + add_timer(&exp->timeout); CONNTRACK_STAT_INC(expect_create); } @@ -808,7 +804,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect) DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple); DEBUGP("mask: "); DUMP_TUPLE(&expect->mask); - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); list_for_each_entry(i, &ip_conntrack_expect_list, list) { if (expect_matches(i, expect)) { /* Refresh timer: if it's dying, ignore.. */ @@ -832,7 +828,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect) ip_conntrack_expect_insert(expect); ret = 0; out: - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); return ret; } @@ -841,7 +837,7 @@ out: void ip_conntrack_alter_reply(struct ip_conntrack *conntrack, const struct ip_conntrack_tuple *newreply) { - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); /* Should be unconfirmed, so not in hash table yet */ IP_NF_ASSERT(!is_confirmed(conntrack)); @@ -851,15 +847,15 @@ void ip_conntrack_alter_reply(struct ip_conntrack *conntrack, conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; if (!conntrack->master && conntrack->expecting == 0) conntrack->helper = ip_ct_find_helper(newreply); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); } int ip_conntrack_helper_register(struct ip_conntrack_helper *me) { BUG_ON(me->timeout == 0); - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); list_prepend(&helpers, me); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); return 0; } @@ -878,7 +874,7 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) struct ip_conntrack_expect *exp, *tmp; /* Need write lock here, to delete helper. */ - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); LIST_DELETE(&helpers, me); /* Get rid of expectations */ @@ -893,7 +889,7 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) for (i = 0; i < ip_conntrack_htable_size; i++) LIST_FIND_W(&ip_conntrack_hash[i], unhelp, struct ip_conntrack_tuple_hash *, me); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); /* Someone could be still looking at the helper in a bh. 
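Note the pattern in the unregister paths above: the object is unpublished under the write lock, then synchronize_net() waits out any softirq that may still be using the old pointer before the caller frees it. The shape of that pattern, with illustrative names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(tbl_lock);
static void *slot;                      /* illustrative shared pointer */

static void example_unregister(void *generic_fallback)
{
	write_lock_bh(&tbl_lock);
	slot = generic_fallback;        /* unpublish the old object */
	write_unlock_bh(&tbl_lock);

	/* Wait until every softirq that might have loaded the old
	 * pointer has finished; only then is it safe to free it. */
	synchronize_net();
}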
*/ synchronize_net(); @@ -925,14 +921,14 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct, ct->timeout.expires = extra_jiffies; ct_add_counters(ct, ctinfo, skb); } else { - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); /* Need del_timer for race avoidance (may already be dying). */ if (del_timer(&ct->timeout)) { ct->timeout.expires = jiffies + extra_jiffies; add_timer(&ct->timeout); } ct_add_counters(ct, ctinfo, skb); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); } } @@ -940,10 +936,6 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct, struct sk_buff * ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user) { -#ifdef CONFIG_NETFILTER_DEBUG - unsigned int olddebug = skb->nf_debug; -#endif - skb_orphan(skb); local_bh_disable(); @@ -953,12 +945,7 @@ ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user) if (skb) { ip_send_check(skb->nh.iph); skb->nfcache |= NFC_ALTERED; -#ifdef CONFIG_NETFILTER_DEBUG - /* Packet path as if nothing had happened. */ - skb->nf_debug = olddebug; -#endif } - return skb; } @@ -997,7 +984,7 @@ get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data), { struct ip_conntrack_tuple_hash *h = NULL; - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); for (; *bucket < ip_conntrack_htable_size; (*bucket)++) { h = LIST_FIND_W(&ip_conntrack_hash[*bucket], do_iter, struct ip_conntrack_tuple_hash *, iter, data); @@ -1009,7 +996,7 @@ get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data), struct ip_conntrack_tuple_hash *, iter, data); if (h) atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use); - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); return h; } @@ -1201,14 +1188,14 @@ int __init ip_conntrack_init(void) } /* Don't NEED lock here, but good form anyway. */ - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); for (i = 0; i < MAX_IP_CT_PROTO; i++) ip_ct_protos[i] = &ip_conntrack_generic_protocol; /* Sew in builtin protocols. */ ip_ct_protos[IPPROTO_TCP] = &ip_conntrack_protocol_tcp; ip_ct_protos[IPPROTO_UDP] = &ip_conntrack_protocol_udp; ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp; - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); for (i = 0; i < ip_conntrack_htable_size; i++) INIT_LIST_HEAD(&ip_conntrack_hash[i]); diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c index dd86503..fea6dd2 100644 --- a/net/ipv4/netfilter/ip_conntrack_ftp.c +++ b/net/ipv4/netfilter/ip_conntrack_ftp.c @@ -16,7 +16,6 @@ #include <net/checksum.h> #include <net/tcp.h> -#include <linux/netfilter_ipv4/lockhelp.h> #include <linux/netfilter_ipv4/ip_conntrack_helper.h> #include <linux/netfilter_ipv4/ip_conntrack_ftp.h> #include <linux/moduleparam.h> @@ -28,7 +27,7 @@ MODULE_DESCRIPTION("ftp connection tracking helper"); /* This is slow, but it's simple. 
--RR */ static char ftp_buffer[65536]; -static DECLARE_LOCK(ip_ftp_lock); +static DEFINE_SPINLOCK(ip_ftp_lock); #define MAX_PORTS 8 static int ports[MAX_PORTS]; @@ -319,7 +318,7 @@ static int help(struct sk_buff **pskb, } datalen = (*pskb)->len - dataoff; - LOCK_BH(&ip_ftp_lock); + spin_lock_bh(&ip_ftp_lock); fb_ptr = skb_header_pointer(*pskb, dataoff, (*pskb)->len - dataoff, ftp_buffer); BUG_ON(fb_ptr == NULL); @@ -442,7 +441,7 @@ out_update_nl: if (ends_in_nl) update_nl_seq(seq, ct_ftp_info,dir); out: - UNLOCK_BH(&ip_ftp_lock); + spin_unlock_bh(&ip_ftp_lock); return ret; } diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c index 33cc734..cd98772 100644 --- a/net/ipv4/netfilter/ip_conntrack_irc.c +++ b/net/ipv4/netfilter/ip_conntrack_irc.c @@ -29,7 +29,6 @@ #include <net/checksum.h> #include <net/tcp.h> -#include <linux/netfilter_ipv4/lockhelp.h> #include <linux/netfilter_ipv4/ip_conntrack_helper.h> #include <linux/netfilter_ipv4/ip_conntrack_irc.h> #include <linux/moduleparam.h> @@ -41,7 +40,7 @@ static int max_dcc_channels = 8; static unsigned int dcc_timeout = 300; /* This is slow, but it's simple. --RR */ static char irc_buffer[65536]; -static DECLARE_LOCK(irc_buffer_lock); +static DEFINE_SPINLOCK(irc_buffer_lock); unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, @@ -141,7 +140,7 @@ static int help(struct sk_buff **pskb, if (dataoff >= (*pskb)->len) return NF_ACCEPT; - LOCK_BH(&irc_buffer_lock); + spin_lock_bh(&irc_buffer_lock); ib_ptr = skb_header_pointer(*pskb, dataoff, (*pskb)->len - dataoff, irc_buffer); BUG_ON(ib_ptr == NULL); @@ -237,7 +236,7 @@ static int help(struct sk_buff **pskb, } /* while data < ... */ out: - UNLOCK_BH(&irc_buffer_lock); + spin_unlock_bh(&irc_buffer_lock); return ret; } diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c index ff8c34a..31d7539 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c @@ -26,7 +26,6 @@ #include <linux/netfilter_ipv4/ip_conntrack.h> #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> -#include <linux/netfilter_ipv4/lockhelp.h> #if 0 #define DEBUGP(format, ...) printk(format, ## __VA_ARGS__) @@ -35,7 +34,7 @@ #endif /* Protects conntrack->proto.sctp */ -static DECLARE_RWLOCK(sctp_lock); +static DEFINE_RWLOCK(sctp_lock); /* FIXME: Examine ipfilter's timeouts and conntrack transitions more closely. They're more complex. 
--RR @@ -199,9 +198,9 @@ static int sctp_print_conntrack(struct seq_file *s, DEBUGP(__FUNCTION__); DEBUGP("\n"); - READ_LOCK(&sctp_lock); + read_lock_bh(&sctp_lock); state = conntrack->proto.sctp.state; - READ_UNLOCK(&sctp_lock); + read_unlock_bh(&sctp_lock); return seq_printf(s, "%s ", sctp_conntrack_names[state]); } @@ -343,13 +342,13 @@ static int sctp_packet(struct ip_conntrack *conntrack, oldsctpstate = newconntrack = SCTP_CONNTRACK_MAX; for_each_sctp_chunk (skb, sch, _sch, offset, count) { - WRITE_LOCK(&sctp_lock); + write_lock_bh(&sctp_lock); /* Special cases of Verification tag check (Sec 8.5.1) */ if (sch->type == SCTP_CID_INIT) { /* Sec 8.5.1 (A) */ if (sh->vtag != 0) { - WRITE_UNLOCK(&sctp_lock); + write_unlock_bh(&sctp_lock); return -1; } } else if (sch->type == SCTP_CID_ABORT) { @@ -357,7 +356,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)]) && !(sh->vtag == conntrack->proto.sctp.vtag [1 - CTINFO2DIR(ctinfo)])) { - WRITE_UNLOCK(&sctp_lock); + write_unlock_bh(&sctp_lock); return -1; } } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { @@ -366,13 +365,13 @@ static int sctp_packet(struct ip_conntrack *conntrack, && !(sh->vtag == conntrack->proto.sctp.vtag [1 - CTINFO2DIR(ctinfo)] && (sch->flags & 1))) { - WRITE_UNLOCK(&sctp_lock); + write_unlock_bh(&sctp_lock); return -1; } } else if (sch->type == SCTP_CID_COOKIE_ECHO) { /* Sec 8.5.1 (D) */ if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) { - WRITE_UNLOCK(&sctp_lock); + write_unlock_bh(&sctp_lock); return -1; } } @@ -384,7 +383,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, if (newconntrack == SCTP_CONNTRACK_MAX) { DEBUGP("ip_conntrack_sctp: Invalid dir=%i ctype=%u conntrack=%u\n", CTINFO2DIR(ctinfo), sch->type, oldsctpstate); - WRITE_UNLOCK(&sctp_lock); + write_unlock_bh(&sctp_lock); return -1; } @@ -396,7 +395,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), sizeof(_inithdr), &_inithdr); if (ih == NULL) { - WRITE_UNLOCK(&sctp_lock); + write_unlock_bh(&sctp_lock); return -1; } DEBUGP("Setting vtag %x for dir %d\n", @@ -405,7 +404,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, } conntrack->proto.sctp.state = newconntrack; - WRITE_UNLOCK(&sctp_lock); + write_unlock_bh(&sctp_lock); } ip_ct_refresh_acct(conntrack, ctinfo, skb, *sctp_timeouts[newconntrack]); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index 721ddbf..809dfed 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c @@ -36,7 +36,6 @@ #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv4/ip_conntrack.h> #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> -#include <linux/netfilter_ipv4/lockhelp.h> #if 0 #define DEBUGP printk @@ -46,7 +45,7 @@ #endif /* Protects conntrack->proto.tcp */ -static DECLARE_RWLOCK(tcp_lock); +static DEFINE_RWLOCK(tcp_lock); /* "Be conservative in what you do, be liberal in what you accept from others." 
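The TCP window bookkeeping in the hunks below (the td_end updates guarded by after()) relies on wraparound-safe 32-bit sequence comparison. A restatement of that idiom as commonly defined (my paraphrase of the standard helpers, not code from this patch):

#include <linux/types.h>

/* True if s1 is later than s2 in 32-bit sequence space: the signed
 * difference trick survives wraparound for spans under 2^31. */
static inline int seq_after(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

/* Example: seq_after(10, 0xfffffff0) is true, because 10 is 26 bytes
 * past 0xfffffff0 once the counter wraps. */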
@@ -330,9 +329,9 @@ static int tcp_print_conntrack(struct seq_file *s, { enum tcp_conntrack state; - READ_LOCK(&tcp_lock); + read_lock_bh(&tcp_lock); state = conntrack->proto.tcp.state; - READ_UNLOCK(&tcp_lock); + read_unlock_bh(&tcp_lock); return seq_printf(s, "%s ", tcp_conntrack_names[state]); } @@ -738,14 +737,14 @@ void ip_conntrack_tcp_update(struct sk_buff *skb, end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph); - WRITE_LOCK(&tcp_lock); + write_lock_bh(&tcp_lock); /* * We have to worry for the ack in the reply packet only... */ if (after(end, conntrack->proto.tcp.seen[dir].td_end)) conntrack->proto.tcp.seen[dir].td_end = end; conntrack->proto.tcp.last_end = end; - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " "receiver end=%u maxend=%u maxwin=%u scale=%i\n", sender->td_end, sender->td_maxend, sender->td_maxwin, @@ -857,7 +856,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, sizeof(_tcph), &_tcph); BUG_ON(th == NULL); - WRITE_LOCK(&tcp_lock); + write_lock_bh(&tcp_lock); old_state = conntrack->proto.tcp.state; dir = CTINFO2DIR(ctinfo); index = get_conntrack_index(th); @@ -879,7 +878,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, * that the client cannot but retransmit its SYN and * thus initiate a clean new session. */ - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); if (LOG_INVALID(IPPROTO_TCP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, "ip_ct_tcp: killing out of sync session "); @@ -894,7 +893,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, conntrack->proto.tcp.last_end = segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th); - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); if (LOG_INVALID(IPPROTO_TCP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, "ip_ct_tcp: invalid packet ignored "); @@ -904,7 +903,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, DEBUGP("ip_ct_tcp: Invalid dir=%i index=%u ostate=%u\n", dir, get_conntrack_index(th), old_state); - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); if (LOG_INVALID(IPPROTO_TCP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, "ip_ct_tcp: invalid state "); @@ -918,13 +917,13 @@ static int tcp_packet(struct ip_conntrack *conntrack, conntrack->proto.tcp.seen[dir].td_end)) { /* Attempt to reopen a closed connection. * Delete this connection and look up again. */ - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); if (del_timer(&conntrack->timeout)) conntrack->timeout.function((unsigned long) conntrack); return -NF_REPEAT; } else { - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); if (LOG_INVALID(IPPROTO_TCP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, "ip_ct_tcp: invalid SYN"); @@ -949,7 +948,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, if (!tcp_in_window(&conntrack->proto.tcp, dir, index, skb, iph, th)) { - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); return -NF_ACCEPT; } in_window: @@ -972,7 +971,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans && *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans ? 
ip_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state]; - WRITE_UNLOCK(&tcp_lock); + write_unlock_bh(&tcp_lock); if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { /* If only reply is a RST, we can consider ourselves not to diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c index 5bc28a2..8c1eaba 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c @@ -120,6 +120,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, * and moreover root might send raw packets. * FIXME: Source route IP option packets --RR */ if (hooknum == NF_IP_PRE_ROUTING + && skb->ip_summed != CHECKSUM_UNNECESSARY && csum_tcpudp_magic(iph->saddr, iph->daddr, udplen, IPPROTO_UDP, skb->ip_summed == CHECKSUM_HW ? skb->csum : skb_checksum(skb, iph->ihl*4, udplen, 0))) { diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index 46ca45f..42dc951 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c @@ -28,8 +28,8 @@ #include <net/checksum.h> #include <net/ip.h> -#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock) -#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock) +#define ASSERT_READ_LOCK(x) +#define ASSERT_WRITE_LOCK(x) #include <linux/netfilter_ipv4/ip_conntrack.h> #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> @@ -119,7 +119,7 @@ static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos) static void *ct_seq_start(struct seq_file *seq, loff_t *pos) { - READ_LOCK(&ip_conntrack_lock); + read_lock_bh(&ip_conntrack_lock); return ct_get_idx(seq, *pos); } @@ -131,7 +131,7 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos) static void ct_seq_stop(struct seq_file *s, void *v) { - READ_UNLOCK(&ip_conntrack_lock); + read_unlock_bh(&ip_conntrack_lock); } static int ct_seq_show(struct seq_file *s, void *v) @@ -140,7 +140,7 @@ static int ct_seq_show(struct seq_file *s, void *v) const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash); struct ip_conntrack_protocol *proto; - MUST_BE_READ_LOCKED(&ip_conntrack_lock); + ASSERT_READ_LOCK(&ip_conntrack_lock); IP_NF_ASSERT(conntrack); /* we only want to print DIR_ORIGINAL */ @@ -239,7 +239,7 @@ static void *exp_seq_start(struct seq_file *s, loff_t *pos) /* strange seq_file api calls stop even if we fail, * thus we need to grab lock since stop unlocks */ - READ_LOCK(&ip_conntrack_lock); + read_lock_bh(&ip_conntrack_lock); if (list_empty(e)) return NULL; @@ -256,6 +256,7 @@ static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct list_head *e = v; + ++*pos; e = e->next; if (e == &ip_conntrack_expect_list) @@ -266,7 +267,7 @@ static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) static void exp_seq_stop(struct seq_file *s, void *v) { - READ_UNLOCK(&ip_conntrack_lock); + read_unlock_bh(&ip_conntrack_lock); } static int exp_seq_show(struct seq_file *s, void *v) @@ -920,22 +921,22 @@ int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto) { int ret = 0; - WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); if (ip_ct_protos[proto->proto] != &ip_conntrack_generic_protocol) { ret = -EBUSY; goto out; } ip_ct_protos[proto->proto] = proto; out: - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); return ret; } void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto) { - 
WRITE_LOCK(&ip_conntrack_lock); + write_lock_bh(&ip_conntrack_lock); ip_ct_protos[proto->proto] = &ip_conntrack_generic_protocol; - WRITE_UNLOCK(&ip_conntrack_lock); + write_unlock_bh(&ip_conntrack_lock); /* Somebody could be still looking at the proto in bh. */ synchronize_net(); diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index 9fc6f93..739b6dd 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c @@ -22,8 +22,8 @@ #include <linux/udp.h> #include <linux/jhash.h> -#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock) -#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock) +#define ASSERT_READ_LOCK(x) +#define ASSERT_WRITE_LOCK(x) #include <linux/netfilter_ipv4/ip_conntrack.h> #include <linux/netfilter_ipv4/ip_conntrack_core.h> @@ -41,7 +41,7 @@ #define DEBUGP(format, args...) #endif -DECLARE_RWLOCK(ip_nat_lock); +DEFINE_RWLOCK(ip_nat_lock); /* Calculated at init based on memory size */ static unsigned int ip_nat_htable_size; @@ -65,9 +65,9 @@ static void ip_nat_cleanup_conntrack(struct ip_conntrack *conn) if (!(conn->status & IPS_NAT_DONE_MASK)) return; - WRITE_LOCK(&ip_nat_lock); + write_lock_bh(&ip_nat_lock); list_del(&conn->nat.info.bysource); - WRITE_UNLOCK(&ip_nat_lock); + write_unlock_bh(&ip_nat_lock); } /* We do checksum mangling, so if they were wrong before they're still @@ -142,7 +142,7 @@ find_appropriate_src(const struct ip_conntrack_tuple *tuple, unsigned int h = hash_by_src(tuple); struct ip_conntrack *ct; - READ_LOCK(&ip_nat_lock); + read_lock_bh(&ip_nat_lock); list_for_each_entry(ct, &bysource[h], nat.info.bysource) { if (same_src(ct, tuple)) { /* Copy source part from reply tuple. */ @@ -151,12 +151,12 @@ find_appropriate_src(const struct ip_conntrack_tuple *tuple, result->dst = tuple->dst; if (in_range(result, range)) { - READ_UNLOCK(&ip_nat_lock); + read_unlock_bh(&ip_nat_lock); return 1; } } } - READ_UNLOCK(&ip_nat_lock); + read_unlock_bh(&ip_nat_lock); return 0; } @@ -297,9 +297,9 @@ ip_nat_setup_info(struct ip_conntrack *conntrack, unsigned int srchash = hash_by_src(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL] .tuple); - WRITE_LOCK(&ip_nat_lock); + write_lock_bh(&ip_nat_lock); list_add(&info->bysource, &bysource[srchash]); - WRITE_UNLOCK(&ip_nat_lock); + write_unlock_bh(&ip_nat_lock); } /* It's done. */ @@ -474,23 +474,23 @@ int ip_nat_protocol_register(struct ip_nat_protocol *proto) { int ret = 0; - WRITE_LOCK(&ip_nat_lock); + write_lock_bh(&ip_nat_lock); if (ip_nat_protos[proto->protonum] != &ip_nat_unknown_protocol) { ret = -EBUSY; goto out; } ip_nat_protos[proto->protonum] = proto; out: - WRITE_UNLOCK(&ip_nat_lock); + write_unlock_bh(&ip_nat_lock); return ret; } /* Noone stores the protocol anywhere; simply delete it. */ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto) { - WRITE_LOCK(&ip_nat_lock); + write_lock_bh(&ip_nat_lock); ip_nat_protos[proto->protonum] = &ip_nat_unknown_protocol; - WRITE_UNLOCK(&ip_nat_lock); + write_unlock_bh(&ip_nat_lock); /* Someone could be still looking at the proto in a bh. */ synchronize_net(); @@ -509,13 +509,13 @@ int __init ip_nat_init(void) return -ENOMEM; /* Sew in builtin protocols. 
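Both unregister paths above follow the same teardown discipline: unpublish the pointer under the write lock, then call synchronize_net() so any softirq still dereferencing the old protocol finishes before the backing module can go away. Schematically, as a sketch with a hypothetical table that is not part of the patch:

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(example_table_lock);
    static void *example_table[256];
    static void *example_default;

    static void example_unregister(unsigned int num)
    {
            write_lock_bh(&example_table_lock);
            example_table[num] = &example_default;  /* unpublish under the lock */
            write_unlock_bh(&example_table_lock);
            synchronize_net();                      /* wait for softirq readers to drain */
            /* the old entry is now unreachable and may be freed */
    }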
*/ - WRITE_LOCK(&ip_nat_lock); + write_lock_bh(&ip_nat_lock); for (i = 0; i < MAX_IP_NAT_PROTO; i++) ip_nat_protos[i] = &ip_nat_unknown_protocol; ip_nat_protos[IPPROTO_TCP] = &ip_nat_protocol_tcp; ip_nat_protos[IPPROTO_UDP] = &ip_nat_protocol_udp; ip_nat_protos[IPPROTO_ICMP] = &ip_nat_protocol_icmp; - WRITE_UNLOCK(&ip_nat_lock); + write_unlock_bh(&ip_nat_lock); for (i = 0; i < ip_nat_htable_size; i++) { INIT_LIST_HEAD(&bysource[i]); diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c index 1637b96..158f34f 100644 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ b/net/ipv4/netfilter/ip_nat_helper.c @@ -28,8 +28,8 @@ #include <net/tcp.h> #include <net/udp.h> -#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock) -#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock) +#define ASSERT_READ_LOCK(x) +#define ASSERT_WRITE_LOCK(x) #include <linux/netfilter_ipv4/ip_conntrack.h> #include <linux/netfilter_ipv4/ip_conntrack_helper.h> @@ -47,7 +47,7 @@ #define DUMP_OFFSET(x) #endif -static DECLARE_LOCK(ip_nat_seqofs_lock); +static DEFINE_SPINLOCK(ip_nat_seqofs_lock); /* Setup TCP sequence correction given this change at this sequence */ static inline void @@ -70,7 +70,7 @@ adjust_tcp_sequence(u32 seq, DEBUGP("ip_nat_resize_packet: Seq_offset before: "); DUMP_OFFSET(this_way); - LOCK_BH(&ip_nat_seqofs_lock); + spin_lock_bh(&ip_nat_seqofs_lock); /* SYN adjust. If it's uninitialized, or this is after last * correction, record it: we don't handle more than one @@ -82,7 +82,7 @@ adjust_tcp_sequence(u32 seq, this_way->offset_before = this_way->offset_after; this_way->offset_after += sizediff; } - UNLOCK_BH(&ip_nat_seqofs_lock); + spin_unlock_bh(&ip_nat_seqofs_lock); DEBUGP("ip_nat_resize_packet: Seq_offset after: "); DUMP_OFFSET(this_way); @@ -142,9 +142,6 @@ static int enlarge_skb(struct sk_buff **pskb, unsigned int extra) /* Transfer socket to new skb. 
*/ if ((*pskb)->sk) skb_set_owner_w(nskb, (*pskb)->sk); -#ifdef CONFIG_NETFILTER_DEBUG - nskb->nf_debug = (*pskb)->nf_debug; -#endif kfree_skb(*pskb); *pskb = nskb; return 1; } diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c index 581f097..60d70fa 100644 --- a/net/ipv4/netfilter/ip_nat_rule.c +++ b/net/ipv4/netfilter/ip_nat_rule.c @@ -19,8 +19,8 @@ #include <net/route.h> #include <linux/bitops.h> -#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock) -#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock) +#define ASSERT_READ_LOCK(x) +#define ASSERT_WRITE_LOCK(x) #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv4/ip_nat.h> diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 79f56f6..bc59d0d 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c @@ -31,8 +31,8 @@ #include <net/checksum.h> #include <linux/spinlock.h> -#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock) -#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock) +#define ASSERT_READ_LOCK(x) +#define ASSERT_WRITE_LOCK(x) #include <linux/netfilter_ipv4/ip_nat.h> #include <linux/netfilter_ipv4/ip_nat_rule.h> @@ -373,7 +373,6 @@ static int init_or_cleanup(int init) cleanup_rule_init: ip_nat_rule_cleanup(); cleanup_nothing: - MUST_BE_READ_WRITE_UNLOCKED(&ip_nat_lock); return ret; } diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index e5746b6..eda1fba 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -3,6 +3,7 @@ * communicating with userspace via netlink. * * (C) 2000-2002 James Morris <jmorris@intercode.com.au> + * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -17,6 +18,7 @@ * 2005-01-10: Added /proc counter for dropped packets; fixed so * packets aren't delivered to user space if they're going * to be dropped. + * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte) * */ #include <linux/module.h> @@ -71,7 +73,15 @@ static DECLARE_MUTEX(ipqnl_sem); static void ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict) { + /* The TCP input path (and probably other bits) assumes it is called + * from softirq context, not from a syscall the way ipq_issue_verdict + * is. The TCP input path can deadlock on locks taken from timer + * softirq, for example.
We therefore emulate this by local_bh_disable() */ + + local_bh_disable(); nf_reinject(entry->skb, entry->info, verdict); + local_bh_enable(); + kfree(entry); } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 8a54f92..c88dfcd 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -67,7 +67,6 @@ static DECLARE_MUTEX(ipt_mutex); /* Must have mutex */ #define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0) #define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0) -#include <linux/netfilter_ipv4/lockhelp.h> #include <linux/netfilter_ipv4/listhelp.h> #if 0 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 0f12e3a..9cde8c6 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -29,7 +29,6 @@ #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv4/ipt_CLUSTERIP.h> #include <linux/netfilter_ipv4/ip_conntrack.h> -#include <linux/netfilter_ipv4/lockhelp.h> #define CLUSTERIP_VERSION "0.6" @@ -41,6 +40,8 @@ #define DEBUGP #endif +#define ASSERT_READ_LOCK(x) + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("iptables target for CLUSTERIP"); @@ -67,7 +68,7 @@ static LIST_HEAD(clusterip_configs); /* clusterip_lock protects the clusterip_configs list _AND_ the configurable * data within all structures (num_local_nodes, local_nodes[]) */ -static DECLARE_RWLOCK(clusterip_lock); +static DEFINE_RWLOCK(clusterip_lock); #ifdef CONFIG_PROC_FS static struct file_operations clusterip_proc_fops; @@ -82,9 +83,9 @@ clusterip_config_get(struct clusterip_config *c) { static inline void clusterip_config_put(struct clusterip_config *c) { if (atomic_dec_and_test(&c->refcount)) { - WRITE_LOCK(&clusterip_lock); + write_lock_bh(&clusterip_lock); list_del(&c->list); - WRITE_UNLOCK(&clusterip_lock); + write_unlock_bh(&clusterip_lock); dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0); dev_put(c->dev); kfree(c); @@ -97,7 +98,7 @@ __clusterip_config_find(u_int32_t clusterip) { struct list_head *pos; - MUST_BE_READ_LOCKED(&clusterip_lock); + ASSERT_READ_LOCK(&clusterip_lock); list_for_each(pos, &clusterip_configs) { struct clusterip_config *c = list_entry(pos, struct clusterip_config, list); @@ -114,14 +115,14 @@ clusterip_config_find_get(u_int32_t clusterip) { struct clusterip_config *c; - READ_LOCK(&clusterip_lock); + read_lock_bh(&clusterip_lock); c = __clusterip_config_find(clusterip); if (!c) { - READ_UNLOCK(&clusterip_lock); + read_unlock_bh(&clusterip_lock); return NULL; } atomic_inc(&c->refcount); - READ_UNLOCK(&clusterip_lock); + read_unlock_bh(&clusterip_lock); return c; } @@ -160,9 +161,9 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip, c->pde->data = c; #endif - WRITE_LOCK(&clusterip_lock); + write_lock_bh(&clusterip_lock); list_add(&c->list, &clusterip_configs); - WRITE_UNLOCK(&clusterip_lock); + write_unlock_bh(&clusterip_lock); return c; } @@ -172,25 +173,25 @@ clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum) { int i; - WRITE_LOCK(&clusterip_lock); + write_lock_bh(&clusterip_lock); if (c->num_local_nodes >= CLUSTERIP_MAX_NODES || nodenum > CLUSTERIP_MAX_NODES) { - WRITE_UNLOCK(&clusterip_lock); + write_unlock_bh(&clusterip_lock); return 1; } /* check if we already have this number in our array */ for (i = 0; i < c->num_local_nodes; i++) { if (c->local_nodes[i] == nodenum) { - 
write_unlock_bh(&clusterip_lock); return 1; } } c->local_nodes[c->num_local_nodes++] = nodenum; - WRITE_UNLOCK(&clusterip_lock); + write_unlock_bh(&clusterip_lock); return 0; } @@ -199,10 +200,10 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) { int i; - WRITE_LOCK(&clusterip_lock); + write_lock_bh(&clusterip_lock); if (c->num_local_nodes <= 1 || nodenum > CLUSTERIP_MAX_NODES) { - WRITE_UNLOCK(&clusterip_lock); + write_unlock_bh(&clusterip_lock); return 1; } @@ -211,12 +212,12 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) int size = sizeof(u_int16_t)*(c->num_local_nodes-(i+1)); memmove(&c->local_nodes[i], &c->local_nodes[i+1], size); c->num_local_nodes--; - WRITE_UNLOCK(&clusterip_lock); + write_unlock_bh(&clusterip_lock); return 0; } } - WRITE_UNLOCK(&clusterip_lock); + write_unlock_bh(&clusterip_lock); return 1; } @@ -286,21 +287,21 @@ clusterip_responsible(struct clusterip_config *config, u_int32_t hash) { int i; - READ_LOCK(&clusterip_lock); + read_lock_bh(&clusterip_lock); if (config->num_local_nodes == 0) { - READ_UNLOCK(&clusterip_lock); + read_unlock_bh(&clusterip_lock); return 0; } for (i = 0; i < config->num_local_nodes; i++) { if (config->local_nodes[i] == hash) { - READ_UNLOCK(&clusterip_lock); + read_unlock_bh(&clusterip_lock); return 1; } } - READ_UNLOCK(&clusterip_lock); + read_unlock_bh(&clusterip_lock); return 0; } @@ -338,7 +339,7 @@ target(struct sk_buff **pskb, * error messages (RELATED) and information requests (see below) */ if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP && (ctinfo == IP_CT_RELATED - || ctinfo == IP_CT_IS_REPLY+IP_CT_IS_REPLY)) + || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) return IPT_CONTINUE; /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, @@ -578,7 +579,7 @@ static void *clusterip_seq_start(struct seq_file *s, loff_t *pos) struct clusterip_config *c = pde->data; unsigned int *nodeidx; - READ_LOCK(&clusterip_lock); + read_lock_bh(&clusterip_lock); if (*pos >= c->num_local_nodes) return NULL; @@ -608,7 +609,7 @@ static void clusterip_seq_stop(struct seq_file *s, void *v) { kfree(v); - READ_UNLOCK(&clusterip_lock); + read_unlock_bh(&clusterip_lock); } static int clusterip_seq_show(struct seq_file *s, void *v) diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 57e9f6c..91e7450 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -33,7 +33,7 @@ MODULE_DESCRIPTION("iptables MASQUERADE target module"); #endif /* Lock protects masq region inside conntrack */ -static DECLARE_RWLOCK(masq_lock); +static DEFINE_RWLOCK(masq_lock); /* FIXME: Multiple targets. --RR */ static int @@ -103,9 +103,9 @@ masquerade_target(struct sk_buff **pskb, return NF_DROP; } - WRITE_LOCK(&masq_lock); + write_lock_bh(&masq_lock); ct->nat.masq_index = out->ifindex; - WRITE_UNLOCK(&masq_lock); + write_unlock_bh(&masq_lock); /* Transfer from original range. 
*/ newrange = ((struct ip_nat_range) @@ -122,9 +122,9 @@ device_cmp(struct ip_conntrack *i, void *ifindex) { int ret; - READ_LOCK(&masq_lock); + read_lock_bh(&masq_lock); ret = (i->nat.masq_index == (int)(long)ifindex); - READ_UNLOCK(&masq_lock); + read_unlock_bh(&masq_lock); return ret; } diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 266d649..9156964 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -104,10 +104,12 @@ static inline struct rtable *route_reverse(struct sk_buff *skb, static void send_reset(struct sk_buff *oldskb, int hook) { struct sk_buff *nskb; + struct iphdr *iph = oldskb->nh.iph; struct tcphdr _otcph, *oth, *tcph; struct rtable *rt; u_int16_t tmp_port; u_int32_t tmp_addr; + unsigned int tcplen; int needs_ack; int hh_len; @@ -124,7 +126,16 @@ static void send_reset(struct sk_buff *oldskb, int hook) if (oth->rst) return; - /* FIXME: Check checksum --RR */ + /* Check checksum */ + tcplen = oldskb->len - iph->ihl * 4; + if (((hook != NF_IP_LOCAL_IN && oldskb->ip_summed != CHECKSUM_HW) || + (hook == NF_IP_LOCAL_IN && + oldskb->ip_summed != CHECKSUM_UNNECESSARY)) && + csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP, + oldskb->ip_summed == CHECKSUM_HW ? oldskb->csum : + skb_checksum(oldskb, iph->ihl * 4, tcplen, 0))) + return; + if ((rt = route_reverse(oldskb, oth, hook)) == NULL) return; diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 6f2cefb..52a0076 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -56,7 +56,6 @@ #include <linux/netfilter.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv4/ipt_ULOG.h> -#include <linux/netfilter_ipv4/lockhelp.h> #include <net/sock.h> #include <linux/bitops.h> @@ -99,8 +98,8 @@ typedef struct { static ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS]; /* array of buffers */ -static struct sock *nflognl; /* our socket */ -static DECLARE_LOCK(ulog_lock); /* spinlock */ +static struct sock *nflognl; /* our socket */ +static DEFINE_SPINLOCK(ulog_lock); /* spinlock */ /* send one ulog_buff_t to userspace */ static void ulog_send(unsigned int nlgroupnum) @@ -135,9 +134,9 @@ static void ulog_timer(unsigned long data) /* lock to protect against somebody modifying our structure * from ipt_ulog_target at the same time */ - LOCK_BH(&ulog_lock); + spin_lock_bh(&ulog_lock); ulog_send(data); - UNLOCK_BH(&ulog_lock); + spin_unlock_bh(&ulog_lock); } static struct sk_buff *ulog_alloc_skb(unsigned int size) @@ -193,7 +192,7 @@ static void ipt_ulog_packet(unsigned int hooknum, ub = &ulog_buffers[groupnum]; - LOCK_BH(&ulog_lock); + spin_lock_bh(&ulog_lock); if (!ub->skb) { if (!(ub->skb = ulog_alloc_skb(size))) @@ -278,7 +277,7 @@ static void ipt_ulog_packet(unsigned int hooknum, ulog_send(groupnum); } - UNLOCK_BH(&ulog_lock); + spin_unlock_bh(&ulog_lock); return; @@ -288,7 +287,7 @@ nlmsg_failure: alloc_failure: PRINTR("ipt_ULOG: Error building netlink message\n"); - UNLOCK_BH(&ulog_lock); + spin_unlock_bh(&ulog_lock); } static unsigned int ipt_ulog_target(struct sk_buff **pskb, diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c index f1937190..564b49b 100644 --- a/net/ipv4/netfilter/ipt_hashlimit.c +++ b/net/ipv4/netfilter/ipt_hashlimit.c @@ -37,7 +37,6 @@ #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv4/ipt_hashlimit.h> -#include <linux/netfilter_ipv4/lockhelp.h> /* FIXME: this is just for IP_NF_ASSERRT */ #include 
<linux/netfilter_ipv4/ip_conntrack.h> @@ -92,7 +91,7 @@ struct ipt_hashlimit_htable { struct hlist_head hash[0]; /* hashtable itself */ }; -static DECLARE_LOCK(hashlimit_lock); /* protects htables list */ +static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */ static HLIST_HEAD(hashlimit_htables); static kmem_cache_t *hashlimit_cachep; @@ -233,9 +232,9 @@ static int htable_create(struct ipt_hashlimit_info *minfo) hinfo->timer.function = htable_gc; add_timer(&hinfo->timer); - LOCK_BH(&hashlimit_lock); + spin_lock_bh(&hashlimit_lock); hlist_add_head(&hinfo->node, &hashlimit_htables); - UNLOCK_BH(&hashlimit_lock); + spin_unlock_bh(&hashlimit_lock); return 0; } @@ -301,15 +300,15 @@ static struct ipt_hashlimit_htable *htable_find_get(char *name) struct ipt_hashlimit_htable *hinfo; struct hlist_node *pos; - LOCK_BH(&hashlimit_lock); + spin_lock_bh(&hashlimit_lock); hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) { if (!strcmp(name, hinfo->pde->name)) { atomic_inc(&hinfo->use); - UNLOCK_BH(&hashlimit_lock); + spin_unlock_bh(&hashlimit_lock); return hinfo; } } - UNLOCK_BH(&hashlimit_lock); + spin_unlock_bh(&hashlimit_lock); return NULL; } @@ -317,9 +316,9 @@ static struct ipt_hashlimit_htable *htable_find_get(char *name) static void htable_put(struct ipt_hashlimit_htable *hinfo) { if (atomic_dec_and_test(&hinfo->use)) { - LOCK_BH(&hashlimit_lock); + spin_lock_bh(&hashlimit_lock); hlist_del(&hinfo->node); - UNLOCK_BH(&hashlimit_lock); + spin_unlock_bh(&hashlimit_lock); htable_destroy(hinfo); } } diff --git a/net/ipv4/netfilter/ipt_helper.c b/net/ipv4/netfilter/ipt_helper.c index 33fdf36..3e7dd01 100644 --- a/net/ipv4/netfilter/ipt_helper.c +++ b/net/ipv4/netfilter/ipt_helper.c @@ -53,7 +53,7 @@ match(const struct sk_buff *skb, return ret; } - READ_LOCK(&ip_conntrack_lock); + read_lock_bh(&ip_conntrack_lock); if (!ct->master->helper) { DEBUGP("ipt_helper: master ct %p has no helper\n", exp->expectant); @@ -69,7 +69,7 @@ match(const struct sk_buff *skb, ret ^= !strncmp(ct->master->helper->name, info->name, strlen(ct->master->helper->name)); out_unlock: - READ_UNLOCK(&ip_conntrack_lock); + read_unlock_bh(&ip_conntrack_lock); return ret; } diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c index 25ab9fa..2d44b07 100644 --- a/net/ipv4/netfilter/ipt_recent.c +++ b/net/ipv4/netfilter/ipt_recent.c @@ -223,7 +223,7 @@ static int ip_recent_ctrl(struct file *file, const char __user *input, unsigned curr_table->table[count].last_seen = 0; curr_table->table[count].addr = 0; curr_table->table[count].ttl = 0; - memset(curr_table->table[count].last_pkts,0,ip_pkt_list_tot*sizeof(u_int32_t)); + memset(curr_table->table[count].last_pkts,0,ip_pkt_list_tot*sizeof(unsigned long)); curr_table->table[count].oldest_pkt = 0; curr_table->table[count].time_pos = 0; curr_table->time_info[count].position = count; @@ -502,7 +502,7 @@ match(const struct sk_buff *skb, location = time_info[curr_table->time_pos].position; hash_table[r_list[location].hash_entry] = -1; hash_table[hash_result] = location; - memset(r_list[location].last_pkts,0,ip_pkt_list_tot*sizeof(u_int32_t)); + memset(r_list[location].last_pkts,0,ip_pkt_list_tot*sizeof(unsigned long)); r_list[location].time_pos = curr_table->time_pos; r_list[location].addr = addr; r_list[location].ttl = ttl; @@ -631,7 +631,7 @@ match(const struct sk_buff *skb, r_list[location].last_seen = 0; r_list[location].addr = 0; r_list[location].ttl = 0; - 
memset(r_list[location].last_pkts,0,ip_pkt_list_tot*sizeof(u_int32_t)); + memset(r_list[location].last_pkts,0,ip_pkt_list_tot*sizeof(unsigned long)); r_list[location].oldest_pkt = 0; ans = !info->invert; } @@ -734,10 +734,10 @@ checkentry(const char *tablename, memset(curr_table->table,0,sizeof(struct recent_ip_list)*ip_list_tot); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: Allocating %d for pkt_list.\n", - sizeof(u_int32_t)*ip_pkt_list_tot*ip_list_tot); + sizeof(unsigned long)*ip_pkt_list_tot*ip_list_tot); #endif - hold = vmalloc(sizeof(u_int32_t)*ip_pkt_list_tot*ip_list_tot); + hold = vmalloc(sizeof(unsigned long)*ip_pkt_list_tot*ip_list_tot); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: After pkt_list allocation.\n"); #endif diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 5b1ec58..d1835b1 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -259,7 +259,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) return 0; } -static int raw_send_hdrinc(struct sock *sk, void *from, int length, +static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, struct rtable *rt, unsigned int flags) { @@ -298,7 +298,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, int length, goto error_fault; /* We don't modify invalid header */ - if (length >= sizeof(*iph) && iph->ihl * 4 <= length) { + if (length >= sizeof(*iph) && iph->ihl * 4U <= length) { if (!iph->saddr) iph->saddr = rt->rt_src; iph->check = 0; @@ -332,7 +332,7 @@ static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg) u8 __user *type = NULL; u8 __user *code = NULL; int probed = 0; - int i; + unsigned int i; if (!msg->msg_iov) return; @@ -384,7 +384,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, int err; err = -EMSGSIZE; - if (len < 0 || len > 0xFFFF) + if (len > 0xFFFF) goto out; /* @@ -514,7 +514,10 @@ done: kfree(ipc.opt); ip_rt_put(rt); -out: return err < 0 ? err : len; +out: + if (err < 0) + return err; + return len; do_confirm: dst_confirm(&rt->u.dst); @@ -610,7 +613,10 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, copied = skb->len; done: skb_free_datagram(sk, skb); -out: return err ? 
err : copied; +out: + if (err) + return err; + return copied; } static int raw_init(struct sock *sk) @@ -691,11 +697,11 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) struct sk_buff *skb; int amount = 0; - spin_lock_irq(&sk->sk_receive_queue.lock); + spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) amount = skb->len; - spin_unlock_irq(&sk->sk_receive_queue.lock); + spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a682d28..80cf633 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1767,7 +1767,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb, struct in_device *in_dev, u32 daddr, u32 saddr, u32 tos) { - struct rtable* rth; + struct rtable* rth = NULL; int err; unsigned hash; @@ -1794,7 +1794,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb, u32 daddr, u32 saddr, u32 tos) { #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED - struct rtable* rth; + struct rtable* rth = NULL; unsigned char hop, hopcount, lasthop; int err = -EINVAL; unsigned int hash; @@ -2239,7 +2239,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp, struct net_device *dev_out, unsigned flags) { - struct rtable *rth; + struct rtable *rth = NULL; int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); unsigned hash; if (err == 0) { @@ -2267,7 +2267,7 @@ static inline int ip_mkroute_output(struct rtable** rp, unsigned char hop; unsigned hash; int err = -EINVAL; - struct rtable *rth; + struct rtable *rth = NULL; if (res->fi && res->fi->fib_nhs > 1) { unsigned char hopcount = res->fi->fib_nhs; @@ -2581,7 +2581,7 @@ int ip_route_output_key(struct rtable **rp, struct flowi *flp) } static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, - int nowait) + int nowait, unsigned int flags) { struct rtable *rt = (struct rtable*)skb->dst; struct rtmsg *r; @@ -2591,9 +2591,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, #ifdef CONFIG_IP_MROUTE struct rtattr *eptr; #endif - nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r)); + nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); r = NLMSG_DATA(nlh); - nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0; r->rtm_family = AF_INET; r->rtm_dst_len = 32; r->rtm_src_len = 0; @@ -2744,7 +2743,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid; err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, - RTM_NEWROUTE, 0); + RTM_NEWROUTE, 0, 0); if (!err) goto out_free; if (err < 0) { @@ -2781,8 +2780,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) continue; skb->dst = dst_clone(&rt->u.dst); if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - RTM_NEWROUTE, 1) <= 0) { + cb->nlh->nlmsg_seq, RTM_NEWROUTE, + 1, NLM_F_MULTI) <= 0) { dst_release(xchg(&skb->dst, NULL)); rcu_read_unlock_bh(); goto done; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index e923d2f..72d0144 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -169,10 +169,10 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) return mssind < NUM_MSS ? 
msstab[mssind] + 1 : 0; } -extern struct or_calltable or_ipv4; +extern struct request_sock_ops tcp_request_sock_ops; static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, - struct open_request *req, + struct request_sock *req, struct dst_entry *dst) { struct tcp_sock *tp = tcp_sk(sk); @@ -182,7 +182,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, if (child) tcp_acceptq_queue(sk, req, child); else - tcp_openreq_free(req); + reqsk_free(req); return child; } @@ -190,10 +190,12 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt) { + struct inet_request_sock *ireq; + struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); __u32 cookie = ntohl(skb->h.th->ack_seq) - 1; struct sock *ret = sk; - struct open_request *req; + struct request_sock *req; int mss; struct rtable *rt; __u8 rcv_wscale; @@ -209,19 +211,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV); - req = tcp_openreq_alloc(); ret = NULL; + req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */ if (!req) goto out; - req->rcv_isn = htonl(skb->h.th->seq) - 1; - req->snt_isn = cookie; + ireq = inet_rsk(req); + treq = tcp_rsk(req); + treq->rcv_isn = htonl(skb->h.th->seq) - 1; + treq->snt_isn = cookie; req->mss = mss; - req->rmt_port = skb->h.th->source; - req->af.v4_req.loc_addr = skb->nh.iph->daddr; - req->af.v4_req.rmt_addr = skb->nh.iph->saddr; - req->class = &or_ipv4; /* for savety */ - req->af.v4_req.opt = NULL; + ireq->rmt_port = skb->h.th->source; + ireq->loc_addr = skb->nh.iph->daddr; + ireq->rmt_addr = skb->nh.iph->saddr; + ireq->opt = NULL; /* We threw the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) @@ -229,17 +232,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, if (opt && opt->optlen) { int opt_size = sizeof(struct ip_options) + opt->optlen; - req->af.v4_req.opt = kmalloc(opt_size, GFP_ATOMIC); - if (req->af.v4_req.opt) { - if (ip_options_echo(req->af.v4_req.opt, skb)) { - kfree(req->af.v4_req.opt); - req->af.v4_req.opt = NULL; - } + ireq->opt = kmalloc(opt_size, GFP_ATOMIC); + if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) { + kfree(ireq->opt); + ireq->opt = NULL; } } - req->snd_wscale = req->rcv_wscale = req->tstamp_ok = 0; - req->wscale_ok = req->sack_ok = 0; + ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0; + ireq->wscale_ok = ireq->sack_ok = 0; req->expires = 0UL; req->retrans = 0; @@ -253,15 +254,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ((opt && opt->srr) ? 
opt->faddr : - req->af.v4_req.rmt_addr), - .saddr = req->af.v4_req.loc_addr, + ireq->rmt_addr), + .saddr = ireq->loc_addr, .tos = RT_CONN_FLAGS(sk) } }, .proto = IPPROTO_TCP, .uli_u = { .ports = { .sport = skb->h.th->dest, .dport = skb->h.th->source } } }; if (ip_route_output_key(&rt, &fl)) { - tcp_openreq_free(req); + reqsk_free(req); goto out; } } @@ -272,7 +273,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, &req->rcv_wnd, &req->window_clamp, 0, &rcv_wscale); /* BTW win scale with syncookies is 0 by definition */ - req->rcv_wscale = rcv_wscale; + ireq->rcv_wscale = rcv_wscale; ret = get_cookie_sock(sk, skb, req, &rt->u.dst); out: return ret; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 3aafb29..e328945 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -23,6 +23,7 @@ extern int sysctl_ip_nonlocal_bind; extern int sysctl_icmp_echo_ignore_all; extern int sysctl_icmp_echo_ignore_broadcasts; extern int sysctl_icmp_ignore_bogus_error_responses; +extern int sysctl_icmp_errors_use_inbound_ifaddr; /* From ip_fragment.c */ extern int sysctl_ipfrag_low_thresh; @@ -117,6 +118,45 @@ static int ipv4_sysctl_forward_strategy(ctl_table *table, return 1; } +static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file * filp, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + char val[TCP_CA_NAME_MAX]; + ctl_table tbl = { + .data = val, + .maxlen = TCP_CA_NAME_MAX, + }; + int ret; + + tcp_get_default_congestion_control(val); + + ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos); + if (write && ret == 0) + ret = tcp_set_default_congestion_control(val); + return ret; +} + +int sysctl_tcp_congestion_control(ctl_table *table, int __user *name, int nlen, + void __user *oldval, size_t __user *oldlenp, + void __user *newval, size_t newlen, + void **context) +{ + char val[TCP_CA_NAME_MAX]; + ctl_table tbl = { + .data = val, + .maxlen = TCP_CA_NAME_MAX, + }; + int ret; + + tcp_get_default_congestion_control(val); + ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen, + context); + if (ret == 0 && newval && newlen) + ret = tcp_set_default_congestion_control(val); + return ret; +} + + ctl_table ipv4_table[] = { { .ctl_name = NET_IPV4_TCP_TIMESTAMPS, @@ -396,6 +436,14 @@ ctl_table ipv4_table[] = { .proc_handler = &proc_dointvec }, { + .ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, + .procname = "icmp_errors_use_inbound_ifaddr", + .data = &sysctl_icmp_errors_use_inbound_ifaddr, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec + }, + { .ctl_name = NET_IPV4_ROUTE, .procname = "route", .maxlen = 0, @@ -603,70 +651,6 @@ ctl_table ipv4_table[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = NET_TCP_WESTWOOD, - .procname = "tcp_westwood", - .data = &sysctl_tcp_westwood, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS, - .procname = "tcp_vegas_cong_avoid", - .data = &sysctl_tcp_vegas_cong_avoid, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS_ALPHA, - .procname = "tcp_vegas_alpha", - .data = &sysctl_tcp_vegas_alpha, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS_BETA, - .procname = "tcp_vegas_beta", - .data = &sysctl_tcp_vegas_beta, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS_GAMMA, - .procname = 
"tcp_vegas_gamma", - .data = &sysctl_tcp_vegas_gamma, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_BIC, - .procname = "tcp_bic", - .data = &sysctl_tcp_bic, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_BIC_FAST_CONVERGENCE, - .procname = "tcp_bic_fast_convergence", - .data = &sysctl_tcp_bic_fast_convergence, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_BIC_LOW_WINDOW, - .procname = "tcp_bic_low_window", - .data = &sysctl_tcp_bic_low_window, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { .ctl_name = NET_TCP_MODERATE_RCVBUF, .procname = "tcp_moderate_rcvbuf", .data = &sysctl_tcp_moderate_rcvbuf, @@ -683,13 +667,14 @@ ctl_table ipv4_table[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = NET_TCP_BIC_BETA, - .procname = "tcp_bic_beta", - .data = &sysctl_tcp_bic_beta, - .maxlen = sizeof(int), + .ctl_name = NET_TCP_CONG_CONTROL, + .procname = "tcp_congestion_control", .mode = 0644, - .proc_handler = &proc_dointvec, + .maxlen = TCP_CA_NAME_MAX, + .proc_handler = &proc_tcp_congestion_control, + .strategy = &sysctl_tcp_congestion_control, }, + { .ctl_name = 0 } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index a037baf..882436d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -271,7 +271,6 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); -kmem_cache_t *tcp_openreq_cachep; kmem_cache_t *tcp_bucket_cachep; kmem_cache_t *tcp_timewait_cachep; @@ -317,7 +316,7 @@ EXPORT_SYMBOL(tcp_enter_memory_pressure); static __inline__ unsigned int tcp_listen_poll(struct sock *sk, poll_table *wait) { - return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0; + return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0; } /* @@ -463,28 +462,15 @@ int tcp_listen_start(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct tcp_listen_opt *lopt; + int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE); + + if (rc != 0) + return rc; sk->sk_max_ack_backlog = 0; sk->sk_ack_backlog = 0; - tp->accept_queue = tp->accept_queue_tail = NULL; - rwlock_init(&tp->syn_wait_lock); tcp_delack_init(tp); - lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL); - if (!lopt) - return -ENOMEM; - - memset(lopt, 0, sizeof(struct tcp_listen_opt)); - for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++) - if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog) - break; - get_random_bytes(&lopt->hash_rnd, 4); - - write_lock_bh(&tp->syn_wait_lock); - tp->listen_opt = lopt; - write_unlock_bh(&tp->syn_wait_lock); - /* There is race window here: we announce ourselves listening, * but this transition is still not validated by get_port(). 
* It is OK, because this socket enters to hash table only @@ -501,10 +487,7 @@ int tcp_listen_start(struct sock *sk) } sk->sk_state = TCP_CLOSE; - write_lock_bh(&tp->syn_wait_lock); - tp->listen_opt = NULL; - write_unlock_bh(&tp->syn_wait_lock); - kfree(lopt); + reqsk_queue_destroy(&tp->accept_queue); return -EADDRINUSE; } @@ -516,25 +499,23 @@ int tcp_listen_start(struct sock *sk) static void tcp_listen_stop (struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - struct tcp_listen_opt *lopt = tp->listen_opt; - struct open_request *acc_req = tp->accept_queue; - struct open_request *req; + struct listen_sock *lopt; + struct request_sock *acc_req; + struct request_sock *req; int i; tcp_delete_keepalive_timer(sk); /* make all the listen_opt local to us */ - write_lock_bh(&tp->syn_wait_lock); - tp->listen_opt = NULL; - write_unlock_bh(&tp->syn_wait_lock); - tp->accept_queue = tp->accept_queue_tail = NULL; + lopt = reqsk_queue_yank_listen_sk(&tp->accept_queue); + acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue); if (lopt->qlen) { for (i = 0; i < TCP_SYNQ_HSIZE; i++) { while ((req = lopt->syn_table[i]) != NULL) { lopt->syn_table[i] = req->dl_next; lopt->qlen--; - tcp_openreq_free(req); + reqsk_free(req); /* Following specs, it would be better either to send FIN * (and enter FIN-WAIT-1, it is normal close) @@ -574,7 +555,7 @@ static void tcp_listen_stop (struct sock *sk) sock_put(child); sk_acceptq_removed(sk); - tcp_openreq_fastfree(req); + __reqsk_free(req); } BUG_TRAP(!sk->sk_ack_backlog); } @@ -1345,7 +1326,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, cleanup_rbuf(sk, copied); - if (tp->ucopy.task == user_recv) { + if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { /* Install new reader */ if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { user_recv = current; @@ -1868,11 +1849,11 @@ static int wait_for_connect(struct sock *sk, long timeo) prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); release_sock(sk); - if (!tp->accept_queue) + if (reqsk_queue_empty(&tp->accept_queue)) timeo = schedule_timeout(timeo); lock_sock(sk); err = 0; - if (tp->accept_queue) + if (!reqsk_queue_empty(&tp->accept_queue)) break; err = -EINVAL; if (sk->sk_state != TCP_LISTEN) @@ -1895,7 +1876,6 @@ static int wait_for_connect(struct sock *sk, long timeo) struct sock *tcp_accept(struct sock *sk, int flags, int *err) { struct tcp_sock *tp = tcp_sk(sk); - struct open_request *req; struct sock *newsk; int error; @@ -1906,37 +1886,31 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err) */ error = -EINVAL; if (sk->sk_state != TCP_LISTEN) - goto out; + goto out_err; /* Find already established connection */ - if (!tp->accept_queue) { + if (reqsk_queue_empty(&tp->accept_queue)) { long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); /* If this is a non blocking socket don't sleep */ error = -EAGAIN; if (!timeo) - goto out; + goto out_err; error = wait_for_connect(sk, timeo); if (error) - goto out; + goto out_err; } - req = tp->accept_queue; - if ((tp->accept_queue = req->dl_next) == NULL) - tp->accept_queue_tail = NULL; - - newsk = req->sk; - sk_acceptq_removed(sk); - tcp_openreq_fastfree(req); + newsk = reqsk_queue_get_child(&tp->accept_queue, sk); BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); - release_sock(sk); - return newsk; - out: release_sock(sk); + return newsk; +out_err: + newsk = NULL; *err = error; - return NULL; + goto out; } /* @@ -1953,6 +1927,25 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, return 
tp->af_specific->setsockopt(sk, level, optname, optval, optlen); + /* This is a string value all the others are ints */ + if (optname == TCP_CONGESTION) { + char name[TCP_CA_NAME_MAX]; + + if (optlen < 1) + return -EINVAL; + + val = strncpy_from_user(name, optval, + min(TCP_CA_NAME_MAX-1, optlen)); + if (val < 0) + return -EFAULT; + name[val] = 0; + + lock_sock(sk); + err = tcp_set_congestion_control(tp, name); + release_sock(sk); + return err; + } + if (optlen < sizeof(int)) return -EINVAL; @@ -2237,6 +2230,16 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, case TCP_QUICKACK: val = !tp->ack.pingpong; break; + + case TCP_CONGESTION: + if (get_user(len, optlen)) + return -EFAULT; + len = min_t(unsigned int, len, TCP_CA_NAME_MAX); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, tp->ca_ops->name, len)) + return -EFAULT; + return 0; default: return -ENOPROTOOPT; }; @@ -2250,7 +2253,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, extern void __skb_cb_too_small_for_tcp(int, int); -extern void tcpdiag_init(void); +extern struct tcp_congestion_ops tcp_reno; static __initdata unsigned long thash_entries; static int __init set_thash_entries(char *str) @@ -2271,13 +2274,6 @@ void __init tcp_init(void) __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb), sizeof(skb->cb)); - tcp_openreq_cachep = kmem_cache_create("tcp_open_request", - sizeof(struct open_request), - 0, SLAB_HWCACHE_ALIGN, - NULL, NULL); - if (!tcp_openreq_cachep) - panic("tcp_init: Cannot alloc open_request cache."); - tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket", sizeof(struct tcp_bind_bucket), 0, SLAB_HWCACHE_ALIGN, @@ -2338,7 +2334,7 @@ void __init tcp_init(void) (tcp_bhash_size * sizeof(struct tcp_bind_hashbucket)); order++) ; - if (order > 4) { + if (order >= 4) { sysctl_local_port_range[0] = 32768; sysctl_local_port_range[1] = 61000; sysctl_tcp_max_tw_buckets = 180000; @@ -2366,6 +2362,8 @@ void __init tcp_init(void) printk(KERN_INFO "TCP: Hash tables configured " "(established %d bind %d)\n", tcp_ehash_size << 1, tcp_bhash_size); + + tcp_register_congestion_control(&tcp_reno); } EXPORT_SYMBOL(tcp_accept); @@ -2374,7 +2372,6 @@ EXPORT_SYMBOL(tcp_destroy_sock); EXPORT_SYMBOL(tcp_disconnect); EXPORT_SYMBOL(tcp_getsockopt); EXPORT_SYMBOL(tcp_ioctl); -EXPORT_SYMBOL(tcp_openreq_cachep); EXPORT_SYMBOL(tcp_poll); EXPORT_SYMBOL(tcp_read_sock); EXPORT_SYMBOL(tcp_recvmsg); diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c new file mode 100644 index 0000000..ec38d45 --- /dev/null +++ b/net/ipv4/tcp_bic.c @@ -0,0 +1,331 @@ +/* + * Binary Increase Congestion control for TCP + * + * This is from the implementation of BICTCP in + * Lisong Xu, Khaled Harfoush, and Injong Rhee. + * "Binary Increase Congestion Control for Fast, Long Distance + * Networks" in INFOCOM 2004 + * Available from: + * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf + * + * Unless BIC is enabled and congestion window is large + * this behaves the same as the original Reno. 
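The TCP_CONGESTION option added in the tcp_setsockopt hunk above selects an algorithm per socket rather than system-wide. A sketch of the userspace side (the numeric fallback value 13 for TCP_CONGESTION and the availability of the "bic" module are assumptions, not guaranteed by this patch alone):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef TCP_CONGESTION
    #define TCP_CONGESTION 13               /* assumed value; check your kernel headers */
    #endif

    int main(void)
    {
            char buf[16];
            socklen_t len = sizeof(buf);
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* Ask for BIC on this socket only; fails with ENOENT if unavailable. */
            if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bic", strlen("bic")) < 0)
                    perror("setsockopt TCP_CONGESTION");
            /* Read back what the socket actually got. */
            if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, buf, &len) == 0)
                    printf("socket congestion control: %.*s\n", (int)len, buf);
            return 0;
    }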
+ */ + +#include <linux/config.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <net/tcp.h> + + +#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation + * max_cwnd = snd_cwnd * beta + */ +#define BICTCP_B 4 /* + * In binary search, + * go to point (max+min)/N + */ + +static int fast_convergence = 1; +static int max_increment = 32; +static int low_window = 14; +static int beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ +static int low_utilization_threshold = 153; +static int low_utilization_period = 2; +static int initial_ssthresh = 100; +static int smooth_part = 20; + +module_param(fast_convergence, int, 0644); +MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence"); +module_param(max_increment, int, 0644); +MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search"); +module_param(low_window, int, 0644); +MODULE_PARM_DESC(low_window, "lower bound on congestion window (for TCP friendliness)"); +module_param(beta, int, 0644); +MODULE_PARM_DESC(beta, "beta for multiplicative increase"); +module_param(low_utilization_threshold, int, 0644); +MODULE_PARM_DESC(low_utilization_threshold, "percent (scaled by 1024) for low utilization mode"); +module_param(low_utilization_period, int, 0644); +MODULE_PARM_DESC(low_utilization_period, "if average delay exceeds then goto to low utilization mode (seconds)"); +module_param(initial_ssthresh, int, 0644); +MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); +module_param(smooth_part, int, 0644); +MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax"); + + +/* BIC TCP Parameters */ +struct bictcp { + u32 cnt; /* increase cwnd by 1 after ACKs */ + u32 last_max_cwnd; /* last maximum snd_cwnd */ + u32 loss_cwnd; /* congestion window at last loss */ + u32 last_cwnd; /* the last snd_cwnd */ + u32 last_time; /* time when updated last_cwnd */ + u32 delay_min; /* min delay */ + u32 delay_max; /* max delay */ + u32 last_delay; + u8 low_utilization;/* 0: high; 1: low */ + u32 low_utilization_start; /* starting time of low utilization detection*/ + u32 epoch_start; /* beginning of an epoch */ +#define ACK_RATIO_SHIFT 4 + u32 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ +}; + +static inline void bictcp_reset(struct bictcp *ca) +{ + ca->cnt = 0; + ca->last_max_cwnd = 0; + ca->loss_cwnd = 0; + ca->last_cwnd = 0; + ca->last_time = 0; + ca->delay_min = 0; + ca->delay_max = 0; + ca->last_delay = 0; + ca->low_utilization = 0; + ca->low_utilization_start = 0; + ca->epoch_start = 0; + ca->delayed_ack = 2 << ACK_RATIO_SHIFT; +} + +static void bictcp_init(struct tcp_sock *tp) +{ + bictcp_reset(tcp_ca(tp)); + if (initial_ssthresh) + tp->snd_ssthresh = initial_ssthresh; +} + +/* + * Compute congestion window to use. 
+ */ +static inline void bictcp_update(struct bictcp *ca, u32 cwnd) +{ + if (ca->last_cwnd == cwnd && + (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32) + return; + + ca->last_cwnd = cwnd; + ca->last_time = tcp_time_stamp; + + if (ca->epoch_start == 0) /* record the beginning of an epoch */ + ca->epoch_start = tcp_time_stamp; + + /* start off normal */ + if (cwnd <= low_window) { + ca->cnt = cwnd; + return; + } + + /* binary increase */ + if (cwnd < ca->last_max_cwnd) { + __u32 dist = (ca->last_max_cwnd - cwnd) + / BICTCP_B; + + if (dist > max_increment) + /* linear increase */ + ca->cnt = cwnd / max_increment; + else if (dist <= 1U) + /* binary search increase */ + ca->cnt = (cwnd * smooth_part) / BICTCP_B; + else + /* binary search increase */ + ca->cnt = cwnd / dist; + } else { + /* slow start and linear increase */ + if (cwnd < ca->last_max_cwnd + BICTCP_B) + /* slow start */ + ca->cnt = (cwnd * smooth_part) / BICTCP_B; + else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) + /* slow start */ + ca->cnt = (cwnd * (BICTCP_B-1)) + / (cwnd - ca->last_max_cwnd); + else + /* linear increase */ + ca->cnt = cwnd / max_increment; + } + + /* if in slow start or link utilization is very low */ + if ( ca->loss_cwnd == 0 || + (cwnd > ca->loss_cwnd && ca->low_utilization)) { + if (ca->cnt > 20) /* increase cwnd 5% per RTT */ + ca->cnt = 20; + } + + ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; + if (ca->cnt == 0) /* cannot be zero */ + ca->cnt = 1; +} + + +/* Detect low utilization in congestion avoidance */ +static inline void bictcp_low_utilization(struct tcp_sock *tp, int flag) +{ + struct bictcp *ca = tcp_ca(tp); + u32 dist, delay; + + /* No time stamp */ + if (!(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) || + /* Discard delay samples right after fast recovery */ + tcp_time_stamp < ca->epoch_start + HZ || + /* these delay samples may not be accurate */ + flag == 0) { + ca->last_delay = 0; + goto notlow; + } + + delay = ca->last_delay<<3; /* use the same scale as tp->srtt*/ + ca->last_delay = tcp_time_stamp - tp->rx_opt.rcv_tsecr; + if (delay == 0) /* no previous delay sample */ + goto notlow; + + /* first time call or link delay decreases */ + if (ca->delay_min == 0 || ca->delay_min > delay) { + ca->delay_min = ca->delay_max = delay; + goto notlow; + } + + if (ca->delay_max < delay) + ca->delay_max = delay; + + /* utilization is low, if avg delay < dist*threshold + for checking_period time */ + dist = ca->delay_max - ca->delay_min; + if (dist <= ca->delay_min>>6 || + tp->srtt - ca->delay_min >= (dist*low_utilization_threshold)>>10) + goto notlow; + + if (ca->low_utilization_start == 0) { + ca->low_utilization = 0; + ca->low_utilization_start = tcp_time_stamp; + } else if ((s32)(tcp_time_stamp - ca->low_utilization_start) + > low_utilization_period*HZ) { + ca->low_utilization = 1; + } + + return; + + notlow: + ca->low_utilization = 0; + ca->low_utilization_start = 0; + +} + +static void bictcp_cong_avoid(struct tcp_sock *tp, u32 ack, + u32 seq_rtt, u32 in_flight, int data_acked) +{ + struct bictcp *ca = tcp_ca(tp); + + bictcp_low_utilization(tp, data_acked); + + if (in_flight < tp->snd_cwnd) + return; + + if (tp->snd_cwnd <= tp->snd_ssthresh) { + /* In "safe" area, increase. */ + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + } else { + bictcp_update(ca, tp->snd_cwnd); + + /* In dangerous area, increase slowly. 
+ * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd + */ + if (tp->snd_cwnd_cnt >= ca->cnt) { + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + tp->snd_cwnd_cnt = 0; + } else + tp->snd_cwnd_cnt++; + } + +} + +/* + * behave like Reno until low_window is reached, + * then increase congestion window slowly + */ +static u32 bictcp_recalc_ssthresh(struct tcp_sock *tp) +{ + struct bictcp *ca = tcp_ca(tp); + + ca->epoch_start = 0; /* end of epoch */ + + /* in case of wrong delay_max*/ + if (ca->delay_min > 0 && ca->delay_max > ca->delay_min) + ca->delay_max = ca->delay_min + + ((ca->delay_max - ca->delay_min)* 90) / 100; + + /* Wmax and fast convergence */ + if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence) + ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta)) + / (2 * BICTCP_BETA_SCALE); + else + ca->last_max_cwnd = tp->snd_cwnd; + + ca->loss_cwnd = tp->snd_cwnd; + + + if (tp->snd_cwnd <= low_window) + return max(tp->snd_cwnd >> 1U, 2U); + else + return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); +} + +static u32 bictcp_undo_cwnd(struct tcp_sock *tp) +{ + struct bictcp *ca = tcp_ca(tp); + + return max(tp->snd_cwnd, ca->last_max_cwnd); +} + +static u32 bictcp_min_cwnd(struct tcp_sock *tp) +{ + return tp->snd_ssthresh; +} + +static void bictcp_state(struct tcp_sock *tp, u8 new_state) +{ + if (new_state == TCP_CA_Loss) + bictcp_reset(tcp_ca(tp)); +} + +/* Track delayed acknowledgement ratio using sliding window + * ratio = (15*ratio + sample) / 16 + */ +static void bictcp_acked(struct tcp_sock *tp, u32 cnt) +{ + if (cnt > 0 && tp->ca_state == TCP_CA_Open) { + struct bictcp *ca = tcp_ca(tp); + cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; + ca->delayed_ack += cnt; + } +} + + +static struct tcp_congestion_ops bictcp = { + .init = bictcp_init, + .ssthresh = bictcp_recalc_ssthresh, + .cong_avoid = bictcp_cong_avoid, + .set_state = bictcp_state, + .undo_cwnd = bictcp_undo_cwnd, + .min_cwnd = bictcp_min_cwnd, + .pkts_acked = bictcp_acked, + .owner = THIS_MODULE, + .name = "bic", +}; + +static int __init bictcp_register(void) +{ + BUG_ON(sizeof(struct bictcp) > TCP_CA_PRIV_SIZE); + return tcp_register_congestion_control(&bictcp); +} + +static void __exit bictcp_unregister(void) +{ + tcp_unregister_congestion_control(&bictcp); +} + +module_init(bictcp_register); +module_exit(bictcp_unregister); + +MODULE_AUTHOR("Stephen Hemminger"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("BIC TCP"); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c new file mode 100644 index 0000000..4970d10 --- /dev/null +++ b/net/ipv4/tcp_cong.c @@ -0,0 +1,237 @@ +/* + * Pluggable TCP congestion control support and newReno + * congestion control. + * Based on ideas from I/O scheduler support and Web100. + * + * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <linux/list.h> +#include <net/tcp.h> + +static DEFINE_SPINLOCK(tcp_cong_list_lock); +static LIST_HEAD(tcp_cong_list); + +/* Simple linear search, don't expect many entries! */ +static struct tcp_congestion_ops *tcp_ca_find(const char *name) +{ + struct tcp_congestion_ops *e; + + list_for_each_entry_rcu(e, &tcp_cong_list, list) { + if (strcmp(e->name, name) == 0) + return e; + } + + return NULL; +} + +/* + * Attach new congestion control algorithm to the list + * of available options. 
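Against the registration interface that follows, a third-party algorithm is a small module. A hypothetical minimal example (the name "example" is invented; it reuses the exported Reno helpers and supplies the ssthresh, cong_avoid and min_cwnd hooks that tcp_register_congestion_control() checks for):

    #include <linux/module.h>
    #include <net/tcp.h>

    static struct tcp_congestion_ops tcp_example = {
            .name           = "example",
            .owner          = THIS_MODULE,
            .ssthresh       = tcp_reno_ssthresh,    /* exported by tcp_cong.c */
            .cong_avoid     = tcp_reno_cong_avoid,
            .min_cwnd       = tcp_reno_min_cwnd,
    };

    static int __init tcp_example_init(void)
    {
            return tcp_register_congestion_control(&tcp_example);
    }

    static void __exit tcp_example_exit(void)
    {
            tcp_unregister_congestion_control(&tcp_example);
    }

    module_init(tcp_example_init);
    module_exit(tcp_example_exit);
    MODULE_LICENSE("GPL");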
+ */ +int tcp_register_congestion_control(struct tcp_congestion_ops *ca) +{ + int ret = 0; + + /* all algorithms must implement ssthresh and cong_avoid ops */ + if (!ca->ssthresh || !ca->cong_avoid || !ca->min_cwnd) { + printk(KERN_ERR "TCP %s does not implement required ops\n", + ca->name); + return -EINVAL; + } + + spin_lock(&tcp_cong_list_lock); + if (tcp_ca_find(ca->name)) { + printk(KERN_NOTICE "TCP %s already registered\n", ca->name); + ret = -EEXIST; + } else { + list_add_rcu(&ca->list, &tcp_cong_list); + printk(KERN_INFO "TCP %s registered\n", ca->name); + } + spin_unlock(&tcp_cong_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(tcp_register_congestion_control); + +/* + * Remove congestion control algorithm, called from + * the module's remove function. Module ref counts are used + * to ensure that this can't be done till all sockets using + * that method are closed. + */ +void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca) +{ + spin_lock(&tcp_cong_list_lock); + list_del_rcu(&ca->list); + spin_unlock(&tcp_cong_list_lock); +} +EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control); + +/* Assign choice of congestion control. */ +void tcp_init_congestion_control(struct tcp_sock *tp) +{ + struct tcp_congestion_ops *ca; + + if (tp->ca_ops != &tcp_init_congestion_ops) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(ca, &tcp_cong_list, list) { + if (try_module_get(ca->owner)) { + tp->ca_ops = ca; + break; + } + + } + rcu_read_unlock(); + + if (tp->ca_ops->init) + tp->ca_ops->init(tp); +} + +/* Manage refcounts on socket close. */ +void tcp_cleanup_congestion_control(struct tcp_sock *tp) +{ + if (tp->ca_ops->release) + tp->ca_ops->release(tp); + module_put(tp->ca_ops->owner); +} + +/* Used by sysctl to change default congestion control */ +int tcp_set_default_congestion_control(const char *name) +{ + struct tcp_congestion_ops *ca; + int ret = -ENOENT; + + spin_lock(&tcp_cong_list_lock); + ca = tcp_ca_find(name); +#ifdef CONFIG_KMOD + if (!ca) { + spin_unlock(&tcp_cong_list_lock); + + request_module("tcp_%s", name); + spin_lock(&tcp_cong_list_lock); + ca = tcp_ca_find(name); + } +#endif + + if (ca) { + list_move(&ca->list, &tcp_cong_list); + ret = 0; + } + spin_unlock(&tcp_cong_list_lock); + + return ret; +} + +/* Get current default congestion control */ +void tcp_get_default_congestion_control(char *name) +{ + struct tcp_congestion_ops *ca; + /* We will always have reno... */ + BUG_ON(list_empty(&tcp_cong_list)); + + rcu_read_lock(); + ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list); + strncpy(name, ca->name, TCP_CA_NAME_MAX); + rcu_read_unlock(); +} + +/* Change congestion control for socket */ +int tcp_set_congestion_control(struct tcp_sock *tp, const char *name) +{ + struct tcp_congestion_ops *ca; + int err = 0; + + rcu_read_lock(); + ca = tcp_ca_find(name); + if (ca == tp->ca_ops) + goto out; + + if (!ca) + err = -ENOENT; + + else if (!try_module_get(ca->owner)) + err = -EBUSY; + + else { + tcp_cleanup_congestion_control(tp); + tp->ca_ops = ca; + if (tp->ca_ops->init) + tp->ca_ops->init(tp); + } + out: + rcu_read_unlock(); + return err; +} + +/* + * TCP Reno congestion control + * This is special case used for fallback as well. + */ +/* This is Jacobson's slow start and congestion avoidance. + * SIGCOMM '88, p. 328. + */ +void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, u32 in_flight, + int flag) +{ + if (in_flight < tp->snd_cwnd) + return; + + if (tp->snd_cwnd <= tp->snd_ssthresh) { + /* In "safe" area, increase. 
*/ + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + } else { + /* In dangerous area, increase slowly. + * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd + */ + if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + tp->snd_cwnd_cnt = 0; + } else + tp->snd_cwnd_cnt++; + } +} +EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); + +/* Slow start threshold is half the congestion window (min 2) */ +u32 tcp_reno_ssthresh(struct tcp_sock *tp) +{ + return max(tp->snd_cwnd >> 1U, 2U); +} +EXPORT_SYMBOL_GPL(tcp_reno_ssthresh); + +/* Lower bound on congestion window. */ +u32 tcp_reno_min_cwnd(struct tcp_sock *tp) +{ + return tp->snd_ssthresh/2; +} +EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd); + +struct tcp_congestion_ops tcp_reno = { + .name = "reno", + .owner = THIS_MODULE, + .ssthresh = tcp_reno_ssthresh, + .cong_avoid = tcp_reno_cong_avoid, + .min_cwnd = tcp_reno_min_cwnd, +}; + +/* Initial congestion control used (until SYN) + * really reno under another name so we can tell difference + * during tcp_set_default_congestion_control + */ +struct tcp_congestion_ops tcp_init_congestion_ops = { + .name = "", + .owner = THIS_MODULE, + .ssthresh = tcp_reno_ssthresh, + .cong_avoid = tcp_reno_cong_avoid, + .min_cwnd = tcp_reno_min_cwnd, +}; +EXPORT_SYMBOL_GPL(tcp_init_congestion_ops); diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 8faa894..f66945c 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -42,15 +42,8 @@ struct tcpdiag_entry static struct sock *tcpnl; - #define TCPDIAG_PUT(skb, attrtype, attrlen) \ -({ int rtalen = RTA_LENGTH(attrlen); \ - struct rtattr *rta; \ - if (skb_tailroom(skb) < RTA_ALIGN(rtalen)) goto nlmsg_failure; \ - rta = (void*)__skb_put(skb, RTA_ALIGN(rtalen)); \ - rta->rta_type = attrtype; \ - rta->rta_len = rtalen; \ - RTA_DATA(rta); }) + RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, int ext, u32 pid, u32 seq, u16 nlmsg_flags) @@ -61,7 +54,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, struct nlmsghdr *nlh; struct tcp_info *info = NULL; struct tcpdiag_meminfo *minfo = NULL; - struct tcpvegas_info *vinfo = NULL; unsigned char *b = skb->tail; nlh = NLMSG_PUT(skb, pid, seq, TCPDIAG_GETSOCK, sizeof(*r)); @@ -73,9 +65,11 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, if (ext & (1<<(TCPDIAG_INFO-1))) info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info)); - if ((tcp_is_westwood(tp) || tcp_is_vegas(tp)) - && (ext & (1<<(TCPDIAG_VEGASINFO-1)))) - vinfo = TCPDIAG_PUT(skb, TCPDIAG_VEGASINFO, sizeof(*vinfo)); + if (ext & (1<<(TCPDIAG_CONG-1))) { + size_t len = strlen(tp->ca_ops->name); + strcpy(TCPDIAG_PUT(skb, TCPDIAG_CONG, len+1), + tp->ca_ops->name); + } } r->tcpdiag_family = sk->sk_family; r->tcpdiag_state = sk->sk_state; @@ -166,23 +160,13 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, if (info) tcp_get_info(sk, info); - if (vinfo) { - if (tcp_is_vegas(tp)) { - vinfo->tcpv_enabled = tp->vegas.doing_vegas_now; - vinfo->tcpv_rttcnt = tp->vegas.cntRTT; - vinfo->tcpv_rtt = jiffies_to_usecs(tp->vegas.baseRTT); - vinfo->tcpv_minrtt = jiffies_to_usecs(tp->vegas.minRTT); - } else { - vinfo->tcpv_enabled = 0; - vinfo->tcpv_rttcnt = 0; - vinfo->tcpv_rtt = jiffies_to_usecs(tp->westwood.rtt); - vinfo->tcpv_minrtt = jiffies_to_usecs(tp->westwood.rtt_min); - } - } + if (sk->sk_state < TCP_TIME_WAIT && tp->ca_ops->get_info) + tp->ca_ops->get_info(tp, ext, skb); nlh->nlmsg_len = skb->tail - b; return skb->len; 
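The Reno increase rule exported above is easy to exercise on its own. This userspace sketch isolates the snd_cwnd_cnt accumulator, which approximates cwnd += 1/cwnd by growing the window one full segment per cwnd acks, i.e. roughly once per round trip.

#include <stdio.h>

/* One congestion-avoidance step per acked segment, mirroring the
 * else-branch of tcp_reno_cong_avoid above. */
static void reno_ca_step(unsigned *cwnd, unsigned *cwnd_cnt, unsigned clamp)
{
    if (*cwnd_cnt >= *cwnd) {
        if (*cwnd < clamp)
            (*cwnd)++;
        *cwnd_cnt = 0;
    } else {
        (*cwnd_cnt)++;
    }
}

int main(void)
{
    unsigned cwnd = 10, cnt = 0;
    for (int ack = 0; ack < 100; ack++)
        reno_ca_step(&cwnd, &cnt, 0xffff);
    printf("cwnd after 100 acks: %u\n", cwnd);   /* prints 17 */
    return 0;
}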
+rtattr_failure: nlmsg_failure: skb_trim(skb, b - skb->data); return -1; @@ -455,9 +439,10 @@ static int tcpdiag_dump_sock(struct sk_buff *skb, struct sock *sk, } static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk, - struct open_request *req, + struct request_sock *req, u32 pid, u32 seq) { + const struct inet_request_sock *ireq = inet_rsk(req); struct inet_sock *inet = inet_sk(sk); unsigned char *b = skb->tail; struct tcpdiagmsg *r; @@ -482,9 +467,9 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk, tmo = 0; r->id.tcpdiag_sport = inet->sport; - r->id.tcpdiag_dport = req->rmt_port; - r->id.tcpdiag_src[0] = req->af.v4_req.loc_addr; - r->id.tcpdiag_dst[0] = req->af.v4_req.rmt_addr; + r->id.tcpdiag_dport = ireq->rmt_port; + r->id.tcpdiag_src[0] = ireq->loc_addr; + r->id.tcpdiag_dst[0] = ireq->rmt_addr; r->tcpdiag_expires = jiffies_to_msecs(tmo), r->tcpdiag_rqueue = 0; r->tcpdiag_wqueue = 0; @@ -493,9 +478,9 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk, #ifdef CONFIG_IP_TCPDIAG_IPV6 if (r->tcpdiag_family == AF_INET6) { ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src, - &req->af.v6_req.loc_addr); + &tcp6_rsk(req)->loc_addr); ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst, - &req->af.v6_req.rmt_addr); + &tcp6_rsk(req)->rmt_addr); } #endif nlh->nlmsg_len = skb->tail - b; @@ -513,7 +498,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, struct tcpdiag_entry entry; struct tcpdiagreq *r = NLMSG_DATA(cb->nlh); struct tcp_sock *tp = tcp_sk(sk); - struct tcp_listen_opt *lopt; + struct listen_sock *lopt; struct rtattr *bc = NULL; struct inet_sock *inet = inet_sk(sk); int j, s_j; @@ -528,9 +513,9 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, entry.family = sk->sk_family; - read_lock_bh(&tp->syn_wait_lock); + read_lock_bh(&tp->accept_queue.syn_wait_lock); - lopt = tp->listen_opt; + lopt = tp->accept_queue.listen_opt; if (!lopt || !lopt->qlen) goto out; @@ -541,13 +526,15 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, } for (j = s_j; j < TCP_SYNQ_HSIZE; j++) { - struct open_request *req, *head = lopt->syn_table[j]; + struct request_sock *req, *head = lopt->syn_table[j]; reqnum = 0; for (req = head; req; reqnum++, req = req->dl_next) { + struct inet_request_sock *ireq = inet_rsk(req); + if (reqnum < s_reqnum) continue; - if (r->id.tcpdiag_dport != req->rmt_port && + if (r->id.tcpdiag_dport != ireq->rmt_port && r->id.tcpdiag_dport) continue; @@ -555,16 +542,16 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, entry.saddr = #ifdef CONFIG_IP_TCPDIAG_IPV6 (entry.family == AF_INET6) ? - req->af.v6_req.loc_addr.s6_addr32 : + tcp6_rsk(req)->loc_addr.s6_addr32 : #endif - &req->af.v4_req.loc_addr; + &ireq->loc_addr; entry.daddr = #ifdef CONFIG_IP_TCPDIAG_IPV6 (entry.family == AF_INET6) ? 
- req->af.v6_req.rmt_addr.s6_addr32 : + tcp6_rsk(req)->rmt_addr.s6_addr32 : #endif - &req->af.v4_req.rmt_addr; - entry.dport = ntohs(req->rmt_port); + &ireq->rmt_addr; + entry.dport = ntohs(ireq->rmt_port); if (!tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) @@ -585,7 +572,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, } out: - read_unlock_bh(&tp->syn_wait_lock); + read_unlock_bh(&tp->accept_queue.syn_wait_lock); return err; } diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c new file mode 100644 index 0000000..36c51f8 --- /dev/null +++ b/net/ipv4/tcp_highspeed.c @@ -0,0 +1,181 @@ +/* + * Sally Floyd's High Speed TCP (RFC 3649) congestion control + * + * See http://www.icir.org/floyd/hstcp.html + * + * John Heffner <jheffner@psc.edu> + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <net/tcp.h> + + +/* From AIMD tables from RFC 3649 appendix B, + * with fixed-point MD scaled <<8. + */ +static const struct hstcp_aimd_val { + unsigned int cwnd; + unsigned int md; +} hstcp_aimd_vals[] = { + { 38, 128, /* 0.50 */ }, + { 118, 112, /* 0.44 */ }, + { 221, 104, /* 0.41 */ }, + { 347, 98, /* 0.38 */ }, + { 495, 93, /* 0.37 */ }, + { 663, 89, /* 0.35 */ }, + { 851, 86, /* 0.34 */ }, + { 1058, 83, /* 0.33 */ }, + { 1284, 81, /* 0.32 */ }, + { 1529, 78, /* 0.31 */ }, + { 1793, 76, /* 0.30 */ }, + { 2076, 74, /* 0.29 */ }, + { 2378, 72, /* 0.28 */ }, + { 2699, 71, /* 0.28 */ }, + { 3039, 69, /* 0.27 */ }, + { 3399, 68, /* 0.27 */ }, + { 3778, 66, /* 0.26 */ }, + { 4177, 65, /* 0.26 */ }, + { 4596, 64, /* 0.25 */ }, + { 5036, 62, /* 0.25 */ }, + { 5497, 61, /* 0.24 */ }, + { 5979, 60, /* 0.24 */ }, + { 6483, 59, /* 0.23 */ }, + { 7009, 58, /* 0.23 */ }, + { 7558, 57, /* 0.22 */ }, + { 8130, 56, /* 0.22 */ }, + { 8726, 55, /* 0.22 */ }, + { 9346, 54, /* 0.21 */ }, + { 9991, 53, /* 0.21 */ }, + { 10661, 52, /* 0.21 */ }, + { 11358, 52, /* 0.20 */ }, + { 12082, 51, /* 0.20 */ }, + { 12834, 50, /* 0.20 */ }, + { 13614, 49, /* 0.19 */ }, + { 14424, 48, /* 0.19 */ }, + { 15265, 48, /* 0.19 */ }, + { 16137, 47, /* 0.19 */ }, + { 17042, 46, /* 0.18 */ }, + { 17981, 45, /* 0.18 */ }, + { 18955, 45, /* 0.18 */ }, + { 19965, 44, /* 0.17 */ }, + { 21013, 43, /* 0.17 */ }, + { 22101, 43, /* 0.17 */ }, + { 23230, 42, /* 0.17 */ }, + { 24402, 41, /* 0.16 */ }, + { 25618, 41, /* 0.16 */ }, + { 26881, 40, /* 0.16 */ }, + { 28193, 39, /* 0.16 */ }, + { 29557, 39, /* 0.15 */ }, + { 30975, 38, /* 0.15 */ }, + { 32450, 38, /* 0.15 */ }, + { 33986, 37, /* 0.15 */ }, + { 35586, 36, /* 0.14 */ }, + { 37253, 36, /* 0.14 */ }, + { 38992, 35, /* 0.14 */ }, + { 40808, 35, /* 0.14 */ }, + { 42707, 34, /* 0.13 */ }, + { 44694, 33, /* 0.13 */ }, + { 46776, 33, /* 0.13 */ }, + { 48961, 32, /* 0.13 */ }, + { 51258, 32, /* 0.13 */ }, + { 53677, 31, /* 0.12 */ }, + { 56230, 30, /* 0.12 */ }, + { 58932, 30, /* 0.12 */ }, + { 61799, 29, /* 0.12 */ }, + { 64851, 28, /* 0.11 */ }, + { 68113, 28, /* 0.11 */ }, + { 71617, 27, /* 0.11 */ }, + { 75401, 26, /* 0.10 */ }, + { 79517, 26, /* 0.10 */ }, + { 84035, 25, /* 0.10 */ }, + { 89053, 24, /* 0.10 */ }, +}; + +#define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals) + +struct hstcp { + u32 ai; +}; + +static void hstcp_init(struct tcp_sock *tp) +{ + struct hstcp *ca = tcp_ca(tp); + + ca->ai = 0; + + /* Ensure the MD arithmetic works. This is somewhat pedantic, + * since I don't think we will see a cwnd this large. 
:) */ + tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); +} + +static void hstcp_cong_avoid(struct tcp_sock *tp, u32 adk, u32 rtt, + u32 in_flight, int good) +{ + struct hstcp *ca = tcp_ca(tp); + + if (in_flight < tp->snd_cwnd) + return; + + if (tp->snd_cwnd <= tp->snd_ssthresh) { + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + } else { + /* Update AIMD parameters */ + if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { + while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && + ca->ai < HSTCP_AIMD_MAX) + ca->ai++; + } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) { + while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && + ca->ai > 0) + ca->ai--; + } + + /* Do additive increase */ + if (tp->snd_cwnd < tp->snd_cwnd_clamp) { + tp->snd_cwnd_cnt += ca->ai; + if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { + tp->snd_cwnd++; + tp->snd_cwnd_cnt -= tp->snd_cwnd; + } + } + } +} + +static u32 hstcp_ssthresh(struct tcp_sock *tp) +{ + struct hstcp *ca = tcp_ca(tp); + + /* Do multiplicative decrease */ + return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); +} + + +static struct tcp_congestion_ops tcp_highspeed = { + .init = hstcp_init, + .ssthresh = hstcp_ssthresh, + .cong_avoid = hstcp_cong_avoid, + .min_cwnd = tcp_reno_min_cwnd, + + .owner = THIS_MODULE, + .name = "highspeed" +}; + +static int __init hstcp_register(void) +{ + BUG_ON(sizeof(struct hstcp) > TCP_CA_PRIV_SIZE); + return tcp_register_congestion_control(&tcp_highspeed); +} + +static void __exit hstcp_unregister(void) +{ + tcp_unregister_congestion_control(&tcp_highspeed); +} + +module_init(hstcp_register); +module_exit(hstcp_unregister); + +MODULE_AUTHOR("John Heffner"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("High Speed TCP"); diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c new file mode 100644 index 0000000..4016827 --- /dev/null +++ b/net/ipv4/tcp_htcp.c @@ -0,0 +1,289 @@ +/* + * H-TCP congestion control. The algorithm is detailed in: + * R.N.Shorten, D.J.Leith: + * "H-TCP: TCP for high-speed and long-distance networks" + * Proc. PFLDnet, Argonne, 2004. 
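hstcp_cong_avoid's index walk plus hstcp_ssthresh's decrease amount to one table lookup per backoff. A stateless userspace sketch using only the first five rows of the RFC 3649 table above; the kernel caches the index in ca->ai between calls instead of rescanning, and the bound here is kept strictly in range.

#include <stdio.h>

/* First rows of the AIMD table above (md is the multiplicative
 * decrease factor, fixed point <<8). */
static const struct { unsigned cwnd, md; } aimd[] = {
    {  38, 128 }, { 118, 112 }, { 221, 104 }, { 347, 98 }, { 495, 93 },
};
#define AIMD_MAX (sizeof(aimd) / sizeof(aimd[0]))

/* Walk to the row covering this cwnd, then apply the decrease the
 * same way hstcp_ssthresh does. */
static unsigned hstcp_backoff(unsigned cwnd)
{
    unsigned ai = 0;
    while (ai < AIMD_MAX - 1 && cwnd > aimd[ai].cwnd)
        ai++;
    unsigned dec = (cwnd * aimd[ai].md) >> 8;
    unsigned next = cwnd - dec;
    return next > 2 ? next : 2;
}

int main(void)
{
    printf("cwnd 30 -> %u, cwnd 200 -> %u, cwnd 400 -> %u\n",
           hstcp_backoff(30), hstcp_backoff(200), hstcp_backoff(400));
    return 0;
}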
+ * http://www.hamilton.ie/net/htcp3.pdf + */ + +#include <linux/config.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <net/tcp.h> + +#define ALPHA_BASE (1<<7) /* 1.0 with shift << 7 */ +#define BETA_MIN (1<<6) /* 0.5 with shift << 7 */ +#define BETA_MAX 102 /* 0.8 with shift << 7 */ + +static int use_rtt_scaling = 1; +module_param(use_rtt_scaling, int, 0644); +MODULE_PARM_DESC(use_rtt_scaling, "turn on/off RTT scaling"); + +static int use_bandwidth_switch = 1; +module_param(use_bandwidth_switch, int, 0644); +MODULE_PARM_DESC(use_bandwidth_switch, "turn on/off bandwidth switcher"); + +struct htcp { + u16 alpha; /* Fixed point arith, << 7 */ + u8 beta; /* Fixed point arith, << 7 */ + u8 modeswitch; /* Delay modeswitch until we had at least one congestion event */ + u8 ccount; /* Number of RTTs since last congestion event */ + u8 undo_ccount; + u16 packetcount; + u32 minRTT; + u32 maxRTT; + u32 snd_cwnd_cnt2; + + u32 undo_maxRTT; + u32 undo_old_maxB; + + /* Bandwidth estimation */ + u32 minB; + u32 maxB; + u32 old_maxB; + u32 Bi; + u32 lasttime; +}; + +static inline void htcp_reset(struct htcp *ca) +{ + ca->undo_ccount = ca->ccount; + ca->undo_maxRTT = ca->maxRTT; + ca->undo_old_maxB = ca->old_maxB; + + ca->ccount = 0; + ca->snd_cwnd_cnt2 = 0; +} + +static u32 htcp_cwnd_undo(struct tcp_sock *tp) +{ + struct htcp *ca = tcp_ca(tp); + ca->ccount = ca->undo_ccount; + ca->maxRTT = ca->undo_maxRTT; + ca->old_maxB = ca->undo_old_maxB; + return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta); +} + +static inline void measure_rtt(struct tcp_sock *tp) +{ + struct htcp *ca = tcp_ca(tp); + u32 srtt = tp->srtt>>3; + + /* keep track of minimum RTT seen so far, minRTT is zero at first */ + if (ca->minRTT > srtt || !ca->minRTT) + ca->minRTT = srtt; + + /* max RTT */ + if (tp->ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && ca->ccount > 3) { + if (ca->maxRTT < ca->minRTT) + ca->maxRTT = ca->minRTT; + if (ca->maxRTT < srtt && srtt <= ca->maxRTT+HZ/50) + ca->maxRTT = srtt; + } +} + +static void measure_achieved_throughput(struct tcp_sock *tp, u32 pkts_acked) +{ + struct htcp *ca = tcp_ca(tp); + u32 now = tcp_time_stamp; + + /* achieved throughput calculations */ + if (tp->ca_state != TCP_CA_Open && tp->ca_state != TCP_CA_Disorder) { + ca->packetcount = 0; + ca->lasttime = now; + return; + } + + ca->packetcount += pkts_acked; + + if (ca->packetcount >= tp->snd_cwnd - (ca->alpha>>7? 
: 1) + && now - ca->lasttime >= ca->minRTT + && ca->minRTT > 0) { + __u32 cur_Bi = ca->packetcount*HZ/(now - ca->lasttime); + if (ca->ccount <= 3) { + /* just after backoff */ + ca->minB = ca->maxB = ca->Bi = cur_Bi; + } else { + ca->Bi = (3*ca->Bi + cur_Bi)/4; + if (ca->Bi > ca->maxB) + ca->maxB = ca->Bi; + if (ca->minB > ca->maxB) + ca->minB = ca->maxB; + } + ca->packetcount = 0; + ca->lasttime = now; + } +} + +static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT) +{ + if (use_bandwidth_switch) { + u32 maxB = ca->maxB; + u32 old_maxB = ca->old_maxB; + ca->old_maxB = ca->maxB; + + if (!between(5*maxB, 4*old_maxB, 6*old_maxB)) { + ca->beta = BETA_MIN; + ca->modeswitch = 0; + return; + } + } + + if (ca->modeswitch && minRTT > max(HZ/100, 1) && maxRTT) { + ca->beta = (minRTT<<7)/maxRTT; + if (ca->beta < BETA_MIN) + ca->beta = BETA_MIN; + else if (ca->beta > BETA_MAX) + ca->beta = BETA_MAX; + } else { + ca->beta = BETA_MIN; + ca->modeswitch = 1; + } +} + +static inline void htcp_alpha_update(struct htcp *ca) +{ + u32 minRTT = ca->minRTT; + u32 factor = 1; + u32 diff = ca->ccount * minRTT; /* time since last backoff */ + + if (diff > HZ) { + diff -= HZ; + factor = 1+ ( 10*diff + ((diff/2)*(diff/2)/HZ) )/HZ; + } + + if (use_rtt_scaling && minRTT) { + u32 scale = (HZ<<3)/(10*minRTT); + scale = min(max(scale, 1U<<2), 10U<<3); /* clamping ratio to interval [0.5,10]<<3 */ + factor = (factor<<3)/scale; + if (!factor) + factor = 1; + } + + ca->alpha = 2*factor*((1<<7)-ca->beta); + if (!ca->alpha) + ca->alpha = ALPHA_BASE; +} + +/* After we have the rtt data to calculate beta, we'd still prefer to wait one + * rtt before we adjust our beta to ensure we are working from a consistent + * data. + * + * This function should be called when we hit a congestion event since only at + * that point do we really have a real sense of maxRTT (the queues en route + * were getting just too full now). + */ +static void htcp_param_update(struct tcp_sock *tp) +{ + struct htcp *ca = tcp_ca(tp); + u32 minRTT = ca->minRTT; + u32 maxRTT = ca->maxRTT; + + htcp_beta_update(ca, minRTT, maxRTT); + htcp_alpha_update(ca); + + /* add slowly fading memory for maxRTT to accommodate routing changes etc */ + if (minRTT > 0 && maxRTT > minRTT) + ca->maxRTT = minRTT + ((maxRTT-minRTT)*95)/100; +} + +static u32 htcp_recalc_ssthresh(struct tcp_sock *tp) +{ + struct htcp *ca = tcp_ca(tp); + htcp_param_update(tp); + return max((tp->snd_cwnd * ca->beta) >> 7, 2U); +} + +static void htcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, + u32 in_flight, int data_acked) +{ + struct htcp *ca = tcp_ca(tp); + + if (in_flight < tp->snd_cwnd) + return; + + if (tp->snd_cwnd <= tp->snd_ssthresh) { + /* In "safe" area, increase. */ + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + } else { + measure_rtt(tp); + + /* keep track of number of round-trip times since last backoff event */ + if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) { + ca->ccount++; + ca->snd_cwnd_cnt2 = 0; + htcp_alpha_update(ca); + } + + /* In dangerous area, increase slowly. + * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd + */ + if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) { + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + tp->snd_cwnd_cnt = 0; + ca->ccount++; + } + } +} + +/* Lower bound on congestion window. 
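htcp_alpha_update above is pure fixed-point arithmetic and can be checked in isolation. A userspace transcription, assuming HZ = 1000 and passing beta (<<7) in as a parameter; for 50 RTTs of 100 jiffies since the last backoff and beta = 0.5 it yields alpha = 5760 in <<7 units, i.e. about 45 segments of increase per RTT.

#include <stdio.h>

#define HZ 1000   /* assumed tick rate */

/* alpha(<<7) as a function of time since the last backoff (quadratic
 * growth after the first second) plus the optional RTT scaling that
 * decouples the growth rate from the RTT. */
static unsigned htcp_alpha(unsigned ccount, unsigned min_rtt,
                           unsigned beta, int use_rtt_scaling)
{
    unsigned factor = 1;
    unsigned diff = ccount * min_rtt;   /* jiffies since last backoff */

    if (diff > HZ) {
        diff -= HZ;
        factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / HZ)) / HZ;
    }

    if (use_rtt_scaling && min_rtt) {
        unsigned scale = (HZ << 3) / (10 * min_rtt);
        if (scale < (1U << 2))
            scale = 1U << 2;      /* clamp ratio to [0.5,10] << 3 */
        if (scale > (10U << 3))
            scale = 10U << 3;
        factor = (factor << 3) / scale;
        if (!factor)
            factor = 1;
    }

    unsigned alpha = 2 * factor * ((1U << 7) - beta);
    return alpha ? alpha : (1U << 7);   /* ALPHA_BASE fallback */
}

int main(void)
{
    /* 50 RTTs of 100 jiffies since backoff, beta = 0.5 (= 1<<6) */
    printf("alpha = %u (<<7)\n", htcp_alpha(50, 100, 1U << 6, 1));
    return 0;
}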
*/ +static u32 htcp_min_cwnd(struct tcp_sock *tp) +{ + return tp->snd_ssthresh; +} + + +static void htcp_init(struct tcp_sock *tp) +{ + struct htcp *ca = tcp_ca(tp); + + memset(ca, 0, sizeof(struct htcp)); + ca->alpha = ALPHA_BASE; + ca->beta = BETA_MIN; +} + +static void htcp_state(struct tcp_sock *tp, u8 new_state) +{ + switch (new_state) { + case TCP_CA_CWR: + case TCP_CA_Recovery: + case TCP_CA_Loss: + htcp_reset(tcp_ca(tp)); + break; + } +} + +static struct tcp_congestion_ops htcp = { + .init = htcp_init, + .ssthresh = htcp_recalc_ssthresh, + .min_cwnd = htcp_min_cwnd, + .cong_avoid = htcp_cong_avoid, + .set_state = htcp_state, + .undo_cwnd = htcp_cwnd_undo, + .pkts_acked = measure_achieved_throughput, + .owner = THIS_MODULE, + .name = "htcp", +}; + +static int __init htcp_register(void) +{ + BUG_ON(sizeof(struct htcp) > TCP_CA_PRIV_SIZE); + BUILD_BUG_ON(BETA_MIN >= BETA_MAX); + if (!use_bandwidth_switch) + htcp.pkts_acked = NULL; + return tcp_register_congestion_control(&htcp); +} + +static void __exit htcp_unregister(void) +{ + tcp_unregister_congestion_control(&htcp); +} + +module_init(htcp_register); +module_exit(htcp_unregister); + +MODULE_AUTHOR("Baruch Even"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("H-TCP"); diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c new file mode 100644 index 0000000..13a6634 --- /dev/null +++ b/net/ipv4/tcp_hybla.c @@ -0,0 +1,187 @@ +/* + * TCP HYBLA + * + * TCP-HYBLA Congestion control algorithm, based on: + * C.Caini, R.Firrincieli, "TCP-Hybla: A TCP Enhancement + * for Heterogeneous Networks", + * International Journal on satellite Communications, + * September 2004 + * Daniele Lacamera + * root at danielinux.net + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <net/tcp.h> + +/* Tcp Hybla structure. */ +struct hybla { + u8 hybla_en; + u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */ + u32 rho; /* Rho parameter, integer part */ + u32 rho2; /* Rho * Rho, integer part */ + u32 rho_3ls; /* Rho parameter, <<3 */ + u32 rho2_7ls; /* Rho^2, <<7 */ + u32 minrtt; /* Minimum smoothed round trip time value seen */ +}; + +/* Hybla reference round trip time (default= 1/40 sec = 25 ms), + expressed in jiffies */ +static int rtt0 = 25; +module_param(rtt0, int, 0644); +MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); + + +/* This is called to refresh values for hybla parameters */ +static inline void hybla_recalc_param (struct tcp_sock *tp) +{ + struct hybla *ca = tcp_ca(tp); + + ca->rho_3ls = max_t(u32, tp->srtt / msecs_to_jiffies(rtt0), 8); + ca->rho = ca->rho_3ls >> 3; + ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1; + ca->rho2 = ca->rho2_7ls >>7; +} + +static void hybla_init(struct tcp_sock *tp) +{ + struct hybla *ca = tcp_ca(tp); + + ca->rho = 0; + ca->rho2 = 0; + ca->rho_3ls = 0; + ca->rho2_7ls = 0; + ca->snd_cwnd_cents = 0; + ca->hybla_en = 1; + tp->snd_cwnd = 2; + tp->snd_cwnd_clamp = 65535; + + /* 1st Rho measurement based on initial srtt */ + hybla_recalc_param(tp); + + /* set minimum rtt as this is the 1st ever seen */ + ca->minrtt = tp->srtt; + tp->snd_cwnd = ca->rho; +} + +static void hybla_state(struct tcp_sock *tp, u8 ca_state) +{ + struct hybla *ca = tcp_ca(tp); + + ca->hybla_en = (ca_state == TCP_CA_Open); +} + +static inline u32 hybla_fraction(u32 odds) +{ + static const u32 fractions[] = { + 128, 139, 152, 165, 181, 197, 215, 234, + }; + + return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128; +} + +/* TCP Hybla main routine. 
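hybla_recalc_param reduces to a single division kept in <<3 fixed point. A userspace sketch, assuming one jiffy equals one millisecond so srtt can be given in milliseconds directly; the 600 ms example is a satellite-like RTT against the 25 ms reference rtt0.

#include <stdio.h>

/* rho is the connection RTT over the reference RTT, <<3 fixed point,
 * floored at 1.0 (= 8) so Hybla never falls below plain Reno. */
static void hybla_rho(unsigned srtt_ms, unsigned rtt0_ms,
                      unsigned *rho_3ls, unsigned *rho2_7ls)
{
    *rho_3ls = srtt_ms * 8 / rtt0_ms;         /* rho << 3 */
    if (*rho_3ls < 8)
        *rho_3ls = 8;                         /* never below 1.0 */
    *rho2_7ls = (*rho_3ls * *rho_3ls) << 1;   /* rho^2 << 7 */
}

int main(void)
{
    unsigned rho_3ls, rho2_7ls;
    hybla_rho(600, 25, &rho_3ls, &rho2_7ls);  /* 600 ms satellite RTT */
    printf("rho = %u.%03u, rho^2 (<<7) = %u\n",
           rho_3ls >> 3, (rho_3ls & 7) * 125, rho2_7ls);
    return 0;
}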
+ * This is the algorithm behavior: + * o Recalc Hybla parameters if min_rtt has changed + * o Give cwnd a new value based on the model proposed + * o remember increments <1 + */ +static void hybla_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, + u32 in_flight, int flag) +{ + struct hybla *ca = tcp_ca(tp); + u32 increment, odd, rho_fractions; + int is_slowstart = 0; + + /* Recalculate rho only if this srtt is the lowest */ + if (tp->srtt < ca->minrtt){ + hybla_recalc_param(tp); + ca->minrtt = tp->srtt; + } + + if (!ca->hybla_en) + return tcp_reno_cong_avoid(tp, ack, rtt, in_flight, flag); + + if (in_flight < tp->snd_cwnd) + return; + + if (ca->rho == 0) + hybla_recalc_param(tp); + + rho_fractions = ca->rho_3ls - (ca->rho << 3); + + if (tp->snd_cwnd < tp->snd_ssthresh) { + /* + * slow start + * INC = 2^RHO - 1 + * This is done by splitting the rho parameter + * into 2 parts: an integer part and a fraction part. + * Inrement<<7 is estimated by doing: + * [2^(int+fract)]<<7 + * that is equal to: + * (2^int) * [(2^fract) <<7] + * 2^int is straightly computed as 1<<int, + * while we will use hybla_slowstart_fraction_increment() to + * calculate 2^fract in a <<7 value. + */ + is_slowstart = 1; + increment = ((1 << ca->rho) * hybla_fraction(rho_fractions)) + - 128; + } else { + /* + * congestion avoidance + * INC = RHO^2 / W + * as long as increment is estimated as (rho<<7)/window + * it already is <<7 and we can easily count its fractions. + */ + increment = ca->rho2_7ls / tp->snd_cwnd; + if (increment < 128) + tp->snd_cwnd_cnt++; + } + + odd = increment % 128; + tp->snd_cwnd += increment >> 7; + ca->snd_cwnd_cents += odd; + + /* check when fractions goes >=128 and increase cwnd by 1. */ + while(ca->snd_cwnd_cents >= 128) { + tp->snd_cwnd++; + ca->snd_cwnd_cents -= 128; + tp->snd_cwnd_cnt = 0; + } + + /* clamp down slowstart cwnd to ssthresh value. */ + if (is_slowstart) + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); + + tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp); +} + +static struct tcp_congestion_ops tcp_hybla = { + .init = hybla_init, + .ssthresh = tcp_reno_ssthresh, + .min_cwnd = tcp_reno_min_cwnd, + .cong_avoid = hybla_cong_avoid, + .set_state = hybla_state, + + .owner = THIS_MODULE, + .name = "hybla" +}; + +static int __init hybla_register(void) +{ + BUG_ON(sizeof(struct hybla) > TCP_CA_PRIV_SIZE); + return tcp_register_congestion_control(&tcp_hybla); +} + +static void __exit hybla_unregister(void) +{ + tcp_unregister_congestion_control(&tcp_hybla); +} + +module_init(hybla_register); +module_exit(hybla_unregister); + +MODULE_AUTHOR("Daniele Lacamera"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TCP Hybla"); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5bad504..7bbbbc3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -61,7 +61,6 @@ * Panu Kuhlberg: Experimental audit of TCP (re)transmission * engine. Lots of bugs are found. * Pasi Sarolahti: F-RTO for dealing with spurious RTOs - * Angelo Dell'Aera: TCP Westwood+ support */ #include <linux/config.h> @@ -88,23 +87,9 @@ int sysctl_tcp_rfc1337; int sysctl_tcp_max_orphans = NR_FILE; int sysctl_tcp_frto; int sysctl_tcp_nometrics_save; -int sysctl_tcp_westwood; -int sysctl_tcp_vegas_cong_avoid; int sysctl_tcp_moderate_rcvbuf = 1; -/* Default values of the Vegas variables, in fixed-point representation - * with V_PARAM_SHIFT bits to the right of the binary point. 
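The fractional bookkeeping in hybla_cong_avoid (whole segments from increment >> 7, remainders carried in snd_cwnd_cents) can be simulated directly. A userspace sketch of the slow-start increment 2^rho - 1 for a non-integer rho, reusing the hybla_fraction table from above.

#include <stdio.h>

static const unsigned fractions[8] = {
    128, 139, 152, 165, 181, 197, 215, 234,   /* 2^(i/8) << 7 */
};

/* 2^rho - 1 in <<7 fixed point, with rho_3ls = rho << 3 split into
 * integer and fractional parts as in hybla_cong_avoid above. */
static unsigned hybla_ss_increment(unsigned rho_3ls)
{
    unsigned rho = rho_3ls >> 3;
    unsigned frac = rho_3ls & 7;
    return (1U << rho) * fractions[frac] - 128;   /* minus 1 << 7 */
}

int main(void)
{
    unsigned cwnd = 2, cents = 0;
    unsigned inc = hybla_ss_increment(20);   /* rho = 2.5 */
    for (int ack = 0; ack < 4; ack++) {
        cwnd += inc >> 7;        /* whole segments */
        cents += inc & 127;      /* keep the remainder */
        while (cents >= 128) {   /* carry fractions into cwnd */
            cwnd++;
            cents -= 128;
        }
    }
    printf("inc = %u/128 segments, cwnd after 4 acks = %u\n", inc, cwnd);
    return 0;
}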
- */ -#define V_PARAM_SHIFT 1 -int sysctl_tcp_vegas_alpha = 1<<V_PARAM_SHIFT; -int sysctl_tcp_vegas_beta = 3<<V_PARAM_SHIFT; -int sysctl_tcp_vegas_gamma = 1<<V_PARAM_SHIFT; -int sysctl_tcp_bic = 1; -int sysctl_tcp_bic_fast_convergence = 1; -int sysctl_tcp_bic_low_window = 14; -int sysctl_tcp_bic_beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ - #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ @@ -333,15 +318,6 @@ static void tcp_init_buffer_space(struct sock *sk) tp->snd_cwnd_stamp = tcp_time_stamp; } -static void init_bictcp(struct tcp_sock *tp) -{ - tp->bictcp.cnt = 0; - - tp->bictcp.last_max_cwnd = 0; - tp->bictcp.last_cwnd = 0; - tp->bictcp.last_stamp = 0; -} - /* 5. Recalculate window clamp after socket hit its memory bounds. */ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) { @@ -558,45 +534,6 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_ tcp_grow_window(sk, tp, skb); } -/* When starting a new connection, pin down the current choice of - * congestion algorithm. - */ -void tcp_ca_init(struct tcp_sock *tp) -{ - if (sysctl_tcp_westwood) - tp->adv_cong = TCP_WESTWOOD; - else if (sysctl_tcp_bic) - tp->adv_cong = TCP_BIC; - else if (sysctl_tcp_vegas_cong_avoid) { - tp->adv_cong = TCP_VEGAS; - tp->vegas.baseRTT = 0x7fffffff; - tcp_vegas_enable(tp); - } -} - -/* Do RTT sampling needed for Vegas. - * Basically we: - * o min-filter RTT samples from within an RTT to get the current - * propagation delay + queuing delay (we are min-filtering to try to - * avoid the effects of delayed ACKs) - * o min-filter RTT samples from a much longer window (forever for now) - * to find the propagation delay (baseRTT) - */ -static inline void vegas_rtt_calc(struct tcp_sock *tp, __u32 rtt) -{ - __u32 vrtt = rtt + 1; /* Never allow zero rtt or baseRTT */ - - /* Filter to find propagation delay: */ - if (vrtt < tp->vegas.baseRTT) - tp->vegas.baseRTT = vrtt; - - /* Find the min RTT during the last RTT to find - * the current prop. delay + queuing delay: - */ - tp->vegas.minRTT = min(tp->vegas.minRTT, vrtt); - tp->vegas.cntRTT++; -} - /* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge @@ -606,13 +543,10 @@ static inline void vegas_rtt_calc(struct tcp_sock *tp, __u32 rtt) * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ -static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt) +static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt) { long m = mrtt; /* RTT */ - if (tcp_vegas_enabled(tp)) - vegas_rtt_calc(tp, mrtt); - /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. @@ -670,7 +604,8 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt) tp->rtt_seq = tp->snd_nxt; } - tcp_westwood_update_rtt(tp, tp->srtt >> 3); + if (tp->ca_ops->rtt_sample) + tp->ca_ops->rtt_sample(tp, *usrtt); } /* Calculate rto without backoff. 
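The estimator body elided by the hunk above is the Jacobson/Karels algorithm the comment cites. A userspace rendition of the standard update, with srtt scaled <<3 and mdev <<2 so both EWMAs reduce to shifts and adds; the kernel adds windowed-variance handling around tp->rtt_seq that is omitted here.

#include <stdio.h>

/* srtt += (m - srtt)/8 and mdev += (|m - srtt| - mdev)/4, computed
 * on the scaled values. */
static void rtt_sample(long m, long *srtt8, long *mdev4)
{
    if (*srtt8 == 0) {      /* first measurement seeds both */
        *srtt8 = m << 3;
        *mdev4 = m << 1;
        return;
    }
    m -= *srtt8 >> 3;       /* m is now the error term */
    *srtt8 += m;            /* srtt <- 7/8 srtt + 1/8 sample */
    if (m < 0)
        m = -m;
    m -= *mdev4 >> 2;
    *mdev4 += m;            /* mdev <- 3/4 mdev + 1/4 |error| */
}

int main(void)
{
    long srtt8 = 0, mdev4 = 0;
    long samples[] = { 100, 120, 80, 300, 100 };
    for (int i = 0; i < 5; i++) {
        rtt_sample(samples[i], &srtt8, &mdev4);
        printf("sample %3ld -> srtt %ld mdev %ld\n",
               samples[i], srtt8 >> 3, mdev4 >> 2);
    }
    return 0;
}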
This is the second half of Van Jacobson's @@ -1185,8 +1120,8 @@ void tcp_enter_frto(struct sock *sk) tp->snd_una == tp->high_seq || (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(tp); - if (!tcp_westwood_ssthresh(tp)) - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); + tcp_ca_event(tp, CA_EVENT_FRTO); } /* Have to clear retransmission markers here to keep the bookkeeping @@ -1252,8 +1187,6 @@ static void tcp_enter_frto_loss(struct sock *sk) tcp_set_ca_state(tp, TCP_CA_Loss); tp->high_seq = tp->frto_highmark; TCP_ECN_queue_cwr(tp); - - init_bictcp(tp); } void tcp_clear_retrans(struct tcp_sock *tp) @@ -1283,7 +1216,8 @@ void tcp_enter_loss(struct sock *sk, int how) if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(tp); - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); + tcp_ca_event(tp, CA_EVENT_LOSS); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; @@ -1596,28 +1530,14 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp) } /* Decrease cwnd each second ack. */ - static void tcp_cwnd_down(struct tcp_sock *tp) { int decr = tp->snd_cwnd_cnt + 1; - __u32 limit; - - /* - * TCP Westwood - * Here limit is evaluated as BWestimation*RTTmin (for obtaining it - * in packets we use mss_cache). If sysctl_tcp_westwood is off - * tcp_westwood_bw_rttmin() returns 0. In such case snd_ssthresh is - * still used as usual. It prevents other strange cases in which - * BWE*RTTmin could assume value 0. It should not happen but... - */ - - if (!(limit = tcp_westwood_bw_rttmin(tp))) - limit = tp->snd_ssthresh/2; tp->snd_cwnd_cnt = decr&1; decr >>= 1; - if (decr && tp->snd_cwnd > limit) + if (decr && tp->snd_cwnd > tp->ca_ops->min_cwnd(tp)) tp->snd_cwnd -= decr; tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1); @@ -1654,8 +1574,8 @@ static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg) static void tcp_undo_cwr(struct tcp_sock *tp, int undo) { if (tp->prior_ssthresh) { - if (tcp_is_bic(tp)) - tp->snd_cwnd = max(tp->snd_cwnd, tp->bictcp.last_max_cwnd); + if (tp->ca_ops->undo_cwnd) + tp->snd_cwnd = tp->ca_ops->undo_cwnd(tp); else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1); @@ -1767,11 +1687,9 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) static inline void tcp_complete_cwr(struct tcp_sock *tp) { - if (tcp_westwood_cwnd(tp)) - tp->snd_ssthresh = tp->snd_cwnd; - else - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); tp->snd_cwnd_stamp = tcp_time_stamp; + tcp_ca_event(tp, CA_EVENT_COMPLETE_CWR); } static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) @@ -1946,7 +1864,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, if (tp->ca_state < TCP_CA_CWR) { if (!(flag&FLAG_ECE)) tp->prior_ssthresh = tcp_current_ssthresh(tp); - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); TCP_ECN_queue_cwr(tp); } @@ -1963,7 +1881,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, /* Read draft-ietf-tcplw-high-performance before mucking * with this code. 
(Superceeds RFC1323) */ -static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag) +static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) { __u32 seq_rtt; @@ -1983,13 +1901,13 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag) * in window is lost... Voila. --ANK (010210) */ seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; - tcp_rtt_estimator(tp, seq_rtt); + tcp_rtt_estimator(tp, seq_rtt, usrtt); tcp_set_rto(tp); tp->backoff = 0; tcp_bound_rto(tp); } -static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, int flag) +static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag) { /* We don't have a timestamp. Can only use * packets that are not retransmitted to determine @@ -2003,338 +1921,29 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, int flag) if (flag & FLAG_RETRANS_DATA_ACKED) return; - tcp_rtt_estimator(tp, seq_rtt); + tcp_rtt_estimator(tp, seq_rtt, usrtt); tcp_set_rto(tp); tp->backoff = 0; tcp_bound_rto(tp); } static inline void tcp_ack_update_rtt(struct tcp_sock *tp, - int flag, s32 seq_rtt) + int flag, s32 seq_rtt, u32 *usrtt) { /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) - tcp_ack_saw_tstamp(tp, flag); + tcp_ack_saw_tstamp(tp, usrtt, flag); else if (seq_rtt >= 0) - tcp_ack_no_tstamp(tp, seq_rtt, flag); + tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag); } -/* - * Compute congestion window to use. - * - * This is from the implementation of BICTCP in - * Lison-Xu, Kahaled Harfoush, and Injog Rhee. - * "Binary Increase Congestion Control for Fast, Long Distance - * Networks" in InfoComm 2004 - * Available from: - * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf - * - * Unless BIC is enabled and congestion window is large - * this behaves the same as the original Reno. - */ -static inline __u32 bictcp_cwnd(struct tcp_sock *tp) -{ - /* orignal Reno behaviour */ - if (!tcp_is_bic(tp)) - return tp->snd_cwnd; - - if (tp->bictcp.last_cwnd == tp->snd_cwnd && - (s32)(tcp_time_stamp - tp->bictcp.last_stamp) <= (HZ>>5)) - return tp->bictcp.cnt; - - tp->bictcp.last_cwnd = tp->snd_cwnd; - tp->bictcp.last_stamp = tcp_time_stamp; - - /* start off normal */ - if (tp->snd_cwnd <= sysctl_tcp_bic_low_window) - tp->bictcp.cnt = tp->snd_cwnd; - - /* binary increase */ - else if (tp->snd_cwnd < tp->bictcp.last_max_cwnd) { - __u32 dist = (tp->bictcp.last_max_cwnd - tp->snd_cwnd) - / BICTCP_B; - - if (dist > BICTCP_MAX_INCREMENT) - /* linear increase */ - tp->bictcp.cnt = tp->snd_cwnd / BICTCP_MAX_INCREMENT; - else if (dist <= 1U) - /* binary search increase */ - tp->bictcp.cnt = tp->snd_cwnd * BICTCP_FUNC_OF_MIN_INCR - / BICTCP_B; - else - /* binary search increase */ - tp->bictcp.cnt = tp->snd_cwnd / dist; - } else { - /* slow start amd linear increase */ - if (tp->snd_cwnd < tp->bictcp.last_max_cwnd + BICTCP_B) - /* slow start */ - tp->bictcp.cnt = tp->snd_cwnd * BICTCP_FUNC_OF_MIN_INCR - / BICTCP_B; - else if (tp->snd_cwnd < tp->bictcp.last_max_cwnd - + BICTCP_MAX_INCREMENT*(BICTCP_B-1)) - /* slow start */ - tp->bictcp.cnt = tp->snd_cwnd * (BICTCP_B-1) - / (tp->snd_cwnd-tp->bictcp.last_max_cwnd); - else - /* linear increase */ - tp->bictcp.cnt = tp->snd_cwnd / BICTCP_MAX_INCREMENT; - } - return tp->bictcp.cnt; -} - -/* This is Jacobson's slow start and congestion avoidance. - * SIGCOMM '88, p. 328. 
- */ -static inline void reno_cong_avoid(struct tcp_sock *tp) +static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, + u32 in_flight, int good) { - if (tp->snd_cwnd <= tp->snd_ssthresh) { - /* In "safe" area, increase. */ - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; - } else { - /* In dangerous area, increase slowly. - * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd - */ - if (tp->snd_cwnd_cnt >= bictcp_cwnd(tp)) { - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; - tp->snd_cwnd_cnt=0; - } else - tp->snd_cwnd_cnt++; - } + tp->ca_ops->cong_avoid(tp, ack, rtt, in_flight, good); tp->snd_cwnd_stamp = tcp_time_stamp; } -/* This is based on the congestion detection/avoidance scheme described in - * Lawrence S. Brakmo and Larry L. Peterson. - * "TCP Vegas: End to end congestion avoidance on a global internet." - * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480, - * October 1995. Available from: - * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps - * - * See http://www.cs.arizona.edu/xkernel/ for their implementation. - * The main aspects that distinguish this implementation from the - * Arizona Vegas implementation are: - * o We do not change the loss detection or recovery mechanisms of - * Linux in any way. Linux already recovers from losses quite well, - * using fine-grained timers, NewReno, and FACK. - * o To avoid the performance penalty imposed by increasing cwnd - * only every-other RTT during slow start, we increase during - * every RTT during slow start, just like Reno. - * o Largely to allow continuous cwnd growth during slow start, - * we use the rate at which ACKs come back as the "actual" - * rate, rather than the rate at which data is sent. - * o To speed convergence to the right rate, we set the cwnd - * to achieve the right ("actual") rate when we exit slow start. - * o To filter out the noise caused by delayed ACKs, we use the - * minimum RTT sample observed during the last RTT to calculate - * the actual rate. - * o When the sender re-starts from idle, it waits until it has - * received ACKs for an entire flight of new data before making - * a cwnd adjustment decision. The original Vegas implementation - * assumed senders never went idle. - */ -static void vegas_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt) -{ - /* The key players are v_beg_snd_una and v_beg_snd_nxt. - * - * These are so named because they represent the approximate values - * of snd_una and snd_nxt at the beginning of the current RTT. More - * precisely, they represent the amount of data sent during the RTT. - * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt, - * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding - * bytes of data have been ACKed during the course of the RTT, giving - * an "actual" rate of: - * - * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration) - * - * Unfortunately, v_beg_snd_una is not exactly equal to snd_una, - * because delayed ACKs can cover more than one segment, so they - * don't line up nicely with the boundaries of RTTs. - * - * Another unfortunate fact of life is that delayed ACKs delay the - * advance of the left edge of our send window, so that the number - * of bytes we send in an RTT is often less than our cwnd will allow. - * So we keep track of our cwnd separately, in v_beg_snd_cwnd. - */ - - if (after(ack, tp->vegas.beg_snd_nxt)) { - /* Do the Vegas once-per-RTT cwnd adjustment. 
*/ - u32 old_wnd, old_snd_cwnd; - - - /* Here old_wnd is essentially the window of data that was - * sent during the previous RTT, and has all - * been acknowledged in the course of the RTT that ended - * with the ACK we just received. Likewise, old_snd_cwnd - * is the cwnd during the previous RTT. - */ - old_wnd = (tp->vegas.beg_snd_nxt - tp->vegas.beg_snd_una) / - tp->mss_cache_std; - old_snd_cwnd = tp->vegas.beg_snd_cwnd; - - /* Save the extent of the current window so we can use this - * at the end of the next RTT. - */ - tp->vegas.beg_snd_una = tp->vegas.beg_snd_nxt; - tp->vegas.beg_snd_nxt = tp->snd_nxt; - tp->vegas.beg_snd_cwnd = tp->snd_cwnd; - - /* Take into account the current RTT sample too, to - * decrease the impact of delayed acks. This double counts - * this sample since we count it for the next window as well, - * but that's not too awful, since we're taking the min, - * rather than averaging. - */ - vegas_rtt_calc(tp, seq_rtt); - - /* We do the Vegas calculations only if we got enough RTT - * samples that we can be reasonably sure that we got - * at least one RTT sample that wasn't from a delayed ACK. - * If we only had 2 samples total, - * then that means we're getting only 1 ACK per RTT, which - * means they're almost certainly delayed ACKs. - * If we have 3 samples, we should be OK. - */ - - if (tp->vegas.cntRTT <= 2) { - /* We don't have enough RTT samples to do the Vegas - * calculation, so we'll behave like Reno. - */ - if (tp->snd_cwnd > tp->snd_ssthresh) - tp->snd_cwnd++; - } else { - u32 rtt, target_cwnd, diff; - - /* We have enough RTT samples, so, using the Vegas - * algorithm, we determine if we should increase or - * decrease cwnd, and by how much. - */ - - /* Pluck out the RTT we are using for the Vegas - * calculations. This is the min RTT seen during the - * last RTT. Taking the min filters out the effects - * of delayed ACKs, at the cost of noticing congestion - * a bit later. - */ - rtt = tp->vegas.minRTT; - - /* Calculate the cwnd we should have, if we weren't - * going too fast. - * - * This is: - * (actual rate in segments) * baseRTT - * We keep it as a fixed point number with - * V_PARAM_SHIFT bits to the right of the binary point. - */ - target_cwnd = ((old_wnd * tp->vegas.baseRTT) - << V_PARAM_SHIFT) / rtt; - - /* Calculate the difference between the window we had, - * and the window we would like to have. This quantity - * is the "Diff" from the Arizona Vegas papers. - * - * Again, this is a fixed point number with - * V_PARAM_SHIFT bits to the right of the binary - * point. - */ - diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; - - if (tp->snd_cwnd < tp->snd_ssthresh) { - /* Slow start. */ - if (diff > sysctl_tcp_vegas_gamma) { - /* Going too fast. Time to slow down - * and switch to congestion avoidance. - */ - tp->snd_ssthresh = 2; - - /* Set cwnd to match the actual rate - * exactly: - * cwnd = (actual rate) * baseRTT - * Then we add 1 because the integer - * truncation robs us of full link - * utilization. - */ - tp->snd_cwnd = min(tp->snd_cwnd, - (target_cwnd >> - V_PARAM_SHIFT)+1); - - } - } else { - /* Congestion avoidance. */ - u32 next_snd_cwnd; - - /* Figure out where we would like cwnd - * to be. - */ - if (diff > sysctl_tcp_vegas_beta) { - /* The old window was too fast, so - * we slow down. - */ - next_snd_cwnd = old_snd_cwnd - 1; - } else if (diff < sysctl_tcp_vegas_alpha) { - /* We don't have enough extra packets - * in the network, so speed up. 
- */ - next_snd_cwnd = old_snd_cwnd + 1; - } else { - /* Sending just as fast as we - * should be. - */ - next_snd_cwnd = old_snd_cwnd; - } - - /* Adjust cwnd upward or downward, toward the - * desired value. - */ - if (next_snd_cwnd > tp->snd_cwnd) - tp->snd_cwnd++; - else if (next_snd_cwnd < tp->snd_cwnd) - tp->snd_cwnd--; - } - } - - /* Wipe the slate clean for the next RTT. */ - tp->vegas.cntRTT = 0; - tp->vegas.minRTT = 0x7fffffff; - } - - /* The following code is executed for every ack we receive, - * except for conditions checked in should_advance_cwnd() - * before the call to tcp_cong_avoid(). Mainly this means that - * we only execute this code if the ack actually acked some - * data. - */ - - /* If we are in slow start, increase our cwnd in response to this ACK. - * (If we are not in slow start then we are in congestion avoidance, - * and adjust our congestion window only once per RTT. See the code - * above.) - */ - if (tp->snd_cwnd <= tp->snd_ssthresh) - tp->snd_cwnd++; - - /* to keep cwnd from growing without bound */ - tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp); - - /* Make sure that we are never so timid as to reduce our cwnd below - * 2 MSS. - * - * Going below 2 MSS would risk huge delayed ACKs from our receiver. - */ - tp->snd_cwnd = max(tp->snd_cwnd, 2U); - - tp->snd_cwnd_stamp = tcp_time_stamp; -} - -static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt) -{ - if (tcp_vegas_enabled(tp)) - vegas_cong_avoid(tp, ack, seq_rtt); - else - reno_cong_avoid(tp); -} - /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ @@ -2415,13 +2024,18 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, /* Remove acknowledged frames from the retransmission queue. */ -static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) +static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; __u32 now = tcp_time_stamp; int acked = 0; __s32 seq_rtt = -1; + struct timeval usnow; + u32 pkts_acked = 0; + + if (seq_usrtt) + do_gettimeofday(&usnow); while ((skb = skb_peek(&sk->sk_write_queue)) && skb != sk->sk_send_head) { @@ -2448,6 +2062,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) */ if (!(scb->flags & TCPCB_FLAG_SYN)) { acked |= FLAG_DATA_ACKED; + ++pkts_acked; } else { acked |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; @@ -2461,6 +2076,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) seq_rtt = -1; } else if (seq_rtt < 0) seq_rtt = now - scb->when; + if (seq_usrtt) + *seq_usrtt = (usnow.tv_sec - skb->stamp.tv_sec) * 1000000 + + (usnow.tv_usec - skb->stamp.tv_usec); + if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= tcp_skb_pcount(skb); if (sacked & TCPCB_LOST) @@ -2479,8 +2098,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) } if (acked&FLAG_ACKED) { - tcp_ack_update_rtt(tp, acked, seq_rtt); + tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt); tcp_ack_packets_out(sk, tp); + + if (tp->ca_ops->pkts_acked) + tp->ca_ops->pkts_acked(tp, pkts_acked); } #if FASTRETRANS_DEBUG > 0 @@ -2624,257 +2246,6 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) tp->frto_counter = (tp->frto_counter + 1) % 3; } -/* - * TCP Westwood+ - */ - -/* - * @init_westwood - * This function initializes fields used in TCP Westwood+. We can't - * get no information about RTTmin at this time so we simply set it to - * TCP_WESTWOOD_INIT_RTT. 
This value was chosen to be too conservative - * since in this way we're sure it will be updated in a consistent - * way as soon as possible. It will reasonably happen within the first - * RTT period of the connection lifetime. - */ - -static void init_westwood(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.bw_ns_est = 0; - tp->westwood.bw_est = 0; - tp->westwood.accounted = 0; - tp->westwood.cumul_ack = 0; - tp->westwood.rtt_win_sx = tcp_time_stamp; - tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT; - tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT; - tp->westwood.snd_una = tp->snd_una; -} - -/* - * @westwood_do_filter - * Low-pass filter. Implemented using constant coeffients. - */ - -static inline __u32 westwood_do_filter(__u32 a, __u32 b) -{ - return (((7 * a) + b) >> 3); -} - -static void westwood_filter(struct sock *sk, __u32 delta) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.bw_ns_est = - westwood_do_filter(tp->westwood.bw_ns_est, - tp->westwood.bk / delta); - tp->westwood.bw_est = - westwood_do_filter(tp->westwood.bw_est, - tp->westwood.bw_ns_est); -} - -/* - * @westwood_update_rttmin - * It is used to update RTTmin. In this case we MUST NOT use - * WESTWOOD_RTT_MIN minimum bound since we could be on a LAN! - */ - -static inline __u32 westwood_update_rttmin(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - __u32 rttmin = tp->westwood.rtt_min; - - if (tp->westwood.rtt != 0 && - (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)) - rttmin = tp->westwood.rtt; - - return rttmin; -} - -/* - * @westwood_acked - * Evaluate increases for dk. - */ - -static inline __u32 westwood_acked(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - - return tp->snd_una - tp->westwood.snd_una; -} - -/* - * @westwood_new_window - * It evaluates if we are receiving data inside the same RTT window as - * when we started. - * Return value: - * It returns 0 if we are still evaluating samples in the same RTT - * window, 1 if the sample has to be considered in the next window. - */ - -static int westwood_new_window(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - __u32 left_bound; - __u32 rtt; - int ret = 0; - - left_bound = tp->westwood.rtt_win_sx; - rtt = max(tp->westwood.rtt, (u32) TCP_WESTWOOD_RTT_MIN); - - /* - * A RTT-window has passed. Be careful since if RTT is less than - * 50ms we don't filter but we continue 'building the sample'. - * This minimum limit was choosen since an estimation on small - * time intervals is better to avoid... - * Obvioulsy on a LAN we reasonably will always have - * right_bound = left_bound + WESTWOOD_RTT_MIN - */ - - if ((left_bound + rtt) < tcp_time_stamp) - ret = 1; - - return ret; -} - -/* - * @westwood_update_window - * It updates RTT evaluation window if it is the right moment to do - * it. If so it calls filter for evaluating bandwidth. - */ - -static void __westwood_update_window(struct sock *sk, __u32 now) -{ - struct tcp_sock *tp = tcp_sk(sk); - __u32 delta = now - tp->westwood.rtt_win_sx; - - if (delta) { - if (tp->westwood.rtt) - westwood_filter(sk, delta); - - tp->westwood.bk = 0; - tp->westwood.rtt_win_sx = tcp_time_stamp; - } -} - - -static void westwood_update_window(struct sock *sk, __u32 now) -{ - if (westwood_new_window(sk)) - __westwood_update_window(sk, now); -} - -/* - * @__tcp_westwood_fast_bw - * It is called when we are in fast path. In particular it is called when - * header prediction is successfull. 
In such case infact update is - * straight forward and doesn't need any particular care. - */ - -static void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - westwood_update_window(sk, tcp_time_stamp); - - tp->westwood.bk += westwood_acked(sk); - tp->westwood.snd_una = tp->snd_una; - tp->westwood.rtt_min = westwood_update_rttmin(sk); -} - -static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb) -{ - if (tcp_is_westwood(tcp_sk(sk))) - __tcp_westwood_fast_bw(sk, skb); -} - - -/* - * @westwood_dupack_update - * It updates accounted and cumul_ack when receiving a dupack. - */ - -static void westwood_dupack_update(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.accounted += tp->mss_cache_std; - tp->westwood.cumul_ack = tp->mss_cache_std; -} - -static inline int westwood_may_change_cumul(struct tcp_sock *tp) -{ - return (tp->westwood.cumul_ack > tp->mss_cache_std); -} - -static inline void westwood_partial_update(struct tcp_sock *tp) -{ - tp->westwood.accounted -= tp->westwood.cumul_ack; - tp->westwood.cumul_ack = tp->mss_cache_std; -} - -static inline void westwood_complete_update(struct tcp_sock *tp) -{ - tp->westwood.cumul_ack -= tp->westwood.accounted; - tp->westwood.accounted = 0; -} - -/* - * @westwood_acked_count - * This function evaluates cumul_ack for evaluating dk in case of - * delayed or partial acks. - */ - -static inline __u32 westwood_acked_count(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.cumul_ack = westwood_acked(sk); - - /* If cumul_ack is 0 this is a dupack since it's not moving - * tp->snd_una. - */ - if (!(tp->westwood.cumul_ack)) - westwood_dupack_update(sk); - - if (westwood_may_change_cumul(tp)) { - /* Partial or delayed ack */ - if (tp->westwood.accounted >= tp->westwood.cumul_ack) - westwood_partial_update(tp); - else - westwood_complete_update(tp); - } - - tp->westwood.snd_una = tp->snd_una; - - return tp->westwood.cumul_ack; -} - - -/* - * @__tcp_westwood_slow_bw - * It is called when something is going wrong..even if there could - * be no problems! Infact a simple delayed packet may trigger a - * dupack. But we need to be careful in such case. - */ - -static void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - westwood_update_window(sk, tcp_time_stamp); - - tp->westwood.bk += westwood_acked_count(sk); - tp->westwood.rtt_min = westwood_update_rttmin(sk); -} - -static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb) -{ - if (tcp_is_westwood(tcp_sk(sk))) - __tcp_westwood_slow_bw(sk, skb); -} - /* This routine deals with incoming acks, but not outgoing ones. 
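The Westwood+ code removed above lives on as the separate tcp_westwood module under the new framework, but its bandwidth filter deserves a standalone look: westwood_do_filter is an EWMA, new = (7*old + sample)/8, and westwood_filter applies it twice, once over the noisy per-window sample and once more over the smoothed result. A userspace sketch showing how a single outlier is damped; both estimates start from zero, as in the removed init_westwood.

#include <stdio.h>

/* Low-pass filter with constant coefficients, as in the removed
 * westwood_do_filter above. */
static unsigned do_filter(unsigned a, unsigned b)
{
    return (7 * a + b) >> 3;
}

int main(void)
{
    unsigned bw_ns_est = 0, bw_est = 0;
    unsigned samples[] = { 100, 100, 400, 100, 100 };   /* one outlier */
    for (int i = 0; i < 5; i++) {
        bw_ns_est = do_filter(bw_ns_est, samples[i]);   /* first stage */
        bw_est = do_filter(bw_est, bw_ns_est);          /* second stage */
        printf("sample %3u -> bw_ns_est %3u, bw_est %3u\n",
               samples[i], bw_ns_est, bw_est);
    }
    return 0;
}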
*/ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) { @@ -2884,6 +2255,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) u32 ack = TCP_SKB_CB(skb)->ack_seq; u32 prior_in_flight; s32 seq_rtt; + s32 seq_usrtt = 0; int prior_packets; /* If the ack is newer than sent or older than previous acks @@ -2902,9 +2274,10 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) */ tcp_update_wl(tp, ack, ack_seq); tp->snd_una = ack; - tcp_westwood_fast_bw(sk, skb); flag |= FLAG_WIN_UPDATE; + tcp_ca_event(tp, CA_EVENT_FAST_ACK); + NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); } else { if (ack_seq != TCP_SKB_CB(skb)->end_seq) @@ -2920,7 +2293,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th)) flag |= FLAG_ECE; - tcp_westwood_slow_bw(sk,skb); + tcp_ca_event(tp, CA_EVENT_SLOW_ACK); } /* We passed data and got it acked, remove any soft error @@ -2935,22 +2308,20 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) prior_in_flight = tcp_packets_in_flight(tp); /* See if we can take anything off of the retransmit queue. */ - flag |= tcp_clean_rtx_queue(sk, &seq_rtt); + flag |= tcp_clean_rtx_queue(sk, &seq_rtt, + tp->ca_ops->rtt_sample ? &seq_usrtt : NULL); if (tp->frto_counter) tcp_process_frto(sk, prior_snd_una); if (tcp_ack_is_dubious(tp, flag)) { /* Advanve CWND, if state allows this. */ - if ((flag & FLAG_DATA_ACKED) && - (tcp_vegas_enabled(tp) || prior_in_flight >= tp->snd_cwnd) && - tcp_may_raise_cwnd(tp, flag)) - tcp_cong_avoid(tp, ack, seq_rtt); + if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(tp, flag)) + tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 0); tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); } else { - if ((flag & FLAG_DATA_ACKED) && - (tcp_vegas_enabled(tp) || prior_in_flight >= tp->snd_cwnd)) - tcp_cong_avoid(tp, ack, seq_rtt); + if ((flag & FLAG_DATA_ACKED)) + tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 1); } if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) @@ -4552,6 +3923,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_init_metrics(sk); + tcp_init_congestion_control(tp); + /* Prevent spurious tcp_cwnd_restart() on first data * packet. */ @@ -4708,9 +4081,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, if(tp->af_specific->conn_request(sk, skb) < 0) return 1; - init_westwood(sk); - init_bictcp(tp); - /* Now we have several options: In theory there is * nothing else in the frame. KA9Q has an option to * send data with the syn, BSD accepts data with the @@ -4732,9 +4102,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, goto discard; case TCP_SYN_SENT: - init_westwood(sk); - init_bictcp(tp); - queued = tcp_rcv_synsent_state_process(sk, skb, th, len); if (queued >= 0) return queued; @@ -4816,7 +4183,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, */ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !tp->srtt) - tcp_ack_saw_tstamp(tp, 0); + tcp_ack_saw_tstamp(tp, 0, 0); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; @@ -4828,6 +4195,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, tcp_init_metrics(sk); + tcp_init_congestion_control(tp); + /* Prevent spurious tcp_cwnd_restart() on * first data packet. */ diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index dad98e4..ebf1123 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -36,7 +36,7 @@ * ACK bit. 
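The structural change running through tcp_ack and the recovery paths above is that hard-coded per-algorithm branches become calls through tp->ca_ops. A toy model of that split (all names here are illustrative, not the kernel's): the algorithm supplies the backoff target via its ssthresh hook, exactly as tcp_enter_loss now does, while the core keeps ownership of the window reset.

#include <stdio.h>

struct sock_state { unsigned cwnd, ssthresh; };

struct ca_ops {
    const char *name;
    unsigned (*ssthresh)(struct sock_state *);
    void (*cong_avoid)(struct sock_state *);
};

static unsigned reno_ssthresh(struct sock_state *s)
{
    return s->cwnd / 2 > 2 ? s->cwnd / 2 : 2;
}

static void reno_cong_avoid(struct sock_state *s)
{
    s->cwnd++;   /* stand-in for the real additive increase */
}

static const struct ca_ops reno = { "reno", reno_ssthresh, reno_cong_avoid };

static void on_loss(struct sock_state *s, const struct ca_ops *ops)
{
    s->ssthresh = ops->ssthresh(s);   /* algorithm picks the backoff */
    s->cwnd = 1;                      /* the core still owns the reset */
}

int main(void)
{
    struct sock_state s = { 10, 20 };
    const struct ca_ops *ops = &reno;
    ops->cong_avoid(&s);              /* dispatch, no if(is_reno) chains */
    on_loss(&s, ops);
    printf("%s: cwnd %u, ssthresh %u\n", ops->name, s.cwnd, s.ssthresh);
    return 0;
}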
* Andi Kleen : Implemented fast path mtu discovery. * Fixed many serious bugs in the - * open_request handling and moved + * request_sock handling and moved * most of it into the af independent code. * Added tail drop and some other bugfixes. * Added new listen sematics. @@ -869,21 +869,23 @@ static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd) return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1)); } -static struct open_request *tcp_v4_search_req(struct tcp_sock *tp, - struct open_request ***prevp, +static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, + struct request_sock ***prevp, __u16 rport, __u32 raddr, __u32 laddr) { - struct tcp_listen_opt *lopt = tp->listen_opt; - struct open_request *req, **prev; + struct listen_sock *lopt = tp->accept_queue.listen_opt; + struct request_sock *req, **prev; for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)]; (req = *prev) != NULL; prev = &req->dl_next) { - if (req->rmt_port == rport && - req->af.v4_req.rmt_addr == raddr && - req->af.v4_req.loc_addr == laddr && - TCP_INET_FAMILY(req->class->family)) { + const struct inet_request_sock *ireq = inet_rsk(req); + + if (ireq->rmt_port == rport && + ireq->rmt_addr == raddr && + ireq->loc_addr == laddr && + TCP_INET_FAMILY(req->rsk_ops->family)) { BUG_TRAP(!req->sk); *prevp = prev; break; @@ -893,21 +895,13 @@ static struct open_request *tcp_v4_search_req(struct tcp_sock *tp, return req; } -static void tcp_v4_synq_add(struct sock *sk, struct open_request *req) +static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req) { struct tcp_sock *tp = tcp_sk(sk); - struct tcp_listen_opt *lopt = tp->listen_opt; - u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd); - - req->expires = jiffies + TCP_TIMEOUT_INIT; - req->retrans = 0; - req->sk = NULL; - req->dl_next = lopt->syn_table[h]; - - write_lock(&tp->syn_wait_lock); - lopt->syn_table[h] = req; - write_unlock(&tp->syn_wait_lock); + struct listen_sock *lopt = tp->accept_queue.listen_opt; + u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); + reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); tcp_synq_added(sk); } @@ -1050,7 +1044,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) } switch (sk->sk_state) { - struct open_request *req, **prev; + struct request_sock *req, **prev; case TCP_LISTEN: if (sock_owned_by_user(sk)) goto out; @@ -1065,7 +1059,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) */ BUG_TRAP(!req->sk); - if (seq != req->snt_isn) { + if (seq != tcp_rsk(req)->snt_isn) { NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); goto out; } @@ -1254,28 +1248,29 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) tcp_tw_put(tw); } -static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req) +static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) { - tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd, + tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); } static struct dst_entry* tcp_v4_route_req(struct sock *sk, - struct open_request *req) + struct request_sock *req) { struct rtable *rt; - struct ip_options *opt = req->af.v4_req.opt; + const struct inet_request_sock *ireq = inet_rsk(req); + struct ip_options *opt = inet_rsk(req)->opt; struct flowi fl = { .oif = sk->sk_bound_dev_if, .nl_u = { .ip4_u = { .daddr = ((opt && opt->srr) ? 
opt->faddr : - req->af.v4_req.rmt_addr), - .saddr = req->af.v4_req.loc_addr, + ireq->rmt_addr), + .saddr = ireq->loc_addr, .tos = RT_CONN_FLAGS(sk) } }, .proto = IPPROTO_TCP, .uli_u = { .ports = { .sport = inet_sk(sk)->sport, - .dport = req->rmt_port } } }; + .dport = ireq->rmt_port } } }; if (ip_route_output_flow(&rt, &fl, sk, 0)) { IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); @@ -1291,12 +1286,13 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk, /* * Send a SYN-ACK after having received an ACK. - * This still operates on a open_request only, not on a big + * This still operates on a request_sock only, not on a big * socket. */ -static int tcp_v4_send_synack(struct sock *sk, struct open_request *req, +static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, struct dst_entry *dst) { + const struct inet_request_sock *ireq = inet_rsk(req); int err = -1; struct sk_buff * skb; @@ -1310,14 +1306,14 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req, struct tcphdr *th = skb->h.th; th->check = tcp_v4_check(th, skb->len, - req->af.v4_req.loc_addr, - req->af.v4_req.rmt_addr, + ireq->loc_addr, + ireq->rmt_addr, csum_partial((char *)th, skb->len, skb->csum)); - err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr, - req->af.v4_req.rmt_addr, - req->af.v4_req.opt); + err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, + ireq->rmt_addr, + ireq->opt); if (err == NET_XMIT_CN) err = 0; } @@ -1328,12 +1324,12 @@ out: } /* - * IPv4 open_request destructor. + * IPv4 request_sock destructor. */ -static void tcp_v4_or_free(struct open_request *req) +static void tcp_v4_reqsk_destructor(struct request_sock *req) { - if (req->af.v4_req.opt) - kfree(req->af.v4_req.opt); + if (inet_rsk(req)->opt) + kfree(inet_rsk(req)->opt); } static inline void syn_flood_warning(struct sk_buff *skb) @@ -1349,7 +1345,7 @@ static inline void syn_flood_warning(struct sk_buff *skb) } /* - * Save and compile IPv4 options into the open_request if needed. + * Save and compile IPv4 options into the request_sock if needed. */ static inline struct ip_options *tcp_v4_save_options(struct sock *sk, struct sk_buff *skb) @@ -1370,33 +1366,20 @@ static inline struct ip_options *tcp_v4_save_options(struct sock *sk, return dopt; } -/* - * Maximum number of SYN_RECV sockets in queue per LISTEN socket. - * One SYN_RECV socket costs about 80bytes on a 32bit machine. - * It would be better to replace it with a global counter for all sockets - * but then some measure against one socket starving all other sockets - * would be needed. - * - * It was 128 by default. Experiments with real servers show, that - * it is absolutely not enough even at 100conn/sec. 256 cures most - * of problems. This value is adjusted to 128 for very small machines - * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). - * Further increasing requires to change hash table size. 
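These tcp_ipv4.c hunks all follow one mechanical pattern: every field that used to live in the req->af.v4_req union is now reached through the inet_rsk() cast, and the TCP-only sequence numbers through tcp_rsk(). A minimal sketch of the pattern, using a hypothetical helper that is not part of the patch:

	/* Hypothetical helper, for illustration only: the accessor style
	 * that replaces the old req->af.v4_req union fields. */
	static void example_init_ireq(struct request_sock *req,
				      __u32 saddr, __u32 daddr, __u16 rport)
	{
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->loc_addr = daddr;		/* was req->af.v4_req.loc_addr */
		ireq->rmt_addr = saddr;		/* was req->af.v4_req.rmt_addr */
		ireq->rmt_port = rport;		/* was req->rmt_port */
		tcp_rsk(req)->snt_isn = 0;	/* was req->snt_isn */
	}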
- */ -int sysctl_max_syn_backlog = 256; - -struct or_calltable or_ipv4 = { +struct request_sock_ops tcp_request_sock_ops = { .family = PF_INET, + .obj_size = sizeof(struct tcp_request_sock), .rtx_syn_ack = tcp_v4_send_synack, - .send_ack = tcp_v4_or_send_ack, - .destructor = tcp_v4_or_free, + .send_ack = tcp_v4_reqsk_send_ack, + .destructor = tcp_v4_reqsk_destructor, .send_reset = tcp_v4_send_reset, }; int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { + struct inet_request_sock *ireq; struct tcp_options_received tmp_opt; - struct open_request *req; + struct request_sock *req; __u32 saddr = skb->nh.iph->saddr; __u32 daddr = skb->nh.iph->daddr; __u32 isn = TCP_SKB_CB(skb)->when; @@ -1433,7 +1416,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) goto drop; - req = tcp_openreq_alloc(); + req = reqsk_alloc(&tcp_request_sock_ops); if (!req) goto drop; @@ -1461,10 +1444,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_openreq_init(req, &tmp_opt, skb); - req->af.v4_req.loc_addr = daddr; - req->af.v4_req.rmt_addr = saddr; - req->af.v4_req.opt = tcp_v4_save_options(sk, skb); - req->class = &or_ipv4; + ireq = inet_rsk(req); + ireq->loc_addr = daddr; + ireq->rmt_addr = saddr; + ireq->opt = tcp_v4_save_options(sk, skb); if (!want_cookie) TCP_ECN_create_request(req, skb->h.th); @@ -1523,20 +1506,20 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) isn = tcp_v4_init_sequence(sk, skb); } - req->snt_isn = isn; + tcp_rsk(req)->snt_isn = isn; if (tcp_v4_send_synack(sk, req, dst)) goto drop_and_free; if (want_cookie) { - tcp_openreq_free(req); + reqsk_free(req); } else { tcp_v4_synq_add(sk, req); } return 0; drop_and_free: - tcp_openreq_free(req); + reqsk_free(req); drop: TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); return 0; @@ -1548,9 +1531,10 @@ drop: * now create the new socket. */ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, - struct open_request *req, + struct request_sock *req, struct dst_entry *dst) { + struct inet_request_sock *ireq; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; @@ -1570,11 +1554,12 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newtp = tcp_sk(newsk); newinet = inet_sk(newsk); - newinet->daddr = req->af.v4_req.rmt_addr; - newinet->rcv_saddr = req->af.v4_req.loc_addr; - newinet->saddr = req->af.v4_req.loc_addr; - newinet->opt = req->af.v4_req.opt; - req->af.v4_req.opt = NULL; + ireq = inet_rsk(req); + newinet->daddr = ireq->rmt_addr; + newinet->rcv_saddr = ireq->loc_addr; + newinet->saddr = ireq->loc_addr; + newinet->opt = ireq->opt; + ireq->opt = NULL; newinet->mc_index = tcp_v4_iif(skb); newinet->mc_ttl = skb->nh.iph->ttl; newtp->ext_header_len = 0; @@ -1605,9 +1590,9 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) struct iphdr *iph = skb->nh.iph; struct tcp_sock *tp = tcp_sk(sk); struct sock *nsk; - struct open_request **prev; + struct request_sock **prev; /* Find possible connection requests. 
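Note how reqsk_alloc(&tcp_request_sock_ops) replaces tcp_openreq_alloc() below, and how the ops table above now carries obj_size = sizeof(struct tcp_request_sock). Presumably that is so one allocator can serve differently sized per-family request socks; a sketch under that assumption (the real reqsk_alloc() is defined outside this diff):

	/* Sketch only, assuming obj_size-driven allocation; not the real code. */
	static inline struct request_sock *example_reqsk_alloc(struct request_sock_ops *ops)
	{
		struct request_sock *req = kmalloc(ops->obj_size, GFP_ATOMIC);

		if (req != NULL)
			req->rsk_ops = ops;	/* e.g. &tcp_request_sock_ops */
		return req;
	}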
*/ - struct open_request *req = tcp_v4_search_req(tp, &prev, th->source, + struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source, iph->saddr, iph->daddr); if (req) return tcp_check_req(sk, skb, req, prev); @@ -2063,6 +2048,7 @@ static int tcp_v4_init_sock(struct sock *sk) tp->mss_cache_std = tp->mss_cache = 536; tp->reordering = sysctl_tcp_reordering; + tp->ca_ops = &tcp_init_congestion_ops; sk->sk_state = TCP_CLOSE; @@ -2085,6 +2071,8 @@ int tcp_v4_destroy_sock(struct sock *sk) tcp_clear_xmit_timers(sk); + tcp_cleanup_congestion_control(tp); + /* Cleanup up the write buffer. */ sk_stream_writequeue_purge(sk); @@ -2144,13 +2132,13 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ++st->num; if (st->state == TCP_SEQ_STATE_OPENREQ) { - struct open_request *req = cur; + struct request_sock *req = cur; tp = tcp_sk(st->syn_wait_sk); req = req->dl_next; while (1) { while (req) { - if (req->class->family == st->family) { + if (req->rsk_ops->family == st->family) { cur = req; goto out; } @@ -2159,17 +2147,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur) if (++st->sbucket >= TCP_SYNQ_HSIZE) break; get_req: - req = tp->listen_opt->syn_table[st->sbucket]; + req = tp->accept_queue.listen_opt->syn_table[st->sbucket]; } sk = sk_next(st->syn_wait_sk); st->state = TCP_SEQ_STATE_LISTENING; - read_unlock_bh(&tp->syn_wait_lock); + read_unlock_bh(&tp->accept_queue.syn_wait_lock); } else { tp = tcp_sk(sk); - read_lock_bh(&tp->syn_wait_lock); - if (tp->listen_opt && tp->listen_opt->qlen) + read_lock_bh(&tp->accept_queue.syn_wait_lock); + if (reqsk_queue_len(&tp->accept_queue)) goto start_req; - read_unlock_bh(&tp->syn_wait_lock); + read_unlock_bh(&tp->accept_queue.syn_wait_lock); sk = sk_next(sk); } get_sk: @@ -2179,8 +2167,8 @@ get_sk: goto out; } tp = tcp_sk(sk); - read_lock_bh(&tp->syn_wait_lock); - if (tp->listen_opt && tp->listen_opt->qlen) { + read_lock_bh(&tp->accept_queue.syn_wait_lock); + if (reqsk_queue_len(&tp->accept_queue)) { start_req: st->uid = sock_i_uid(sk); st->syn_wait_sk = sk; @@ -2188,7 +2176,7 @@ start_req: st->sbucket = 0; goto get_req; } - read_unlock_bh(&tp->syn_wait_lock); + read_unlock_bh(&tp->accept_queue.syn_wait_lock); } if (++st->bucket < TCP_LHTABLE_SIZE) { sk = sk_head(&tcp_listening_hash[st->bucket]); @@ -2375,7 +2363,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) case TCP_SEQ_STATE_OPENREQ: if (v) { struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); - read_unlock_bh(&tp->syn_wait_lock); + read_unlock_bh(&tp->accept_queue.syn_wait_lock); } case TCP_SEQ_STATE_LISTENING: if (v != SEQ_START_TOKEN) @@ -2451,18 +2439,19 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo) memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); } -static void get_openreq4(struct sock *sk, struct open_request *req, +static void get_openreq4(struct sock *sk, struct request_sock *req, char *tmpbuf, int i, int uid) { + const struct inet_request_sock *ireq = inet_rsk(req); int ttd = req->expires - jiffies; sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p", i, - req->af.v4_req.loc_addr, + ireq->loc_addr, ntohs(inet_sk(sk)->sport), - req->af.v4_req.rmt_addr, - ntohs(req->rmt_port), + ireq->rmt_addr, + ntohs(ireq->rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. 
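The /proc walk above now reads everything through tp->accept_queue instead of the old tp->listen_opt and tp->syn_wait_lock pair. struct request_sock_queue itself is defined outside this diff; from the accesses in this patch it presumably looks roughly like the sketch below (the accept-queue members are an assumption):

	/* Rough shape inferred from this patch; not the authoritative
	 * definition. */
	struct request_sock_queue {
		struct request_sock	*rskq_accept_head;	/* assumed */
		struct request_sock	*rskq_accept_tail;	/* assumed */
		rwlock_t		 syn_wait_lock;
		struct listen_sock	*listen_opt;	/* syn_table[], qlen, ... */
	};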
*/ 1, /* timers active (only the expire timer) */ @@ -2618,6 +2607,7 @@ struct proto tcp_prot = { .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp_sock), + .rsk_prot = &tcp_request_sock_ops, }; @@ -2660,7 +2650,6 @@ EXPORT_SYMBOL(tcp_proc_register); EXPORT_SYMBOL(tcp_proc_unregister); #endif EXPORT_SYMBOL(sysctl_local_port_range); -EXPORT_SYMBOL(sysctl_max_syn_backlog); EXPORT_SYMBOL(sysctl_tcp_low_latency); EXPORT_SYMBOL(sysctl_tcp_tw_reuse); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index eea1a17..f42a284 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -684,7 +684,7 @@ out: * Actually, we could lots of memory writes here. tp of listening * socket contains all necessary default parameters. */ -struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb) +struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) { /* allocate the newsk from the same slab of the master sock, * if not, at sk_free time we'll try to free it from the wrong @@ -692,6 +692,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0); if(newsk != NULL) { + struct inet_request_sock *ireq = inet_rsk(req); + struct tcp_request_sock *treq = tcp_rsk(req); struct tcp_sock *newtp; struct sk_filter *filter; @@ -703,7 +705,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, tcp_sk(newsk)->bind_hash = NULL; /* Clone the TCP header template */ - inet_sk(newsk)->dport = req->rmt_port; + inet_sk(newsk)->dport = ireq->rmt_port; sock_lock_init(newsk); bh_lock_sock(newsk); @@ -739,14 +741,14 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, /* Now setup tcp_sock */ newtp = tcp_sk(newsk); newtp->pred_flags = 0; - newtp->rcv_nxt = req->rcv_isn + 1; - newtp->snd_nxt = req->snt_isn + 1; - newtp->snd_una = req->snt_isn + 1; - newtp->snd_sml = req->snt_isn + 1; + newtp->rcv_nxt = treq->rcv_isn + 1; + newtp->snd_nxt = treq->snt_isn + 1; + newtp->snd_una = treq->snt_isn + 1; + newtp->snd_sml = treq->snt_isn + 1; tcp_prequeue_init(newtp); - tcp_init_wl(newtp, req->snt_isn, req->rcv_isn); + tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn); newtp->retransmits = 0; newtp->backoff = 0; @@ -772,13 +774,15 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, newtp->frto_counter = 0; newtp->frto_highmark = 0; + newtp->ca_ops = &tcp_reno; + tcp_set_ca_state(newtp, TCP_CA_Open); tcp_init_xmit_timers(newsk); skb_queue_head_init(&newtp->out_of_order_queue); - newtp->rcv_wup = req->rcv_isn + 1; - newtp->write_seq = req->snt_isn + 1; + newtp->rcv_wup = treq->rcv_isn + 1; + newtp->write_seq = treq->snt_isn + 1; newtp->pushed_seq = newtp->write_seq; - newtp->copied_seq = req->rcv_isn + 1; + newtp->copied_seq = treq->rcv_isn + 1; newtp->rx_opt.saw_tstamp = 0; @@ -788,10 +792,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, newtp->probes_out = 0; newtp->rx_opt.num_sacks = 0; newtp->urg_data = 0; - newtp->listen_opt = NULL; - newtp->accept_queue = newtp->accept_queue_tail = NULL; - /* Deinitialize syn_wait_lock to trap illegal accesses. */ - memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock)); + /* Deinitialize accept_queue to trap illegal accesses. 
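Pulling the scattered hunks together, the attach and detach points for the new modular congestion control are easy to miss; all four call sites appear in this patch:

	/* Lifecycle of tp->ca_ops as wired up by this patch:
	 *
	 *   tcp_v4_init_sock()            tp->ca_ops = &tcp_init_congestion_ops
	 *   tcp_create_openreq_child()    newtp->ca_ops = &tcp_reno (safe default)
	 *   tcp_rcv_synsent_state_process() and
	 *   tcp_rcv_state_process()       tcp_init_congestion_control(tp)
	 *   tcp_v4_destroy_sock()         tcp_cleanup_congestion_control(tp)
	 */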
*/ + memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue)); /* Back to base struct sock members. */ newsk->sk_err = 0; @@ -808,18 +810,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, newsk->sk_socket = NULL; newsk->sk_sleep = NULL; - newtp->rx_opt.tstamp_ok = req->tstamp_ok; - if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) { + newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; + if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { if (sysctl_tcp_fack) newtp->rx_opt.sack_ok |= 2; } newtp->window_clamp = req->window_clamp; newtp->rcv_ssthresh = req->rcv_wnd; newtp->rcv_wnd = req->rcv_wnd; - newtp->rx_opt.wscale_ok = req->wscale_ok; + newtp->rx_opt.wscale_ok = ireq->wscale_ok; if (newtp->rx_opt.wscale_ok) { - newtp->rx_opt.snd_wscale = req->snd_wscale; - newtp->rx_opt.rcv_wscale = req->rcv_wscale; + newtp->rx_opt.snd_wscale = ireq->snd_wscale; + newtp->rx_opt.rcv_wscale = ireq->rcv_wscale; } else { newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0; newtp->window_clamp = min(newtp->window_clamp, 65535U); @@ -842,8 +844,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, if (newtp->ecn_flags&TCP_ECN_OK) sock_set_flag(newsk, SOCK_NO_LARGESEND); - tcp_ca_init(newtp); - TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS); } return newsk; @@ -851,12 +851,12 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, /* * Process an incoming packet for SYN_RECV sockets represented - * as an open_request. + * as a request_sock. */ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, - struct open_request *req, - struct open_request **prev) + struct request_sock *req, + struct request_sock **prev) { struct tcphdr *th = skb->h.th; struct tcp_sock *tp = tcp_sk(sk); @@ -881,7 +881,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, } /* Check for pure retransmitted SYN. */ - if (TCP_SKB_CB(skb)->seq == req->rcv_isn && + if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn && flg == TCP_FLAG_SYN && !paws_reject) { /* @@ -901,7 +901,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, * Enforce "SYN-ACK" according to figure 8, figure 6 * of RFC793, fixed by RFC1122. */ - req->class->rtx_syn_ack(sk, req, NULL); + req->rsk_ops->rtx_syn_ack(sk, req, NULL); return NULL; } @@ -959,7 +959,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, * Invalid ACK: reset will be sent by listening socket */ if ((flg & TCP_FLAG_ACK) && - (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1)) + (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1)) return sk; /* Also, it would be not so bad idea to check rcv_tsecr, which @@ -970,10 +970,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, /* RFC793: "first check sequence number". */ if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, - req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) { + tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) { /* Out of window: send ACK and drop. */ if (!(flg & TCP_FLAG_RST)) - req->class->send_ack(skb, req); + req->rsk_ops->send_ack(skb, req); if (paws_reject) NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); return NULL; @@ -981,12 +981,12 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, /* In sequence, PAWS is OK. 
*/ - if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1)) + if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1)) req->ts_recent = tmp_opt.rcv_tsval; - if (TCP_SKB_CB(skb)->seq == req->rcv_isn) { + if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { /* Truncate SYN, it is out of window starting - at req->rcv_isn+1. */ + at tcp_rsk(req)->rcv_isn + 1. */ flg &= ~TCP_FLAG_SYN; } @@ -1003,8 +1003,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, return NULL; /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ - if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) { - req->acked = 1; + if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { + inet_rsk(req)->acked = 1; return NULL; } @@ -1026,14 +1026,14 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, listen_overflow: if (!sysctl_tcp_abort_on_overflow) { - req->acked = 1; + inet_rsk(req)->acked = 1; return NULL; } embryonic_reset: NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); if (!(flg & TCP_FLAG_RST)) - req->class->send_reset(skb); + req->rsk_ops->send_reset(skb); tcp_synq_drop(sk, req, prev); return NULL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index fa24e7a..0e17c24 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -111,8 +111,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) u32 restart_cwnd = tcp_init_cwnd(tp, dst); u32 cwnd = tp->snd_cwnd; - if (tcp_is_vegas(tp)) - tcp_vegas_enable(tp); + tcp_ca_event(tp, CA_EVENT_CWND_RESTART); tp->snd_ssthresh = tcp_current_ssthresh(tp); restart_cwnd = min(restart_cwnd, cwnd); @@ -280,6 +279,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) #define SYSCTL_FLAG_WSCALE 0x2 #define SYSCTL_FLAG_SACK 0x4 + /* If congestion control is doing timestamping */ + if (tp->ca_ops->rtt_sample) + do_gettimeofday(&skb->stamp); + sysctl_flags = 0; if (tcb->flags & TCPCB_FLAG_SYN) { tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS; @@ -304,17 +307,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); } - /* - * If the connection is idle and we are restarting, - * then we don't want to do any Vegas calculations - * until we get fresh RTT samples. So when we - * restart, we reset our Vegas state to a clean - * slate. After we get acks for this flight of - * packets, _then_ we can make Vegas calculations - * again. - */ - if (tcp_is_vegas(tp) && tcp_packets_in_flight(tp) == 0) - tcp_vegas_enable(tp); + if (tcp_packets_in_flight(tp) == 0) + tcp_ca_event(tp, CA_EVENT_TX_START); th = (struct tcphdr *) skb_push(skb, tcp_header_size); skb->h.th = th; @@ -521,6 +515,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len) * skbs, which it never sent before. --ANK */ TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; + buff->stamp = skb->stamp; if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) { tp->lost_out -= tcp_skb_pcount(skb); @@ -1356,8 +1351,9 @@ int tcp_send_synack(struct sock *sk) * Prepare a SYN-ACK. 
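In the tcp_output.c hunks above, the Vegas-specific idle-restart logic collapses into a generic CA_EVENT_TX_START, matching the CA_EVENT_FAST_ACK and CA_EVENT_SLOW_ACK calls in tcp_ack() earlier. The dispatcher itself is not part of this diff; judging from the call sites it is presumably no more than:

	/* Sketch (assumption): forward an event to the attached module,
	 * if that module registered a cwnd_event hook. */
	static inline void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event)
	{
		if (tp->ca_ops->cwnd_event)
			tp->ca_ops->cwnd_event(tp, event);
	}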
*/ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, - struct open_request *req) + struct request_sock *req) { + struct inet_request_sock *ireq = inet_rsk(req); struct tcp_sock *tp = tcp_sk(sk); struct tcphdr *th; int tcp_header_size; @@ -1373,47 +1369,47 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, skb->dst = dst_clone(dst); tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS + - (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + - (req->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + + (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + + (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + /* SACK_PERM is in the place of NOP NOP of TS */ - ((req->sack_ok && !req->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); + ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); memset(th, 0, sizeof(struct tcphdr)); th->syn = 1; th->ack = 1; if (dst->dev->features&NETIF_F_TSO) - req->ecn_ok = 0; + ireq->ecn_ok = 0; TCP_ECN_make_synack(req, th); th->source = inet_sk(sk)->sport; - th->dest = req->rmt_port; - TCP_SKB_CB(skb)->seq = req->snt_isn; + th->dest = ireq->rmt_port; + TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; TCP_SKB_CB(skb)->sacked = 0; skb_shinfo(skb)->tso_segs = 1; skb_shinfo(skb)->tso_size = 0; th->seq = htonl(TCP_SKB_CB(skb)->seq); - th->ack_seq = htonl(req->rcv_isn + 1); + th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ __u8 rcv_wscale; /* Set this up on the first call only */ req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); /* tcp_full_space because it is guaranteed to be the first packet */ tcp_select_initial_window(tcp_full_space(sk), - dst_metric(dst, RTAX_ADVMSS) - (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), + dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), &req->rcv_wnd, &req->window_clamp, - req->wscale_ok, + ireq->wscale_ok, &rcv_wscale); - req->rcv_wscale = rcv_wscale; + ireq->rcv_wscale = rcv_wscale; } /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ th->window = htons(req->rcv_wnd); TCP_SKB_CB(skb)->when = tcp_time_stamp; - tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), req->tstamp_ok, - req->sack_ok, req->wscale_ok, req->rcv_wscale, + tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, + ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, TCP_SKB_CB(skb)->when, req->ts_recent); @@ -1448,7 +1444,6 @@ static inline void tcp_connect_init(struct sock *sk) tp->window_clamp = dst_metric(dst, RTAX_WINDOW); tp->advmss = dst_metric(dst, RTAX_ADVMSS); tcp_initialize_rcv_mss(sk); - tcp_ca_init(tp); tcp_select_initial_window(tcp_full_space(sk), tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), @@ -1502,7 +1497,6 @@ int tcp_connect(struct sock *sk) TCP_SKB_CB(buff)->end_seq = tp->write_seq; tp->snd_nxt = tp->write_seq; tp->pushed_seq = tp->write_seq; - tcp_ca_init(tp); /* Send it off. 
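The next hunk adds tcp_scalable.c, whose whole behavior sits in two constants; plugging in illustrative numbers makes them concrete:

	/* TCP_SCALABLE_MD_SCALE = 3 and TCP_SCALABLE_AI_CNT = 50 mean:
	 *   on loss:  ssthresh = cwnd - cwnd/8    (a 12.5% backoff)
	 *   to grow:  +1 segment per min(cwnd, 50) ACKs
	 * e.g. cwnd = 400: ssthresh becomes 400 - 50 = 350 on loss, and
	 * growing by one segment takes 50 ACKs rather than Reno's 400. */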
*/ TCP_SKB_CB(buff)->when = tcp_time_stamp; diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c new file mode 100644 index 0000000..70e108e --- /dev/null +++ b/net/ipv4/tcp_scalable.c @@ -0,0 +1,68 @@ +/* Tom Kelly's Scalable TCP + * + * See http://www-lce.eng.cam.ac.uk/~ctk21/scalable/ + * + * John Heffner <jheffner@psc.edu> + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <net/tcp.h> + +/* These factors derived from the recommended values in the paper: + * .01 and 7/8. We use 50 instead of 100 to account for + * delayed ack. + */ +#define TCP_SCALABLE_AI_CNT 50U +#define TCP_SCALABLE_MD_SCALE 3 + +static void tcp_scalable_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, + u32 in_flight, int flag) +{ + if (in_flight < tp->snd_cwnd) + return; + + if (tp->snd_cwnd <= tp->snd_ssthresh) { + tp->snd_cwnd++; + } else { + tp->snd_cwnd_cnt++; + if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){ + tp->snd_cwnd++; + tp->snd_cwnd_cnt = 0; + } + } + tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp); + tp->snd_cwnd_stamp = tcp_time_stamp; +} + +static u32 tcp_scalable_ssthresh(struct tcp_sock *tp) +{ + return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); +} + + +static struct tcp_congestion_ops tcp_scalable = { + .ssthresh = tcp_scalable_ssthresh, + .cong_avoid = tcp_scalable_cong_avoid, + .min_cwnd = tcp_reno_min_cwnd, + + .owner = THIS_MODULE, + .name = "scalable", +}; + +static int __init tcp_scalable_register(void) +{ + return tcp_register_congestion_control(&tcp_scalable); +} + +static void __exit tcp_scalable_unregister(void) +{ + tcp_unregister_congestion_control(&tcp_scalable); +} + +module_init(tcp_scalable_register); +module_exit(tcp_scalable_unregister); + +MODULE_AUTHOR("John Heffner"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Scalable TCP"); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 799ebe0..b127b44 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -464,11 +464,11 @@ out_unlock: static void tcp_synack_timer(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - struct tcp_listen_opt *lopt = tp->listen_opt; + struct listen_sock *lopt = tp->accept_queue.listen_opt; int max_retries = tp->syn_retries ?
: sysctl_tcp_synack_retries; int thresh = max_retries; unsigned long now = jiffies; - struct open_request **reqp, *req; + struct request_sock **reqp, *req; int i, budget; if (lopt == NULL || lopt->qlen == 0) @@ -513,8 +513,8 @@ static void tcp_synack_timer(struct sock *sk) while ((req = *reqp) != NULL) { if (time_after_eq(now, req->expires)) { if ((req->retrans < thresh || - (req->acked && req->retrans < max_retries)) - && !req->class->rtx_syn_ack(sk, req, NULL)) { + (inet_rsk(req)->acked && req->retrans < max_retries)) + && !req->rsk_ops->rtx_syn_ack(sk, req, NULL)) { unsigned long timeo; if (req->retrans++ == 0) @@ -527,13 +527,9 @@ static void tcp_synack_timer(struct sock *sk) } /* Drop this request */ - write_lock(&tp->syn_wait_lock); - *reqp = req->dl_next; - write_unlock(&tp->syn_wait_lock); - lopt->qlen--; - if (req->retrans == 0) - lopt->qlen_young--; - tcp_openreq_free(req); + tcp_synq_unlink(tp, req, reqp); + reqsk_queue_removed(&tp->accept_queue, req); + reqsk_free(req); continue; } reqp = &req->dl_next; diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c new file mode 100644 index 0000000..9bd443d --- /dev/null +++ b/net/ipv4/tcp_vegas.c @@ -0,0 +1,411 @@ +/* + * TCP Vegas congestion control + * + * This is based on the congestion detection/avoidance scheme described in + * Lawrence S. Brakmo and Larry L. Peterson. + * "TCP Vegas: End to end congestion avoidance on a global internet." + * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480, + * October 1995. Available from: + * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps + * + * See http://www.cs.arizona.edu/xkernel/ for their implementation. + * The main aspects that distinguish this implementation from the + * Arizona Vegas implementation are: + * o We do not change the loss detection or recovery mechanisms of + * Linux in any way. Linux already recovers from losses quite well, + * using fine-grained timers, NewReno, and FACK. + * o To avoid the performance penalty imposed by increasing cwnd + * only every-other RTT during slow start, we increase during + * every RTT during slow start, just like Reno. + * o Largely to allow continuous cwnd growth during slow start, + * we use the rate at which ACKs come back as the "actual" + * rate, rather than the rate at which data is sent. + * o To speed convergence to the right rate, we set the cwnd + * to achieve the right ("actual") rate when we exit slow start. + * o To filter out the noise caused by delayed ACKs, we use the + * minimum RTT sample observed during the last RTT to calculate + * the actual rate. + * o When the sender re-starts from idle, it waits until it has + * received ACKs for an entire flight of new data before making + * a cwnd adjustment decision. The original Vegas implementation + * assumed senders never went idle. + */ + +#include <linux/config.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/tcp_diag.h> + +#include <net/tcp.h> + +/* Default values of the Vegas variables, in fixed-point representation + * with V_PARAM_SHIFT bits to the right of the binary point. 
+ */ +#define V_PARAM_SHIFT 1 +static int alpha = 1<<V_PARAM_SHIFT; +static int beta = 3<<V_PARAM_SHIFT; +static int gamma = 1<<V_PARAM_SHIFT; + +module_param(alpha, int, 0644); +MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)"); +module_param(beta, int, 0644); +MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)"); +module_param(gamma, int, 0644); +MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); + + +/* Vegas variables */ +struct vegas { + u32 beg_snd_nxt; /* right edge during last RTT */ + u32 beg_snd_una; /* left edge during last RTT */ + u32 beg_snd_cwnd; /* saves the size of the cwnd */ + u8 doing_vegas_now;/* if true, do vegas for this RTT */ + u16 cntRTT; /* # of RTTs measured within last RTT */ + u32 minRTT; /* min of RTTs measured within last RTT (in usec) */ + u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */ +}; + +/* There are several situations when we must "re-start" Vegas: + * + * o when a connection is established + * o after an RTO + * o after fast recovery + * o when we send a packet and there is no outstanding + * unacknowledged data (restarting an idle connection) + * + * In these circumstances we cannot do a Vegas calculation at the + * end of the first RTT, because any calculation we do is using + * stale info -- both the saved cwnd and congestion feedback are + * stale. + * + * Instead we must wait until the completion of an RTT during + * which we actually receive ACKs. + */ +static inline void vegas_enable(struct tcp_sock *tp) +{ + struct vegas *vegas = tcp_ca(tp); + + /* Begin taking Vegas samples next time we send something. */ + vegas->doing_vegas_now = 1; + + /* Set the beginning of the next send window. */ + vegas->beg_snd_nxt = tp->snd_nxt; + + vegas->cntRTT = 0; + vegas->minRTT = 0x7fffffff; +} + +/* Stop taking Vegas samples for now. */ +static inline void vegas_disable(struct tcp_sock *tp) +{ + struct vegas *vegas = tcp_ca(tp); + + vegas->doing_vegas_now = 0; +} + +static void tcp_vegas_init(struct tcp_sock *tp) +{ + struct vegas *vegas = tcp_ca(tp); + + vegas->baseRTT = 0x7fffffff; + vegas_enable(tp); +} + +/* Do RTT sampling needed for Vegas. + * Basically we: + * o min-filter RTT samples from within an RTT to get the current + * propagation delay + queuing delay (we are min-filtering to try to + * avoid the effects of delayed ACKs) + * o min-filter RTT samples from a much longer window (forever for now) + * to find the propagation delay (baseRTT) + */ +static void tcp_vegas_rtt_calc(struct tcp_sock *tp, u32 usrtt) +{ + struct vegas *vegas = tcp_ca(tp); + u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */ + + /* Filter to find propagation delay: */ + if (vrtt < vegas->baseRTT) + vegas->baseRTT = vrtt; + + /* Find the min RTT during the last RTT to find + * the current prop. delay + queuing delay: + */ + vegas->minRTT = min(vegas->minRTT, vrtt); + vegas->cntRTT++; +} + +static void tcp_vegas_state(struct tcp_sock *tp, u8 ca_state) +{ + + if (ca_state == TCP_CA_Open) + vegas_enable(tp); + else + vegas_disable(tp); +} + +/* + * If the connection is idle and we are restarting, + * then we don't want to do any Vegas calculations + * until we get fresh RTT samples. So when we + * restart, we reset our Vegas state to a clean + * slate. After we get acks for this flight of + * packets, _then_ we can make Vegas calculations + * again. 
+ */ +static void tcp_vegas_cwnd_event(struct tcp_sock *tp, enum tcp_ca_event event) +{ + if (event == CA_EVENT_CWND_RESTART || + event == CA_EVENT_TX_START) + tcp_vegas_init(tp); +} + +static void tcp_vegas_cong_avoid(struct tcp_sock *tp, u32 ack, + u32 seq_rtt, u32 in_flight, int flag) +{ + struct vegas *vegas = tcp_ca(tp); + + if (!vegas->doing_vegas_now) + return tcp_reno_cong_avoid(tp, ack, seq_rtt, in_flight, flag); + + /* The key players are v_beg_snd_una and v_beg_snd_nxt. + * + * These are so named because they represent the approximate values + * of snd_una and snd_nxt at the beginning of the current RTT. More + * precisely, they represent the amount of data sent during the RTT. + * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt, + * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding + * bytes of data have been ACKed during the course of the RTT, giving + * an "actual" rate of: + * + * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration) + * + * Unfortunately, v_beg_snd_una is not exactly equal to snd_una, + * because delayed ACKs can cover more than one segment, so they + * don't line up nicely with the boundaries of RTTs. + * + * Another unfortunate fact of life is that delayed ACKs delay the + * advance of the left edge of our send window, so that the number + * of bytes we send in an RTT is often less than our cwnd will allow. + * So we keep track of our cwnd separately, in v_beg_snd_cwnd. + */ + + if (after(ack, vegas->beg_snd_nxt)) { + /* Do the Vegas once-per-RTT cwnd adjustment. */ + u32 old_wnd, old_snd_cwnd; + + + /* Here old_wnd is essentially the window of data that was + * sent during the previous RTT, and has all + * been acknowledged in the course of the RTT that ended + * with the ACK we just received. Likewise, old_snd_cwnd + * is the cwnd during the previous RTT. + */ + old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) / + tp->mss_cache; + old_snd_cwnd = vegas->beg_snd_cwnd; + + /* Save the extent of the current window so we can use this + * at the end of the next RTT. + */ + vegas->beg_snd_una = vegas->beg_snd_nxt; + vegas->beg_snd_nxt = tp->snd_nxt; + vegas->beg_snd_cwnd = tp->snd_cwnd; + + /* Take into account the current RTT sample too, to + * decrease the impact of delayed acks. This double counts + * this sample since we count it for the next window as well, + * but that's not too awful, since we're taking the min, + * rather than averaging. + */ + tcp_vegas_rtt_calc(tp, seq_rtt*1000); + + /* We do the Vegas calculations only if we got enough RTT + * samples that we can be reasonably sure that we got + * at least one RTT sample that wasn't from a delayed ACK. + * If we only had 2 samples total, + * then that means we're getting only 1 ACK per RTT, which + * means they're almost certainly delayed ACKs. + * If we have 3 samples, we should be OK. + */ + + if (vegas->cntRTT <= 2) { + /* We don't have enough RTT samples to do the Vegas + * calculation, so we'll behave like Reno. + */ + if (tp->snd_cwnd > tp->snd_ssthresh) + tp->snd_cwnd++; + } else { + u32 rtt, target_cwnd, diff; + + /* We have enough RTT samples, so, using the Vegas + * algorithm, we determine if we should increase or + * decrease cwnd, and by how much. + */ + + /* Pluck out the RTT we are using for the Vegas + * calculations. This is the min RTT seen during the + * last RTT. Taking the min filters out the effects + * of delayed ACKs, at the cost of noticing congestion + * a bit later. 
+ */ + rtt = vegas->minRTT; + + /* Calculate the cwnd we should have, if we weren't + * going too fast. + * + * This is: + * (actual rate in segments) * baseRTT + * We keep it as a fixed point number with + * V_PARAM_SHIFT bits to the right of the binary point. + */ + target_cwnd = ((old_wnd * vegas->baseRTT) + << V_PARAM_SHIFT) / rtt; + + /* Calculate the difference between the window we had, + * and the window we would like to have. This quantity + * is the "Diff" from the Arizona Vegas papers. + * + * Again, this is a fixed point number with + * V_PARAM_SHIFT bits to the right of the binary + * point. + */ + diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; + + if (tp->snd_cwnd < tp->snd_ssthresh) { + /* Slow start. */ + if (diff > gamma) { + /* Going too fast. Time to slow down + * and switch to congestion avoidance. + */ + tp->snd_ssthresh = 2; + + /* Set cwnd to match the actual rate + * exactly: + * cwnd = (actual rate) * baseRTT + * Then we add 1 because the integer + * truncation robs us of full link + * utilization. + */ + tp->snd_cwnd = min(tp->snd_cwnd, + (target_cwnd >> + V_PARAM_SHIFT)+1); + + } + } else { + /* Congestion avoidance. */ + u32 next_snd_cwnd; + + /* Figure out where we would like cwnd + * to be. + */ + if (diff > beta) { + /* The old window was too fast, so + * we slow down. + */ + next_snd_cwnd = old_snd_cwnd - 1; + } else if (diff < alpha) { + /* We don't have enough extra packets + * in the network, so speed up. + */ + next_snd_cwnd = old_snd_cwnd + 1; + } else { + /* Sending just as fast as we + * should be. + */ + next_snd_cwnd = old_snd_cwnd; + } + + /* Adjust cwnd upward or downward, toward the + * desired value. + */ + if (next_snd_cwnd > tp->snd_cwnd) + tp->snd_cwnd++; + else if (next_snd_cwnd < tp->snd_cwnd) + tp->snd_cwnd--; + } + } + + /* Wipe the slate clean for the next RTT. */ + vegas->cntRTT = 0; + vegas->minRTT = 0x7fffffff; + } + + /* The following code is executed for every ack we receive, + * except for conditions checked in should_advance_cwnd() + * before the call to tcp_cong_avoid(). Mainly this means that + * we only execute this code if the ack actually acked some + * data. + */ + + /* If we are in slow start, increase our cwnd in response to this ACK. + * (If we are not in slow start then we are in congestion avoidance, + * and adjust our congestion window only once per RTT. See the code + * above.) + */ + if (tp->snd_cwnd <= tp->snd_ssthresh) + tp->snd_cwnd++; + + /* to keep cwnd from growing without bound */ + tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp); + + /* Make sure that we are never so timid as to reduce our cwnd below + * 2 MSS. + * + * Going below 2 MSS would risk huge delayed ACKs from our receiver. + */ + tp->snd_cwnd = max(tp->snd_cwnd, 2U); +} + +/* Extract info for Tcp socket info provided via netlink. 
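The fixed-point arithmetic above is easier to sanity-check with concrete, made-up numbers:

	/* With V_PARAM_SHIFT = 1 (one fractional bit):
	 *   alpha = 2 (1.0 pkt), beta = 6 (3.0 pkts), gamma = 2 (1.0 pkt)
	 * Suppose old_wnd = 20 segments, baseRTT = 100 us, rtt = 125 us:
	 *   target_cwnd = (20 * 100 << 1) / 125 = 32   -> 16.0 segments
	 *   diff        = (20 << 1) - 32        = 8    ->  4.0 segments
	 * diff > beta, so in congestion avoidance cwnd steps down by one. */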
*/ +static void tcp_vegas_get_info(struct tcp_sock *tp, u32 ext, + struct sk_buff *skb) +{ + const struct vegas *ca = tcp_ca(tp); + if (ext & (1<<(TCPDIAG_VEGASINFO-1))) { + struct tcpvegas_info *info; + + info = RTA_DATA(__RTA_PUT(skb, TCPDIAG_VEGASINFO, + sizeof(*info))); + + info->tcpv_enabled = ca->doing_vegas_now; + info->tcpv_rttcnt = ca->cntRTT; + info->tcpv_rtt = ca->baseRTT; + info->tcpv_minrtt = ca->minRTT; + rtattr_failure: ; + } +} + +static struct tcp_congestion_ops tcp_vegas = { + .init = tcp_vegas_init, + .ssthresh = tcp_reno_ssthresh, + .cong_avoid = tcp_vegas_cong_avoid, + .min_cwnd = tcp_reno_min_cwnd, + .rtt_sample = tcp_vegas_rtt_calc, + .set_state = tcp_vegas_state, + .cwnd_event = tcp_vegas_cwnd_event, + .get_info = tcp_vegas_get_info, + + .owner = THIS_MODULE, + .name = "vegas", +}; + +static int __init tcp_vegas_register(void) +{ + BUG_ON(sizeof(struct vegas) > TCP_CA_PRIV_SIZE); + tcp_register_congestion_control(&tcp_vegas); + return 0; +} + +static void __exit tcp_vegas_unregister(void) +{ + tcp_unregister_congestion_control(&tcp_vegas); +} + +module_init(tcp_vegas_register); +module_exit(tcp_vegas_unregister); + +MODULE_AUTHOR("Stephen Hemminger"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TCP Vegas"); diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c new file mode 100644 index 0000000..ef82724 --- /dev/null +++ b/net/ipv4/tcp_westwood.c @@ -0,0 +1,259 @@ +/* + * TCP Westwood+ + * + * Angelo Dell'Aera: TCP Westwood+ support + */ + +#include <linux/config.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/tcp_diag.h> +#include <net/tcp.h> + +/* TCP Westwood structure */ +struct westwood { + u32 bw_ns_est; /* first bandwidth estimation..not too smoothed 8) */ + u32 bw_est; /* bandwidth estimate */ + u32 rtt_win_sx; /* here starts a new evaluation... */ + u32 bk; + u32 snd_una; /* used for evaluating the number of acked bytes */ + u32 cumul_ack; + u32 accounted; + u32 rtt; + u32 rtt_min; /* minimum observed RTT */ +}; + + +/* TCP Westwood functions and constants */ +#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */ +#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */ +
/* + * @tcp_westwood_init + * This function initializes fields used in TCP Westwood+; + * it is called after the initial SYN, so the sequence numbers + * are correct, but for new passive connections we have no + * information about RTTmin at this time, so we simply set it to + * TCP_WESTWOOD_INIT_RTT. This value was deliberately chosen to be + * conservative, since that way we can be sure it will be updated in a + * consistent way as soon as possible. It will reasonably happen within + * the first RTT period of the connection lifetime. + */ +static void tcp_westwood_init(struct tcp_sock *tp) +{ + struct westwood *w = tcp_ca(tp); + + w->bk = 0; + w->bw_ns_est = 0; + w->bw_est = 0; + w->accounted = 0; + w->cumul_ack = 0; + w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT; + w->rtt_win_sx = tcp_time_stamp; + w->snd_una = tp->snd_una; +} + +/* + * @westwood_do_filter + * Low-pass filter. Implemented using constant coefficients. + */ +static inline u32 westwood_do_filter(u32 a, u32 b) +{ + return (((7 * a) + b) >> 3); +} + +static inline void westwood_filter(struct westwood *w, u32 delta) +{ + w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta); + w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est); +} + +/* + * @westwood_pkts_acked + * Called after processing a group of packets, + * but all Westwood needs is the last sample of srtt.
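A quick numeric check of westwood_do_filter() above, with illustrative values:

	/* An EWMA with gain 1/8: new = (7*old + sample) >> 3.
	 * old bw_ns_est = 800 bytes/jiffy, sample bk/delta = 1600:
	 *   bw_ns_est' = (7*800 + 1600) >> 3 = 900
	 * bw_est smooths bw_ns_est the same way again, so a single noisy
	 * sample moves the final estimate by only ~1/64 of its deviation. */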
+ */ +static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt) +{ + struct westwood *w = tcp_ca(tp); + if (cnt > 0) + w->rtt = tp->srtt >> 3; +} + +/* + * @westwood_update_window + * It updates RTT evaluation window if it is the right moment to do + * it. If so it calls filter for evaluating bandwidth. + */ +static void westwood_update_window(struct tcp_sock *tp) +{ + struct westwood *w = tcp_ca(tp); + s32 delta = tcp_time_stamp - w->rtt_win_sx; + + /* + * See if a RTT-window has passed. + * Be careful since if RTT is less than + * 50ms we don't filter but we continue 'building the sample'. + * This minimum limit was chosen since an estimation on small + * time intervals is better to avoid... + * Obviously on a LAN we reasonably will always have + * right_bound = left_bound + WESTWOOD_RTT_MIN + */ + if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) { + westwood_filter(w, delta); + + w->bk = 0; + w->rtt_win_sx = tcp_time_stamp; + } +} + +/* + * @westwood_fast_bw + * It is called when we are in fast path. In particular it is called when + * header prediction is successful. In such case in fact update is + * straight forward and doesn't need any particular care. + */ +static inline void westwood_fast_bw(struct tcp_sock *tp) +{ + struct westwood *w = tcp_ca(tp); + + westwood_update_window(tp); + + w->bk += tp->snd_una - w->snd_una; + w->snd_una = tp->snd_una; + w->rtt_min = min(w->rtt, w->rtt_min); +} + +/* + * @westwood_acked_count + * This function evaluates cumul_ack for evaluating bk in case of + * delayed or partial acks. + */ +static inline u32 westwood_acked_count(struct tcp_sock *tp) +{ + struct westwood *w = tcp_ca(tp); + + w->cumul_ack = tp->snd_una - w->snd_una; + + /* If cumul_ack is 0 this is a dupack since it's not moving + * tp->snd_una. + */ + if (!w->cumul_ack) { + w->accounted += tp->mss_cache; + w->cumul_ack = tp->mss_cache; + } + + if (w->cumul_ack > tp->mss_cache) { + /* Partial or delayed ack */ + if (w->accounted >= w->cumul_ack) { + w->accounted -= w->cumul_ack; + w->cumul_ack = tp->mss_cache; + } else { + w->cumul_ack -= w->accounted; + w->accounted = 0; + } + } + + w->snd_una = tp->snd_una; + + return w->cumul_ack; +} + +static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp) +{ + struct westwood *w = tcp_ca(tp); + return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2); +} + +/* + * TCP Westwood + * Here limit is evaluated as Bw estimation*RTTmin (for obtaining it + * in packets we use mss_cache). Rttmin is guaranteed to be >= 2 + * so avoids ever returning 0. + */ +static u32 tcp_westwood_cwnd_min(struct tcp_sock *tp) +{ + return westwood_bw_rttmin(tp); +} + +static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event) +{ + struct westwood *w = tcp_ca(tp); + + switch(event) { + case CA_EVENT_FAST_ACK: + westwood_fast_bw(tp); + break; + + case CA_EVENT_COMPLETE_CWR: + tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(tp); + break; + + case CA_EVENT_FRTO: + tp->snd_ssthresh = westwood_bw_rttmin(tp); + break; + + case CA_EVENT_SLOW_ACK: + westwood_update_window(tp); + w->bk += westwood_acked_count(tp); + w->rtt_min = min(w->rtt, w->rtt_min); + break; + + default: + /* don't care */ + break; + } +} + + +/* Extract info for Tcp socket info provided via netlink. 
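For westwood_bw_rttmin() above, which the CA_EVENT_COMPLETE_CWR and CA_EVENT_FRTO branches use, an illustrative calculation (units inferred from the code: bk in bytes, delta and rtt_min in jiffies):

	/* ssthresh is reset to the estimated bandwidth-delay product in
	 * segments: (bw_est * rtt_min) / mss_cache, floored at 2.
	 * e.g. bw_est = 2920 bytes/jiffy, rtt_min = 5 jiffies,
	 * mss_cache = 1460  ->  (2920 * 5) / 1460 = 10 segments. */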
*/ +static void tcp_westwood_info(struct tcp_sock *tp, u32 ext, + struct sk_buff *skb) +{ + const struct westwood *ca = tcp_ca(tp); + if (ext & (1<<(TCPDIAG_VEGASINFO-1))) { + struct rtattr *rta; + struct tcpvegas_info *info; + + rta = __RTA_PUT(skb, TCPDIAG_VEGASINFO, sizeof(*info)); + info = RTA_DATA(rta); + info->tcpv_enabled = 1; + info->tcpv_rttcnt = 0; + info->tcpv_rtt = jiffies_to_usecs(ca->rtt); + info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min); + rtattr_failure: ; + } +} + + +static struct tcp_congestion_ops tcp_westwood = { + .init = tcp_westwood_init, + .ssthresh = tcp_reno_ssthresh, + .cong_avoid = tcp_reno_cong_avoid, + .min_cwnd = tcp_westwood_cwnd_min, + .cwnd_event = tcp_westwood_event, + .get_info = tcp_westwood_info, + .pkts_acked = tcp_westwood_pkts_acked, + + .owner = THIS_MODULE, + .name = "westwood" +}; + +static int __init tcp_westwood_register(void) +{ + BUG_ON(sizeof(struct westwood) > TCP_CA_PRIV_SIZE); + return tcp_register_congestion_control(&tcp_westwood); +} + +static void __exit tcp_westwood_unregister(void) +{ + tcp_unregister_congestion_control(&tcp_westwood); +} + +module_init(tcp_westwood_register); +module_exit(tcp_westwood_unregister); + +MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TCP Westwood+"); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4a6952e..7c24e64 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -738,7 +738,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) unsigned long amount; amount = 0; - spin_lock_irq(&sk->sk_receive_queue.lock); + spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* @@ -748,7 +748,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) */ amount = skb->len - sizeof(struct udphdr); } - spin_unlock_irq(&sk->sk_receive_queue.lock); + spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } @@ -848,12 +848,12 @@ csum_copy_err: /* Clear queue. 
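The udp.c hunks here and just below relax spin_lock_irq to spin_lock_bh on the receive-queue lock. The patch gives no rationale; my reading is that this queue is only ever filled from softirq context, so disabling bottom halves already excludes the writer, and unlike spin_unlock_irq the _bh variant cannot re-enable interrupts behind a caller that had them disabled:

	/* The pattern after the change (process context, e.g. udp_ioctl): */
	spin_lock_bh(&sk->sk_receive_queue.lock);	/* blocks softirq input */
	skb = skb_peek(&sk->sk_receive_queue);		/* queue is now stable */
	/* ... inspect or unlink skb ... */
	spin_unlock_bh(&sk->sk_receive_queue.lock);	/* hardirq state untouched */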
*/ if (flags&MSG_PEEK) { int clear = 0; - spin_lock_irq(&sk->sk_receive_queue.lock); + spin_lock_bh(&sk->sk_receive_queue.lock); if (skb == skb_peek(&sk->sk_receive_queue)) { __skb_unlink(skb, &sk->sk_receive_queue); clear = 1; } - spin_unlock_irq(&sk->sk_receive_queue.lock); + spin_unlock_bh(&sk->sk_receive_queue.lock); if (clear) kfree_skb(skb); } @@ -1334,7 +1334,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) struct sk_buff_head *rcvq = &sk->sk_receive_queue; struct sk_buff *skb; - spin_lock_irq(&rcvq->lock); + spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL) { if (udp_checksum_complete(skb)) { UDP_INC_STATS_BH(UDP_MIB_INERRORS); @@ -1345,7 +1345,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) break; } } - spin_unlock_irq(&rcvq->lock); + spin_unlock_bh(&rcvq->lock); /* nothing to see, move along */ if (skb == NULL) diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index af2392a..66620a9 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -33,6 +33,7 @@ static void xfrm4_encap(struct sk_buff *skb) struct dst_entry *dst = skb->dst; struct xfrm_state *x = dst->xfrm; struct iphdr *iph, *top_iph; + int flags; iph = skb->nh.iph; skb->h.ipiph = iph; @@ -51,10 +52,13 @@ static void xfrm4_encap(struct sk_buff *skb) /* DS disclosed */ top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos); - if (x->props.flags & XFRM_STATE_NOECN) + + flags = x->props.flags; + if (flags & XFRM_STATE_NOECN) IP_ECN_clear(top_iph); - top_iph->frag_off = iph->frag_off & htons(IP_DF); + top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? + 0 : (iph->frag_off & htons(IP_DF)); if (!top_iph->frag_off) __ip_select_ident(top_iph, dst, 0); diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 223a2e8..050611d 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c @@ -7,12 +7,20 @@ * */ +#include <net/ip.h> #include <net/xfrm.h> #include <linux/pfkeyv2.h> #include <linux/ipsec.h> static struct xfrm_state_afinfo xfrm4_state_afinfo; +static int xfrm4_init_flags(struct xfrm_state *x) +{ + if (ipv4_config.no_pmtu_disc) + x->props.flags |= XFRM_STATE_NOPMTUDISC; + return 0; +} + static void __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, struct xfrm_tmpl *tmpl, @@ -109,6 +117,7 @@ __xfrm4_find_acq(u8 mode, u32 reqid, u8 proto, static struct xfrm_state_afinfo xfrm4_state_afinfo = { .family = AF_INET, .lock = RW_LOCK_UNLOCKED, + .init_flags = xfrm4_init_flags, .init_tempsel = __xfrm4_init_tempsel, .state_lookup = __xfrm4_state_lookup, .find_acq = __xfrm4_find_acq, diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index 413191f..e1fe360e 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c @@ -84,7 +84,7 @@ static void ipip_err(struct sk_buff *skb, u32 info) handler->err_handler(skb, &arg); } -static int ipip_init_state(struct xfrm_state *x, void *args) +static int ipip_init_state(struct xfrm_state *x) { if (!x->props.mode) return -EINVAL;