Diffstat (limited to 'net/ipv4')
51 files changed, 874 insertions, 616 deletions
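
The dominant change in this series is the conversion of NIPQUAD_FMT/NIPQUAD printk arguments to the %pI4 pointer extension, which takes the address of the network-order value instead of expanding it into four byte arguments; the remainder is checkpatch-style cleanup (spacing around operators, "type *" cast style, dropping redundant INIT_RCU_HEAD calls and the '&' on proc handler assignments). A minimal before/after sketch of the printk conversion, using a hypothetical variable and address value, not taken from any one hunk:

	__be32 addr = htonl(0xc0a80101);	/* hypothetical address (192.168.1.1) in network byte order */

	/* legacy form being removed: NIPQUAD_FMT is "%u.%u.%u.%u" and NIPQUAD() expands the four bytes */
	printk(KERN_INFO "addr=" NIPQUAD_FMT "\n", NIPQUAD(addr));

	/* replacement used throughout this series: %pI4 consumes a pointer to the 4-byte address */
	printk(KERN_INFO "addr=%pI4\n", &addr);
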
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1fbff5f..e328681 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1070,11 +1070,8 @@ static int inet_sk_reselect_saddr(struct sock *sk) return 0; if (sysctl_ip_dynaddr > 1) { - printk(KERN_INFO "%s(): shifting inet->" - "saddr from " NIPQUAD_FMT " to " NIPQUAD_FMT "\n", - __func__, - NIPQUAD(old_saddr), - NIPQUAD(new_saddr)); + printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n", + __func__, &old_saddr, &new_saddr); } inet->saddr = inet->rcv_saddr = new_saddr; diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 8219b7e..3f20518 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -201,8 +201,8 @@ out: static void ah4_err(struct sk_buff *skb, u32 info) { - struct iphdr *iph = (struct iphdr*)skb->data; - struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+(iph->ihl<<2)); + struct iphdr *iph = (struct iphdr *)skb->data; + struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 1a9dd66..957c87d 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -506,7 +506,7 @@ int arp_bind_neighbour(struct dst_entry *dst) if (dev == NULL) return -EINVAL; if (n == NULL) { - __be32 nexthop = ((struct rtable*)dst)->rt_gateway; + __be32 nexthop = ((struct rtable *)dst)->rt_gateway; if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT)) nexthop = 0; n = __neigh_lookup_errno( @@ -640,14 +640,14 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, arp_ptr=(unsigned char *)(arp+1); memcpy(arp_ptr, src_hw, dev->addr_len); - arp_ptr+=dev->addr_len; - memcpy(arp_ptr, &src_ip,4); - arp_ptr+=4; + arp_ptr += dev->addr_len; + memcpy(arp_ptr, &src_ip, 4); + arp_ptr += 4; if (target_hw != NULL) memcpy(arp_ptr, target_hw, dev->addr_len); else memset(arp_ptr, 0, dev->addr_len); - arp_ptr+=dev->addr_len; + arp_ptr += dev->addr_len; memcpy(arp_ptr, &dest_ip, 4); return skb; @@ -823,9 +823,9 @@ static int arp_process(struct sk_buff *skb) int dont_send = 0; if (!dont_send) - dont_send |= arp_ignore(in_dev,sip,tip); + dont_send |= arp_ignore(in_dev, sip, tip); if (!dont_send && IN_DEV_ARPFILTER(in_dev)) - dont_send |= arp_filter(sip,tip,dev); + dont_send |= arp_filter(sip, tip, dev); if (!dont_send) arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); @@ -1308,7 +1308,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) } #endif - sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->primary_key)); + sprintf(tbuf, "%pI4", n->primary_key); seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); read_unlock(&n->lock); @@ -1321,7 +1321,7 @@ static void arp_format_pneigh_entry(struct seq_file *seq, int hatype = dev ? dev->type : 0; char tbuf[16]; - sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->key)); + sprintf(tbuf, "%pI4", n->key); seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00", dev ? 
dev->name : "*"); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 2e78f6b..e527990 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -490,7 +490,6 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) } atomic_set(&doi_def->refcount, 1); - INIT_RCU_HEAD(&doi_def->rcu); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 56fce3a..309997e 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -112,13 +112,7 @@ static inline void devinet_sysctl_unregister(struct in_device *idev) static struct in_ifaddr *inet_alloc_ifa(void) { - struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); - - if (ifa) { - INIT_RCU_HEAD(&ifa->rcu_head); - } - - return ifa; + return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL); } static void inet_rcu_free_ifa(struct rcu_head *head) @@ -161,7 +155,6 @@ static struct in_device *inetdev_init(struct net_device *dev) in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL); if (!in_dev) goto out; - INIT_RCU_HEAD(&in_dev->rcu_head); memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt, sizeof(in_dev->cnf)); in_dev->cnf.sysctl = NULL; @@ -1108,7 +1101,7 @@ out: } static struct notifier_block ip_netdev_notifier = { - .notifier_call =inetdev_event, + .notifier_call = inetdev_event, }; static inline size_t inet_nlmsg_size(void) @@ -1195,7 +1188,7 @@ done: return skb->len; } -static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh, +static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, u32 pid) { struct sk_buff *skb; @@ -1262,7 +1255,7 @@ static void inet_forward_change(struct net *net) } static int devinet_conf_proc(ctl_table *ctl, int write, - struct file* filp, void __user *buffer, + struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); @@ -1334,7 +1327,7 @@ static int devinet_conf_sysctl(ctl_table *table, } static int devinet_sysctl_forward(ctl_table *ctl, int write, - struct file* filp, void __user *buffer, + struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; @@ -1363,7 +1356,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, } int ipv4_doint_and_flush(ctl_table *ctl, int write, - struct file* filp, void __user *buffer, + struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 21515d4..95a9c65 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -413,8 +413,8 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) static void esp4_err(struct sk_buff *skb, u32 info) { - struct iphdr *iph = (struct iphdr*)skb->data; - struct ip_esp_hdr *esph = (struct ip_esp_hdr*)(skb->data+(iph->ihl<<2)); + struct iphdr *iph = (struct iphdr *)skb->data; + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 65c1503..741e4fa 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -578,7 +578,7 @@ errout: return err; } -static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) +static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct fib_config cfg; @@ -600,7 +600,7 @@ errout: return err; } -static int inet_rtm_newroute(struct sk_buff 
*skb, struct nlmsghdr* nlh, void *arg) +static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct fib_config cfg; @@ -903,7 +903,7 @@ static void fib_disable_ip(struct net_device *dev, int force) static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; switch (event) { @@ -964,11 +964,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo } static struct notifier_block fib_inetaddr_notifier = { - .notifier_call =fib_inetaddr_event, + .notifier_call = fib_inetaddr_event, }; static struct notifier_block fib_netdev_notifier = { - .notifier_call =fib_netdev_event, + .notifier_call = fib_netdev_event, }; static int __net_init ip_fib_net_init(struct net *net) diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index c8cac6c..ded8c44 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c @@ -247,7 +247,7 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result { int err; struct fn_zone *fz; - struct fn_hash *t = (struct fn_hash*)tb->tb_data; + struct fn_hash *t = (struct fn_hash *)tb->tb_data; read_lock(&fib_hash_lock); for (fz = t->fn_zone_list; fz; fz = fz->fz_next) { @@ -283,7 +283,7 @@ fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib struct fib_node *f; struct fib_info *fi = NULL; struct fib_info *last_resort; - struct fn_hash *t = (struct fn_hash*)tb->tb_data; + struct fn_hash *t = (struct fn_hash *)tb->tb_data; struct fn_zone *fz = t->fn_zones[0]; if (fz == NULL) @@ -548,7 +548,7 @@ out: static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg) { - struct fn_hash *table = (struct fn_hash*)tb->tb_data; + struct fn_hash *table = (struct fn_hash *)tb->tb_data; struct fib_node *f; struct fib_alias *fa, *fa_to_delete; struct fn_zone *fz; @@ -748,7 +748,7 @@ static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlin { int m, s_m; struct fn_zone *fz; - struct fn_hash *table = (struct fn_hash*)tb->tb_data; + struct fn_hash *table = (struct fn_hash *)tb->tb_data; s_m = cb->args[2]; read_lock(&fib_hash_lock); @@ -845,10 +845,10 @@ static struct fib_alias *fib_get_first(struct seq_file *seq) struct hlist_node *node; struct fib_node *fn; - hlist_for_each_entry(fn,node,iter->hash_head,fn_hash) { + hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) { struct fib_alias *fa; - list_for_each_entry(fa,&fn->fn_alias,fa_list) { + list_for_each_entry(fa, &fn->fn_alias, fa_list) { iter->fn = fn; iter->fa = fa; goto out; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ded2ae3..4817dea 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -63,16 +63,16 @@ static DEFINE_SPINLOCK(fib_multipath_lock); for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) #define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \ -for (nhsel=0, nh = (struct fib_nh*)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) +for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) #else /* CONFIG_IP_ROUTE_MULTIPATH */ /* Hope, that gcc will optimize it to get rid of dummy loop */ -#define for_nexthops(fi) { int nhsel=0; const struct fib_nh * nh = (fi)->fib_nh; \ +#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \ for (nhsel=0; 
nhsel < 1; nhsel++) -#define change_nexthops(fi) { int nhsel=0; struct fib_nh * nh = (struct fib_nh*)((fi)->fib_nh); \ +#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \ for (nhsel=0; nhsel < 1; nhsel++) #endif /* CONFIG_IP_ROUTE_MULTIPATH */ @@ -358,7 +358,7 @@ int fib_detect_death(struct fib_info *fi, int order, state = n->nud_state; neigh_release(n); } - if (state==NUD_REACHABLE) + if (state == NUD_REACHABLE) return 0; if ((state&NUD_VALID) && order != dflt) return 0; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 5cb7278..ec0ae49 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2399,8 +2399,8 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) __be32 prf = htonl(mask_pfx(tn->key, tn->pos)); seq_indent(seq, iter->depth-1); - seq_printf(seq, " +-- " NIPQUAD_FMT "/%d %d %d %d\n", - NIPQUAD(prf), tn->pos, tn->bits, tn->full_children, + seq_printf(seq, " +-- %pI4/%d %d %d %d\n", + &prf, tn->pos, tn->bits, tn->full_children, tn->empty_children); } else { @@ -2410,7 +2410,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) __be32 val = htonl(l->key); seq_indent(seq, iter->depth); - seq_printf(seq, " |-- " NIPQUAD_FMT "\n", NIPQUAD(val)); + seq_printf(seq, " |-- %pI4\n", &val); hlist_for_each_entry_rcu(li, node, &l->list, hlist) { struct fib_alias *fa; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 72b2de7..21e497e 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -683,10 +683,8 @@ static void icmp_unreach(struct sk_buff *skb) break; case ICMP_FRAG_NEEDED: if (ipv4_config.no_pmtu_disc) { - LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": " - "fragmentation needed " - "and DF set.\n", - NIPQUAD(iph->daddr)); + LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", + &iph->daddr); } else { info = ip_rt_frag_needed(net, iph, ntohs(icmph->un.frag.mtu), @@ -696,9 +694,8 @@ static void icmp_unreach(struct sk_buff *skb) } break; case ICMP_SR_FAILED: - LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": Source " - "Route Failed.\n", - NIPQUAD(iph->daddr)); + LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", + &iph->daddr); break; default: break; @@ -729,12 +726,12 @@ static void icmp_unreach(struct sk_buff *skb) if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { if (net_ratelimit()) - printk(KERN_WARNING NIPQUAD_FMT " sent an invalid ICMP " + printk(KERN_WARNING "%pI4 sent an invalid ICMP " "type %u, code %u " - "error to a broadcast: " NIPQUAD_FMT " on %s\n", - NIPQUAD(ip_hdr(skb)->saddr), + "error to a broadcast: %pI4 on %s\n", + &ip_hdr(skb)->saddr, icmph->type, icmph->code, - NIPQUAD(iph->daddr), + &iph->daddr, skb->dev->name); goto out; } @@ -952,9 +949,8 @@ static void icmp_address_reply(struct sk_buff *skb) break; } if (!ifa && net_ratelimit()) { - printk(KERN_INFO "Wrong address mask " NIPQUAD_FMT " from " - "%s/" NIPQUAD_FMT "\n", - NIPQUAD(*mp), dev->name, NIPQUAD(rt->rt_src)); + printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", + mp, dev->name, &rt->rt_src); } } rcu_read_unlock(); @@ -976,9 +972,10 @@ int icmp_rcv(struct sk_buff *skb) struct net *net = dev_net(rt->u.dst.dev); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { + struct sec_path *sp = skb_sec_path(skb); int nh; - if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags & + if (!(sp && sp->xvec[sp->len - 1]->props.flags & XFRM_STATE_ICMP)) goto drop; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 
a0d8645..f92733e 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -167,7 +167,7 @@ static __inline__ void igmp_stop_timer(struct ip_mc_list *im) spin_lock_bh(&im->lock); if (del_timer(&im->timer)) atomic_dec(&im->refcnt); - im->tm_running=0; + im->tm_running = 0; im->reporter = 0; im->unsolicit_count = 0; spin_unlock_bh(&im->lock); @@ -176,9 +176,9 @@ static __inline__ void igmp_stop_timer(struct ip_mc_list *im) /* It must be called with locked im->lock */ static void igmp_start_timer(struct ip_mc_list *im, int max_delay) { - int tv=net_random() % max_delay; + int tv = net_random() % max_delay; - im->tm_running=1; + im->tm_running = 1; if (!mod_timer(&im->timer, jiffies+tv+2)) atomic_inc(&im->refcnt); } @@ -207,7 +207,7 @@ static void igmp_mod_timer(struct ip_mc_list *im, int max_delay) if (del_timer(&im->timer)) { if ((long)(im->timer.expires-jiffies) < max_delay) { add_timer(&im->timer); - im->tm_running=1; + im->tm_running = 1; spin_unlock_bh(&im->lock); return; } @@ -358,7 +358,7 @@ static int igmpv3_sendpack(struct sk_buff *skb) static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) { - return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc,type,gdel,sdel); + return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel); } static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, @@ -653,7 +653,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, return -1; } - skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); + skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); if (skb == NULL) { ip_rt_put(rt); return -1; @@ -682,11 +682,11 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, ((u8*)&iph[1])[3] = 0; ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); - ih->type=type; - ih->code=0; - ih->csum=0; - ih->group=group; - ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr)); + ih->type = type; + ih->code = 0; + ih->csum = 0; + ih->group = group; + ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); return ip_local_out(skb); } @@ -728,7 +728,7 @@ static void igmp_timer_expire(unsigned long data) struct in_device *in_dev = im->interface; spin_lock(&im->lock); - im->tm_running=0; + im->tm_running = 0; if (im->unsolicit_count) { im->unsolicit_count--; @@ -997,7 +997,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) --ANK */ if (arp_mc_map(addr, buf, dev, 0) == 0) - dev_mc_add(dev,buf,dev->addr_len,0); + dev_mc_add(dev, buf, dev->addr_len, 0); } /* @@ -1010,7 +1010,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr) struct net_device *dev = in_dev->dev; if (arp_mc_map(addr, buf, dev, 0) == 0) - dev_mc_delete(dev,buf,dev->addr_len,0); + dev_mc_delete(dev, buf, dev->addr_len, 0); } #ifdef CONFIG_IP_MULTICAST @@ -1210,10 +1210,10 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) if (!im) goto out; - im->users=1; - im->interface=in_dev; + im->users = 1; + im->interface = in_dev; in_dev_hold(in_dev); - im->multiaddr=addr; + im->multiaddr = addr; /* initial mode is (EX, empty) */ im->sfmode = MCAST_EXCLUDE; im->sfcount[MCAST_INCLUDE] = 0; @@ -1224,7 +1224,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) atomic_set(&im->refcnt, 1); spin_lock_init(&im->lock); #ifdef CONFIG_IP_MULTICAST - im->tm_running=0; + im->tm_running = 0; setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im); im->unsolicit_count = IGMP_Unsolicited_Report_Count; im->reporter = 0; @@ 
-1232,8 +1232,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) #endif im->loaded = 0; write_lock_bh(&in_dev->mc_list_lock); - im->next=in_dev->mc_list; - in_dev->mc_list=im; + im->next = in_dev->mc_list; + in_dev->mc_list = im; in_dev->mc_count++; write_unlock_bh(&in_dev->mc_list_lock); #ifdef CONFIG_IP_MULTICAST @@ -1279,7 +1279,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) ASSERT_RTNL(); for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { - if (i->multiaddr==addr) { + if (i->multiaddr == addr) { if (--i->users == 0) { write_lock_bh(&in_dev->mc_list_lock); *ip = i->next; @@ -1738,7 +1738,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) { int err; __be32 addr = imr->imr_multiaddr.s_addr; - struct ip_mc_socklist *iml=NULL, *i; + struct ip_mc_socklist *iml = NULL, *i; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); @@ -1769,7 +1769,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) err = -ENOBUFS; if (count >= sysctl_igmp_max_memberships) goto done; - iml = sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL); + iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); if (iml == NULL) goto done; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index bd1278a..36f4cbc 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -323,7 +323,7 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); -struct dst_entry* inet_csk_route_req(struct sock *sk, +struct dst_entry *inet_csk_route_req(struct sock *sk, const struct request_sock *req) { struct rtable *rt; diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index a456cee..b1fbe18 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -144,7 +144,7 @@ static void unlink_from_unused(struct inet_peer *p) * _stack is known to be NULL or not at compile time, * so compiler will optimize the if (_stack) tests. */ -#define lookup(_daddr,_stack) \ +#define lookup(_daddr, _stack) \ ({ \ struct inet_peer *u, **v; \ if (_stack != NULL) { \ diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 450016b..df3fe50 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -106,7 +106,7 @@ int ip_forward(struct sk_buff *skb) * We now generate an ICMP HOST REDIRECT giving the route * we calculated. */ - if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb->sp) + if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) ip_rt_send_redirect(skb); skb->priority = rt_tos2priority(iph->tos); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index e4f81f5..6659ac0 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -56,7 +56,7 @@ struct ipfrag_skb_cb int offset; }; -#define FRAG_CB(skb) ((struct ipfrag_skb_cb*)((skb)->cb)) +#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) /* Describe an entry in the "incomplete datagrams" queue. 
*/ struct ipq { @@ -559,9 +559,8 @@ out_nomem: goto out_fail; out_oversize: if (net_ratelimit()) - printk(KERN_INFO - "Oversized IP packet from " NIPQUAD_FMT ".\n", - NIPQUAD(qp->saddr)); + printk(KERN_INFO "Oversized IP packet from %pI4.\n", + &qp->saddr); out_fail: IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS); return err; @@ -608,7 +607,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = { .data = &init_net.ipv4.frags.high_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_IPFRAG_LOW_THRESH, @@ -616,7 +615,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = { .data = &init_net.ipv4.frags.low_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_IPFRAG_TIME, @@ -624,8 +623,8 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = { .data = &init_net.ipv4.frags.timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { } }; @@ -637,15 +636,15 @@ static struct ctl_table ip4_frags_ctl_table[] = { .data = &ip4_frags.secret_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { .procname = "ipfrag_max_dist", .data = &sysctl_ipfrag_max_dist, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero }, { } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 85c487b..191ef75 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -371,7 +371,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) by themself??? 
*/ - struct iphdr *iph = (struct iphdr*)skb->data; + struct iphdr *iph = (struct iphdr *)skb->data; __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); int grehlen = (iph->ihl<<2) + 4; const int type = icmp_hdr(skb)->type; @@ -632,7 +632,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (dev->header_ops && dev->type == ARPHRD_IPGRE) { gre_hlen = 0; - tiph = (struct iphdr*)skb->data; + tiph = (struct iphdr *)skb->data; } else { gre_hlen = tunnel->hlen; tiph = &tunnel->parms.iph; @@ -660,7 +660,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (neigh == NULL) goto tx_error; - addr6 = (struct in6_addr*)&neigh->primary_key; + addr6 = (struct in6_addr *)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if (addr_type == IPV6_ADDR_ANY) { @@ -726,7 +726,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) } #ifdef CONFIG_IPV6 else if (skb->protocol == htons(ETH_P_IPV6)) { - struct rt6_info *rt6 = (struct rt6_info*)skb->dst; + struct rt6_info *rt6 = (struct rt6_info *)skb->dst; if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) { if ((tunnel->parms.iph.daddr && @@ -800,7 +800,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) iph->ttl = old_iph->ttl; #ifdef CONFIG_IPV6 else if (skb->protocol == htons(ETH_P_IPV6)) - iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit; + iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; #endif else iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); @@ -962,7 +962,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) break; } } else { - unsigned nflags=0; + unsigned nflags = 0; t = netdev_priv(dev); @@ -1104,7 +1104,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) { - struct iphdr *iph = (struct iphdr*) skb_mac_header(skb); + struct iphdr *iph = (struct iphdr *) skb_mac_header(skb); memcpy(haddr, &iph->saddr, 4); return 4; } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 861978a..70bedab 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -294,10 +294,8 @@ static inline int ip_rcv_options(struct sk_buff *skb) if (!IN_DEV_SOURCE_ROUTE(in_dev)) { if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - printk(KERN_INFO "source route option " - NIPQUAD_FMT " -> " NIPQUAD_FMT "\n", - NIPQUAD(iph->saddr), - NIPQUAD(iph->daddr)); + printk(KERN_INFO "source route option %pI4 -> %pI4\n", + &iph->saddr, &iph->daddr); in_dev_put(in_dev); goto drop; } @@ -342,9 +340,9 @@ static int ip_rcv_finish(struct sk_buff *skb) struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); u32 idx = skb->dst->tclassid; st[idx&0xFF].o_packets++; - st[idx&0xFF].o_bytes+=skb->len; + st[idx&0xFF].o_bytes += skb->len; st[(idx>>16)&0xFF].i_packets++; - st[(idx>>16)&0xFF].i_bytes+=skb->len; + st[(idx>>16)&0xFF].i_bytes += skb->len; } #endif diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index d2a8f8b..46d7be2 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -430,7 +430,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) * single device frame, and queue such a frame for sending. 
*/ -int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) +int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct iphdr *iph; int raw = 0; @@ -720,7 +720,7 @@ static inline int ip_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, - int transhdrlen, int mtu,unsigned int flags) + int transhdrlen, int mtu, unsigned int flags) { struct sk_buff *skb; int err; @@ -741,7 +741,7 @@ static inline int ip_ufo_append_data(struct sock *sk, skb_reserve(skb, hh_len); /* create space for UDP/IP header */ - skb_put(skb,fragheaderlen + transhdrlen); + skb_put(skb, fragheaderlen + transhdrlen); /* initialize network header pointer */ skb_reset_network_header(skb); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 465abf0..e976efe 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -94,7 +94,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) { unsigned char optbuf[sizeof(struct ip_options) + 40]; - struct ip_options * opt = (struct ip_options*)optbuf; + struct ip_options * opt = (struct ip_options *)optbuf; if (IPCB(skb)->opt.optlen == 0) return; @@ -411,7 +411,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen) { struct inet_sock *inet = inet_sk(sk); - int val=0,err; + int val = 0, err; if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | @@ -437,7 +437,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, /* If optlen==0, it is equivalent to val == 0 */ if (ip_mroute_opt(optname)) - return ip_mroute_setsockopt(sk,optname,optval,optlen); + return ip_mroute_setsockopt(sk, optname, optval, optlen); err = 0; lock_sock(sk); @@ -549,7 +549,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, goto e_inval; if (optlen<1) goto e_inval; - if (val==-1) + if (val == -1) val = 1; if (val < 0 || val > 255) goto e_inval; @@ -573,12 +573,12 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { - if (copy_from_user(&mreq,optval,sizeof(mreq))) + if (copy_from_user(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); if (optlen >= sizeof(struct in_addr) && - copy_from_user(&mreq.imr_address,optval,sizeof(struct in_addr))) + copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) break; } @@ -626,11 +626,11 @@ static int do_ip_setsockopt(struct sock *sk, int level, goto e_inval; err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { - if (copy_from_user(&mreq,optval,sizeof(mreq))) + if (copy_from_user(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); - if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq))) + if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq))) break; } @@ -808,7 +808,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -ENOBUFS; break; } - gsf = kmalloc(optlen,GFP_KERNEL); + gsf = kmalloc(optlen, GFP_KERNEL); if (!gsf) { err = -ENOBUFS; break; @@ -828,7 +828,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, goto mc_msf_out; } msize = IP_MSFILTER_SIZE(gsf->gf_numsrc); - msf = kmalloc(msize,GFP_KERNEL); + msf = kmalloc(msize, GFP_KERNEL); if (!msf) { err = -ENOBUFS; goto mc_msf_out; @@ -971,9 +971,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, 
int optname, return -EOPNOTSUPP; if (ip_mroute_opt(optname)) - return ip_mroute_getsockopt(sk,optname,optval,optlen); + return ip_mroute_getsockopt(sk, optname, optval, optlen); - if (get_user(len,optlen)) + if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; @@ -984,7 +984,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_OPTIONS: { unsigned char optbuf[sizeof(struct ip_options)+40]; - struct ip_options * opt = (struct ip_options*)optbuf; + struct ip_options * opt = (struct ip_options *)optbuf; opt->optlen = 0; if (inet->opt) memcpy(optbuf, inet->opt, @@ -1154,13 +1154,13 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, len = 1; if (put_user(len, optlen)) return -EFAULT; - if (copy_to_user(optval,&ucval,1)) + if (copy_to_user(optval, &ucval, 1)) return -EFAULT; } else { len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; - if (copy_to_user(optval,&val,len)) + if (copy_to_user(optval, &val, len)) return -EFAULT; } return 0; @@ -1178,7 +1178,7 @@ int ip_getsockopt(struct sock *sk, int level, !ip_mroute_opt(optname)) { int len; - if (get_user(len,optlen)) + if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 38ccb6d..ec8264a 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -39,8 +39,8 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info) spi, IPPROTO_COMP, AF_INET); if (!x) return; - NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/" NIPQUAD_FMT "\n", - spi, NIPQUAD(iph->daddr)); + NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n", + spi, &iph->daddr); xfrm_state_put(x); } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 42065ff..42a0f3d 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -374,7 +374,7 @@ static int __init ic_defaults(void) */ if (!ic_host_name_set) - sprintf(init_utsname()->nodename, NIPQUAD_FMT, NIPQUAD(ic_myaddr)); + sprintf(init_utsname()->nodename, "%pI4", &ic_myaddr); if (root_server_addr == NONE) root_server_addr = ic_servaddr; @@ -387,11 +387,11 @@ static int __init ic_defaults(void) else if (IN_CLASSC(ntohl(ic_myaddr))) ic_netmask = htonl(IN_CLASSC_NET); else { - printk(KERN_ERR "IP-Config: Unable to guess netmask for address " NIPQUAD_FMT "\n", - NIPQUAD(ic_myaddr)); + printk(KERN_ERR "IP-Config: Unable to guess netmask for address %pI4\n", + &ic_myaddr); return -1; } - printk("IP-Config: Guessing netmask " NIPQUAD_FMT "\n", NIPQUAD(ic_netmask)); + printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask); } return 0; @@ -979,10 +979,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str ic_myaddr = b->your_ip; ic_servaddr = server_id; #ifdef IPCONFIG_DEBUG - printk("DHCP: Offered address " NIPQUAD_FMT, - NIPQUAD(ic_myaddr)); - printk(" by server " NIPQUAD_FMT "\n", - NIPQUAD(ic_servaddr)); + printk("DHCP: Offered address %pI4 by server %pI4\n", + &ic_myaddr, &ic_servaddr); #endif /* The DHCP indicated server address takes * precedence over the bootp header one if @@ -1177,11 +1175,11 @@ static int __init ic_dynamic(void) return -1; } - printk("IP-Config: Got %s answer from " NIPQUAD_FMT ", ", + printk("IP-Config: Got %s answer from %pI4, ", ((ic_got_reply & IC_RARP) ? "RARP" : (ic_proto_enabled & IC_USE_DHCP) ? 
"DHCP" : "BOOTP"), - NIPQUAD(ic_servaddr)); - printk("my address is " NIPQUAD_FMT "\n", NIPQUAD(ic_myaddr)); + &ic_servaddr); + printk("my address is %pI4\n", &ic_myaddr); return 0; } @@ -1206,14 +1204,12 @@ static int pnp_seq_show(struct seq_file *seq, void *v) "domain %s\n", ic_domain); for (i = 0; i < CONF_NAMESERVERS_MAX; i++) { if (ic_nameservers[i] != NONE) - seq_printf(seq, - "nameserver " NIPQUAD_FMT "\n", - NIPQUAD(ic_nameservers[i])); + seq_printf(seq, "nameserver %pI4\n", + &ic_nameservers[i]); } if (ic_servaddr != NONE) - seq_printf(seq, - "bootserver " NIPQUAD_FMT "\n", - NIPQUAD(ic_servaddr)); + seq_printf(seq, "bootserver %pI4\n", + &ic_servaddr); return 0; } @@ -1387,13 +1383,13 @@ static int __init ip_auto_config(void) */ printk("IP-Config: Complete:"); printk("\n device=%s", ic_dev->name); - printk(", addr=" NIPQUAD_FMT, NIPQUAD(ic_myaddr)); - printk(", mask=" NIPQUAD_FMT, NIPQUAD(ic_netmask)); - printk(", gw=" NIPQUAD_FMT, NIPQUAD(ic_gateway)); + printk(", addr=%pI4", &ic_myaddr); + printk(", mask=%pI4", &ic_netmask); + printk(", gw=%pI4", &ic_gateway); printk(",\n host=%s, domain=%s, nis-domain=%s", utsname()->nodename, ic_domain, utsname()->domainname); - printk(",\n bootserver=" NIPQUAD_FMT, NIPQUAD(ic_servaddr)); - printk(", rootserver=" NIPQUAD_FMT, NIPQUAD(root_server_addr)); + printk(",\n bootserver=%pI4", &ic_servaddr); + printk(", rootserver=%pI4", &root_server_addr); printk(", rootpath=%s", root_server_path); printk("\n"); #endif /* !SILENT */ diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 29609d2..b3c3d7b 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -281,7 +281,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) 8 bytes of packet payload. It means, that precise relaying of ICMP in the real Internet is absolutely infeasible. 
*/ - struct iphdr *iph = (struct iphdr*)skb->data; + struct iphdr *iph = (struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b42e082..05ed336 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -331,7 +331,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c) atomic_dec(&cache_resolve_queue_len); - while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) { + while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); nlh->nlmsg_type = NLMSG_ERROR; @@ -477,13 +477,13 @@ static int vif_add(struct vifctl *vifc, int mrtsock) /* * Fill in the VIF structures */ - v->rate_limit=vifc->vifc_rate_limit; - v->local=vifc->vifc_lcl_addr.s_addr; - v->remote=vifc->vifc_rmt_addr.s_addr; - v->flags=vifc->vifc_flags; + v->rate_limit = vifc->vifc_rate_limit; + v->local = vifc->vifc_lcl_addr.s_addr; + v->remote = vifc->vifc_rmt_addr.s_addr; + v->flags = vifc->vifc_flags; if (!mrtsock) v->flags |= VIFF_STATIC; - v->threshold=vifc->vifc_threshold; + v->threshold = vifc->vifc_threshold; v->bytes_in = 0; v->bytes_out = 0; v->pkt_in = 0; @@ -494,7 +494,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) /* And finish update writing critical data */ write_lock_bh(&mrt_lock); - v->dev=dev; + v->dev = dev; #ifdef CONFIG_IP_PIMSM if (v->flags&VIFF_REGISTER) reg_vif_num = vifi; @@ -507,7 +507,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) { - int line=MFC_HASH(mcastgrp,origin); + int line = MFC_HASH(mcastgrp, origin); struct mfc_cache *c; for (c=mfc_cache_array[line]; c; c = c->next) { @@ -522,8 +522,8 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) */ static struct mfc_cache *ipmr_cache_alloc(void) { - struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); - if (c==NULL) + struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); + if (c == NULL) return NULL; c->mfc_un.res.minvif = MAXVIFS; return c; @@ -531,8 +531,8 @@ static struct mfc_cache *ipmr_cache_alloc(void) static struct mfc_cache *ipmr_cache_alloc_unres(void) { - struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); - if (c==NULL) + struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); + if (c == NULL) return NULL; skb_queue_head_init(&c->mfc_un.unres.unresolved); c->mfc_un.unres.expires = jiffies + 10*HZ; @@ -552,7 +552,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) * Play the pending entries through our router */ - while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) { + while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); @@ -637,7 +637,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) * Add our header */ - igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); + igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); igmp->type = msg->im_msgtype = assert; igmp->code = 0; @@ -653,7 +653,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) /* * Deliver to mrouted */ - if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) { + if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) { if (net_ratelimit()) printk(KERN_WARNING "mroute: pending queue full, 
dropping entries.\n"); kfree_skb(skb); @@ -685,7 +685,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) * Create a new entry if allowable */ - if (atomic_read(&cache_resolve_queue_len)>=10 || + if (atomic_read(&cache_resolve_queue_len) >= 10 || (c=ipmr_cache_alloc_unres())==NULL) { spin_unlock_bh(&mfc_unres_lock); @@ -728,7 +728,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) kfree_skb(skb); err = -ENOBUFS; } else { - skb_queue_tail(&c->mfc_un.unres.unresolved,skb); + skb_queue_tail(&c->mfc_un.unres.unresolved, skb); err = 0; } @@ -745,7 +745,7 @@ static int ipmr_mfc_delete(struct mfcctl *mfc) int line; struct mfc_cache *c, **cp; - line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); + line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && @@ -766,7 +766,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) int line; struct mfc_cache *uc, *c, **cp; - line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); + line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && @@ -787,13 +787,13 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) return -EINVAL; - c=ipmr_cache_alloc(); - if (c==NULL) + c = ipmr_cache_alloc(); + if (c == NULL) return -ENOMEM; - c->mfc_origin=mfc->mfcc_origin.s_addr; - c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr; - c->mfc_parent=mfc->mfcc_parent; + c->mfc_origin = mfc->mfcc_origin.s_addr; + c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; + c->mfc_parent = mfc->mfcc_parent; ipmr_update_thresholds(c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; @@ -846,7 +846,7 @@ static void mroute_clean_tables(struct sock *sk) /* * Wipe the cache */ - for (i=0;i<MFC_LINES;i++) { + for (i=0; i<MFC_LINES; i++) { struct mfc_cache *c, **cp; cp = &mfc_cache_array[i]; @@ -887,7 +887,7 @@ static void mrtsock_destruct(struct sock *sk) IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--; write_lock_bh(&mrt_lock); - mroute_socket=NULL; + mroute_socket = NULL; write_unlock_bh(&mrt_lock); mroute_clean_tables(sk); @@ -902,7 +902,7 @@ static void mrtsock_destruct(struct sock *sk) * MOSPF/PIM router set up we can clean this up. 
*/ -int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen) +int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen) { int ret; struct vifctl vif; @@ -918,7 +918,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_IGMP) return -EOPNOTSUPP; - if (optlen!=sizeof(int)) + if (optlen != sizeof(int)) return -ENOPROTOOPT; rtnl_lock(); @@ -930,7 +930,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt ret = ip_ra_control(sk, 1, mrtsock_destruct); if (ret == 0) { write_lock_bh(&mrt_lock); - mroute_socket=sk; + mroute_socket = sk; write_unlock_bh(&mrt_lock); IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++; @@ -938,19 +938,19 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt rtnl_unlock(); return ret; case MRT_DONE: - if (sk!=mroute_socket) + if (sk != mroute_socket) return -EACCES; return ip_ra_control(sk, 0, NULL); case MRT_ADD_VIF: case MRT_DEL_VIF: - if (optlen!=sizeof(vif)) + if (optlen != sizeof(vif)) return -EINVAL; - if (copy_from_user(&vif,optval,sizeof(vif))) + if (copy_from_user(&vif, optval, sizeof(vif))) return -EFAULT; if (vif.vifc_vifi >= MAXVIFS) return -ENFILE; rtnl_lock(); - if (optname==MRT_ADD_VIF) { + if (optname == MRT_ADD_VIF) { ret = vif_add(&vif, sk==mroute_socket); } else { ret = vif_delete(vif.vifc_vifi, 0); @@ -964,12 +964,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt */ case MRT_ADD_MFC: case MRT_DEL_MFC: - if (optlen!=sizeof(mfc)) + if (optlen != sizeof(mfc)) return -EINVAL; - if (copy_from_user(&mfc,optval, sizeof(mfc))) + if (copy_from_user(&mfc, optval, sizeof(mfc))) return -EFAULT; rtnl_lock(); - if (optname==MRT_DEL_MFC) + if (optname == MRT_DEL_MFC) ret = ipmr_mfc_delete(&mfc); else ret = ipmr_mfc_add(&mfc, sk==mroute_socket); @@ -1028,12 +1028,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt * Getsock opt support for the multicast routing system. 
*/ -int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen) +int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) { int olr; int val; - if (optname!=MRT_VERSION && + if (optname != MRT_VERSION && #ifdef CONFIG_IP_PIMSM optname!=MRT_PIM && #endif @@ -1047,17 +1047,17 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u if (olr < 0) return -EINVAL; - if (put_user(olr,optlen)) + if (put_user(olr, optlen)) return -EFAULT; - if (optname==MRT_VERSION) - val=0x0305; + if (optname == MRT_VERSION) + val = 0x0305; #ifdef CONFIG_IP_PIMSM - else if (optname==MRT_PIM) - val=mroute_do_pim; + else if (optname == MRT_PIM) + val = mroute_do_pim; #endif else - val=mroute_do_assert; - if (copy_to_user(optval,&val,olr)) + val = mroute_do_assert; + if (copy_to_user(optval, &val, olr)) return -EFAULT; return 0; } @@ -1075,27 +1075,27 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) switch (cmd) { case SIOCGETVIFCNT: - if (copy_from_user(&vr,arg,sizeof(vr))) + if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; - if (vr.vifi>=maxvif) + if (vr.vifi >= maxvif) return -EINVAL; read_lock(&mrt_lock); vif=&vif_table[vr.vifi]; if (VIF_EXISTS(vr.vifi)) { - vr.icount=vif->pkt_in; - vr.ocount=vif->pkt_out; - vr.ibytes=vif->bytes_in; - vr.obytes=vif->bytes_out; + vr.icount = vif->pkt_in; + vr.ocount = vif->pkt_out; + vr.ibytes = vif->bytes_in; + vr.obytes = vif->bytes_out; read_unlock(&mrt_lock); - if (copy_to_user(arg,&vr,sizeof(vr))) + if (copy_to_user(arg, &vr, sizeof(vr))) return -EFAULT; return 0; } read_unlock(&mrt_lock); return -EADDRNOTAVAIL; case SIOCGETSGCNT: - if (copy_from_user(&sr,arg,sizeof(sr))) + if (copy_from_user(&sr, arg, sizeof(sr))) return -EFAULT; read_lock(&mrt_lock); @@ -1106,7 +1106,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) sr.wrong_if = c->mfc_un.res.wrong_if; read_unlock(&mrt_lock); - if (copy_to_user(arg,&sr,sizeof(sr))) + if (copy_to_user(arg, &sr, sizeof(sr))) return -EFAULT; return 0; } @@ -1130,15 +1130,15 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; v=&vif_table[0]; - for (ct=0;ct<maxvif;ct++,v++) { - if (v->dev==dev) + for (ct=0; ct<maxvif; ct++,v++) { + if (v->dev == dev) vif_delete(ct, 1); } return NOTIFY_DONE; } -static struct notifier_block ip_mr_notifier={ +static struct notifier_block ip_mr_notifier = { .notifier_call = ipmr_device_event, }; @@ -1204,7 +1204,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) #ifdef CONFIG_IP_PIMSM if (vif->flags & VIFF_REGISTER) { vif->pkt_out++; - vif->bytes_out+=skb->len; + vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); @@ -1254,7 +1254,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) } vif->pkt_out++; - vif->bytes_out+=skb->len; + vif->bytes_out += skb->len; dst_release(skb->dst); skb->dst = &rt->u.dst; @@ -1352,7 +1352,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local } vif_table[vif].pkt_in++; - vif_table[vif].bytes_in+=skb->len; + vif_table[vif].bytes_in += skb->len; /* * Forward the frame @@ -1364,7 +1364,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local if (skb2) ipmr_queue_xmit(skb2, cache, psend); } - psend=ct; + psend = ct; } } if (psend != -1) { @@ -1428,7 +1428,7 
@@ int ip_mr_input(struct sk_buff *skb) /* * No usable cache entry */ - if (cache==NULL) { + if (cache == NULL) { int vif; if (local) { @@ -1602,13 +1602,13 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) if (dev) RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); - mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0)); + mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { if (c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; - nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); + nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); nhp->rtnh_flags = 0; nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; nhp->rtnh_ifindex = vif_table[ct].dev->ifindex; @@ -1634,7 +1634,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) read_lock(&mrt_lock); cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); - if (cache==NULL) { + if (cache == NULL) { struct sk_buff *skb2; struct iphdr *iph; struct net_device *dev; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 8d70d29..7ea88b6 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -142,15 +142,15 @@ static inline int arp_packet_match(const struct arphdr *arphdr, ARPT_INV_TGTIP)) { dprintf("Source or target IP address mismatch.\n"); - dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n", - NIPQUAD(src_ipaddr), - NIPQUAD(arpinfo->smsk.s_addr), - NIPQUAD(arpinfo->src.s_addr), + dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", + &src_ipaddr, + &arpinfo->smsk.s_addr, + &arpinfo->src.s_addr, arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); - dprintf("TGT: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n", - NIPQUAD(tgt_ipaddr), - NIPQUAD(arpinfo->tmsk.s_addr), - NIPQUAD(arpinfo->tgt.s_addr), + dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", + &tgt_ipaddr, + &arpinfo->tmsk.s_addr, + &arpinfo->tgt.s_addr, arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); return 0; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 213fb27..ef8b6ca 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -94,15 +94,11 @@ ip_packet_match(const struct iphdr *ip, IPT_INV_DSTIP)) { dprintf("Source or dest mismatch.\n"); - dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n", - NIPQUAD(ip->saddr), - NIPQUAD(ipinfo->smsk.s_addr), - NIPQUAD(ipinfo->src.s_addr), + dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", + &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); - dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n", - NIPQUAD(ip->daddr), - NIPQUAD(ipinfo->dmsk.s_addr), - NIPQUAD(ipinfo->dst.s_addr), + dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", + &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, ipinfo->invflags & IPT_INV_DSTIP ? 
" (INV)" : ""); return false; } diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 7ac1677..2e4f98b 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -168,7 +168,7 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip, char buffer[16]; /* create proc dir entry */ - sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); + sprintf(buffer, "%pI4", &ip); c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR, clusterip_procdir, &clusterip_proc_fops, c); @@ -373,7 +373,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par) config = clusterip_config_find_get(e->ip.dst.s_addr, 1); if (!config) { if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { - printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr)); + printk(KERN_WARNING "CLUSTERIP: no config found for %pI4, need 'new'\n", &e->ip.dst.s_addr); return false; } else { struct net_device *dev; @@ -478,9 +478,8 @@ static void arp_print(struct arp_payload *payload) } hbuffer[--k]='\0'; - printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n", - NIPQUAD(payload->src_ip), hbuffer, - NIPQUAD(payload->dst_ip)); + printk("src %pI4@%s, dst %pI4\n", + &payload->src_ip, hbuffer, &payload->dst_ip); } #endif diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index fc6ce04..4614a696f 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c @@ -54,8 +54,8 @@ static void dump_packet(const struct nf_loginfo *info, /* Important fields: * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */ /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */ - printk("SRC=%u.%u.%u.%u DST=%u.%u.%u.%u ", - NIPQUAD(ih->saddr), NIPQUAD(ih->daddr)); + printk("SRC=%pI4 DST=%pI4 ", + &ih->saddr, &ih->daddr); /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */ printk("LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", @@ -262,8 +262,7 @@ static void dump_packet(const struct nf_loginfo *info, break; case ICMP_REDIRECT: /* Max length: 24 "GATEWAY=255.255.255.255 " */ - printk("GATEWAY=%u.%u.%u.%u ", - NIPQUAD(ich->un.gateway)); + printk("GATEWAY=%pI4 ", &ich->un.gateway); /* Fall through */ case ICMP_DEST_UNREACH: case ICMP_SOURCE_QUENCH: diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 4a7c352..b2141e1 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -60,9 +60,8 @@ static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple, static int ipv4_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { - return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ", - NIPQUAD(tuple->src.u3.ip), - NIPQUAD(tuple->dst.u3.ip)); + return seq_printf(s, "src=%pI4 dst=%pI4 ", + &tuple->src.u3.ip, &tuple->dst.u3.ip); } static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, @@ -198,7 +197,7 @@ static ctl_table ip_ct_sysctl_table[] = { .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_NF_CONNTRACK_COUNT, @@ -206,7 +205,7 @@ static ctl_table ip_ct_sysctl_table[] = { .data = &init_net.ct.count, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS, @@ -214,7 +213,7 @@ static ctl_table ip_ct_sysctl_table[] = { .data = 
&nf_conntrack_htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_NF_CONNTRACK_CHECKSUM, @@ -222,7 +221,7 @@ static ctl_table ip_ct_sysctl_table[] = { .data = &init_net.ct.sysctl_checksum, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_NF_CONNTRACK_LOG_INVALID, @@ -230,8 +229,8 @@ static ctl_table ip_ct_sysctl_table[] = { .data = &init_net.ct.sysctl_log_invalid, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, + .proc_handler = proc_dointvec_minmax, + .strategy = sysctl_intvec, .extra1 = &log_invalid_proto_min, .extra2 = &log_invalid_proto_max, }, @@ -284,17 +283,17 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) .tuple.dst.u3.ip; memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); - pr_debug("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", - NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); + pr_debug("SO_ORIGINAL_DST: %pI4 %u\n", + &sin.sin_addr.s_addr, ntohs(sin.sin_port)); nf_ct_put(ct); if (copy_to_user(user, &sin, sizeof(sin)) != 0) return -EFAULT; else return 0; } - pr_debug("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n", - NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port), - NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port)); + pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n", + &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port), + &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port)); return -ENOENT; } diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 4e88792..1fd3ef7 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -272,7 +272,7 @@ static struct ctl_table icmp_sysctl_table[] = { .data = &nf_ct_icmp_timeout, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, { .ctl_name = 0 @@ -285,7 +285,7 @@ static struct ctl_table icmp_compat_sysctl_table[] = { .data = &nf_ct_icmp_timeout, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, { .ctl_name = 0 diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index ee47bf2..7e8e6fc 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -119,10 +119,9 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, (ntohl(addr.ip) & 0xff000000) == 0x7f000000) i = 0; - pr_debug("nf_nat_ras: set signal address " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(addr.ip), port, - NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), + pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", + &addr.ip, port, + &ct->tuplehash[!dir].tuple.dst.u3.ip, info->sig_port[!dir]); return set_h225_addr(skb, data, 0, &taddr[i], &ct->tuplehash[!dir]. 
@@ -131,10 +130,9 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && port == info->sig_port[dir]) { /* GK->GW */ - pr_debug("nf_nat_ras: set signal address " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(addr.ip), port, - NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip), + pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", + &addr.ip, port, + &ct->tuplehash[!dir].tuple.src.u3.ip, info->sig_port[!dir]); return set_h225_addr(skb, data, 0, &taddr[i], &ct->tuplehash[!dir]. @@ -162,10 +160,9 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct, if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && port == ct->tuplehash[dir].tuple.src.u.udp.port) { - pr_debug("nf_nat_ras: set rasAddress " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(addr.ip), ntohs(port), - NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), + pr_debug("nf_nat_ras: set rasAddress %pI4:%hu->%pI4:%hu\n", + &addr.ip, ntohs(port), + &ct->tuplehash[!dir].tuple.dst.u3.ip, ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); return set_h225_addr(skb, data, 0, &taddr[i], &ct->tuplehash[!dir].tuple.dst.u3, @@ -257,15 +254,15 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, } /* Success */ - pr_debug("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(rtp_exp->tuple.src.u3.ip), + pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n", + &rtp_exp->tuple.src.u3.ip, ntohs(rtp_exp->tuple.src.u.udp.port), - NIPQUAD(rtp_exp->tuple.dst.u3.ip), + &rtp_exp->tuple.dst.u3.ip, ntohs(rtp_exp->tuple.dst.u.udp.port)); - pr_debug("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(rtcp_exp->tuple.src.u3.ip), + pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n", + &rtcp_exp->tuple.src.u3.ip, ntohs(rtcp_exp->tuple.src.u.udp.port), - NIPQUAD(rtcp_exp->tuple.dst.u3.ip), + &rtcp_exp->tuple.dst.u3.ip, ntohs(rtcp_exp->tuple.dst.u.udp.port)); return 0; @@ -307,10 +304,10 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, return -1; } - pr_debug("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.u3.ip), + pr_debug("nf_nat_h323: expect T.120 %pI4:%hu->%pI4:%hu\n", + &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.u3.ip), + &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.tcp.port)); return 0; @@ -361,10 +358,10 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, return -1; } - pr_debug("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.u3.ip), + pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n", + &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.u3.ip), + &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.tcp.port)); return 0; @@ -455,10 +452,10 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, } /* Success */ - pr_debug("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.u3.ip), + pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n", + &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.u3.ip), + &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.tcp.port)); return 0; @@ -524,11 +521,10 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, } /* Success */ - pr_debug("nf_nat_q931: expect Call Forwarding " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.u3.ip), + 
pr_debug("nf_nat_q931: expect Call Forwarding %pI4:%hu->%pI4:%hu\n", + &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.u3.ip), + &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.tcp.port)); return 0; diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c index fe6f9ce..ea83a88 100644 --- a/net/ipv4/netfilter/nf_nat_irc.c +++ b/net/ipv4/netfilter/nf_nat_irc.c @@ -55,8 +55,8 @@ static unsigned int help(struct sk_buff *skb, ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); sprintf(buffer, "%u %u", ip, port); - pr_debug("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", - buffer, NIPQUAD(ip), port); + pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", + buffer, &ip, port); ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, matchoff, matchlen, buffer, diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index bea54a6..a4f1c347 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c @@ -98,8 +98,7 @@ static void warn_if_extra_mangle(struct net *net, __be32 dstip, __be32 srcip) if (rt->rt_src != srcip && !warned) { printk("NAT: no longer support implicit source local NAT\n"); - printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n", - NIPQUAD(srcip), NIPQUAD(dstip)); + printk("NAT: packet src %pI4 -> dst %pI4\n", &srcip, &dstip); warned = 1; } ip_rt_put(rt); @@ -166,8 +165,7 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) struct nf_nat_range range = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; - pr_debug("Allocating NULL binding for %p (%u.%u.%u.%u)\n", - ct, NIPQUAD(ip)); + pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, &ip); return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); } diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 1454432..07d61a5 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -74,8 +74,7 @@ static int map_addr(struct sk_buff *skb, if (newaddr == addr->ip && newport == port) return 1; - buflen = sprintf(buffer, "%u.%u.%u.%u:%u", - NIPQUAD(newaddr), ntohs(newport)); + buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); return mangle_packet(skb, dptr, datalen, matchoff, matchlen, buffer, buflen); @@ -152,8 +151,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, &addr) > 0 && addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { - __be32 ip = ct->tuplehash[!dir].tuple.dst.u3.ip; - buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); + buflen = sprintf(buffer, "%pI4", + &ct->tuplehash[!dir].tuple.dst.u3.ip); if (!mangle_packet(skb, dptr, datalen, poff, plen, buffer, buflen)) return NF_DROP; @@ -166,8 +165,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, &addr) > 0 && addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { - __be32 ip = ct->tuplehash[!dir].tuple.src.u3.ip; - buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); + buflen = sprintf(buffer, "%pI4", + &ct->tuplehash[!dir].tuple.src.u3.ip); if (!mangle_packet(skb, dptr, datalen, poff, plen, buffer, buflen)) return NF_DROP; @@ -279,8 +278,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, if (exp->tuple.dst.u3.ip != exp->saved_ip || exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { - buflen = sprintf(buffer, "%u.%u.%u.%u:%u", - NIPQUAD(newip), port); + buflen = sprintf(buffer, "%pI4:%u", &newip, port); if (!mangle_packet(skb, dptr, 
datalen, matchoff, matchlen, buffer, buflen)) goto err; @@ -345,7 +343,7 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr, char buffer[sizeof("nnn.nnn.nnn.nnn")]; unsigned int buflen; - buflen = sprintf(buffer, NIPQUAD_FMT, NIPQUAD(addr->ip)); + buflen = sprintf(buffer, "%pI4", &addr->ip); if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term, buffer, buflen)) return 0; @@ -380,7 +378,7 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, unsigned int buflen; /* Mangle session description owner and contact addresses */ - buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(addr->ip)); + buflen = sprintf(buffer, "%pI4", &addr->ip); if (mangle_sdp_packet(skb, dptr, dataoff, datalen, SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, buffer, buflen)) diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 8303e4b..182f845 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -930,8 +930,8 @@ static inline void mangle_address(unsigned char *begin, } if (debug) - printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to " - "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr)); + printk(KERN_DEBUG "bsalg: mapped %pI4 to %pI4\n", + &old, addr); } } @@ -1267,9 +1267,8 @@ static int help(struct sk_buff *skb, unsigned int protoff, */ if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) { if (net_ratelimit()) - printk(KERN_WARNING "SNMP: dropping malformed packet " - "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n", - NIPQUAD(iph->saddr), NIPQUAD(iph->daddr)); + printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n", + &iph->saddr, &iph->daddr); return NF_DROP; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index cd97574..998fcff 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -247,7 +247,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info) } if (inet->recverr) { - struct iphdr *iph = (struct iphdr*)skb->data; + struct iphdr *iph = (struct iphdr *)skb->data; u8 *payload = skb->data + (iph->ihl << 2); if (inet->hdrincl) @@ -465,7 +465,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, */ if (msg->msg_namelen) { - struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name; + struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; err = -EINVAL; if (msg->msg_namelen < sizeof(*usin)) goto out; @@ -851,7 +851,7 @@ struct proto raw_prot = { static struct sock *raw_get_first(struct seq_file *seq) { struct sock *sk; - struct raw_iter_state* state = raw_seq_private(seq); + struct raw_iter_state *state = raw_seq_private(seq); for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; ++state->bucket) { @@ -868,7 +868,7 @@ found: static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) { - struct raw_iter_state* state = raw_seq_private(seq); + struct raw_iter_state *state = raw_seq_private(seq); do { sk = sk_next(sk); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2ea6dcc..0dc0c38 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256; static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ; +static int rt_chain_length_max __read_mostly = 20; static void rt_worker_func(struct work_struct *work); static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); @@ -145,6 +146,7 @@ static struct dst_entry 
*ipv4_negative_advice(struct dst_entry *dst); static void ipv4_link_failure(struct sk_buff *skb); static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); static int rt_garbage_collect(struct dst_ops *ops); +static void rt_emergency_hash_rebuild(struct net *net); static struct dst_ops ipv4_dst_ops = { @@ -201,6 +203,7 @@ const __u8 ip_tos2prio[16] = { struct rt_hash_bucket { struct rtable *chain; }; + #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ defined(CONFIG_PROVE_LOCKING) /* @@ -674,6 +677,20 @@ static inline u32 rt_score(struct rtable *rt) return score; } +static inline bool rt_caching(const struct net *net) +{ + return net->ipv4.current_rt_cache_rebuild_count <= + net->ipv4.sysctl_rt_cache_rebuild_count; +} + +static inline bool compare_hash_inputs(const struct flowi *fl1, + const struct flowi *fl2) +{ + return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | + (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | + (fl1->iif ^ fl2->iif)) == 0); +} + static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) { return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | @@ -753,11 +770,24 @@ static void rt_do_flush(int process_context) } } +/* + * While freeing expired entries, we compute average chain length + * and standard deviation, using fixed-point arithmetic. + * This to have an estimation of rt_chain_length_max + * rt_chain_length_max = max(elasticity, AVG + 4*SD) + * We use 3 bits for frational part, and 29 (or 61) for magnitude. + */ + +#define FRACT_BITS 3 +#define ONE (1UL << FRACT_BITS) + static void rt_check_expire(void) { static unsigned int rover; unsigned int i = rover, goal; struct rtable *rth, **rthp; + unsigned long length = 0, samples = 0; + unsigned long sum = 0, sum2 = 0; u64 mult; mult = ((u64)ip_rt_gc_interval) << rt_hash_log; @@ -766,6 +796,7 @@ static void rt_check_expire(void) goal = (unsigned int)mult; if (goal > rt_hash_mask) goal = rt_hash_mask + 1; + length = 0; for (; goal > 0; goal--) { unsigned long tmo = ip_rt_gc_timeout; @@ -775,6 +806,8 @@ static void rt_check_expire(void) if (need_resched()) cond_resched(); + samples++; + if (*rthp == NULL) continue; spin_lock_bh(rt_hash_lock_addr(i)); @@ -789,11 +822,29 @@ static void rt_check_expire(void) if (time_before_eq(jiffies, rth->u.dst.expires)) { tmo >>= 1; rthp = &rth->u.dst.rt_next; + /* + * Only bump our length if the hash + * inputs on entries n and n+1 are not + * the same, we only count entries on + * a chain with equal hash inputs once + * so that entries for different QOS + * levels, and other non-hash input + * attributes don't unfairly skew + * the length computation + */ + if ((*rthp == NULL) || + !compare_hash_inputs(&(*rthp)->fl, + &rth->fl)) + length += ONE; continue; } } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) { tmo >>= 1; rthp = &rth->u.dst.rt_next; + if ((*rthp == NULL) || + !compare_hash_inputs(&(*rthp)->fl, + &rth->fl)) + length += ONE; continue; } @@ -802,6 +853,15 @@ static void rt_check_expire(void) rt_free(rth); } spin_unlock_bh(rt_hash_lock_addr(i)); + sum += length; + sum2 += length*length; + } + if (samples) { + unsigned long avg = sum / samples; + unsigned long sd = int_sqrt(sum2 / samples - avg*avg); + rt_chain_length_max = max_t(unsigned long, + ip_rt_gc_elasticity, + (avg + 4*sd) >> FRACT_BITS); } rover = i; } @@ -851,6 +911,26 @@ static void rt_secret_rebuild(unsigned long __net) mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval); } +static void rt_secret_rebuild_oneshot(struct net 
*net) +{ + del_timer_sync(&net->ipv4.rt_secret_timer); + rt_cache_invalidate(net); + if (ip_rt_secret_interval) { + net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval; + add_timer(&net->ipv4.rt_secret_timer); + } +} + +static void rt_emergency_hash_rebuild(struct net *net) +{ + if (net_ratelimit()) { + printk(KERN_WARNING "Route hash chain too long!\n"); + printk(KERN_WARNING "Adjust your secret_interval!\n"); + } + + rt_secret_rebuild_oneshot(net); +} + /* Short description of GC goals. @@ -989,6 +1069,7 @@ out: return 0; static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) { struct rtable *rth, **rthp; + struct rtable *rthi; unsigned long now; struct rtable *cand, **candp; u32 min_score; @@ -1002,7 +1083,13 @@ restart: candp = NULL; now = jiffies; + if (!rt_caching(dev_net(rt->u.dst.dev))) { + rt_drop(rt); + return 0; + } + rthp = &rt_hash_table[hash].chain; + rthi = NULL; spin_lock_bh(rt_hash_lock_addr(hash)); while ((rth = *rthp) != NULL) { @@ -1048,6 +1135,17 @@ restart: chain_length++; rthp = &rth->u.dst.rt_next; + + /* + * check to see if the next entry in the chain + * contains the same hash input values as rt. If it does + * This is where we will insert into the list, instead of + * at the head. This groups entries that differ by aspects not + * relvant to the hash function together, which we use to adjust + * our chain length + */ + if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl)) + rthi = rth; } if (cand) { @@ -1061,6 +1159,16 @@ restart: *candp = cand->u.dst.rt_next; rt_free(cand); } + } else { + if (chain_length > rt_chain_length_max) { + struct net *net = dev_net(rt->u.dst.dev); + int num = ++net->ipv4.current_rt_cache_rebuild_count; + if (!rt_caching(dev_net(rt->u.dst.dev))) { + printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", + rt->u.dst.dev->name, num); + } + rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); + } } /* Try to bind route to arp only if it is output @@ -1098,14 +1206,17 @@ restart: } } - rt->u.dst.rt_next = rt_hash_table[hash].chain; + if (rthi) + rt->u.dst.rt_next = rthi->u.dst.rt_next; + else + rt->u.dst.rt_next = rt_hash_table[hash].chain; + #if RT_CACHE_DEBUG >= 2 if (rt->u.dst.rt_next) { struct rtable *trt; - printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash, - NIPQUAD(rt->rt_dst)); + printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst); for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next) - printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst)); + printk(" . %pI4", &trt->rt_dst); printk("\n"); } #endif @@ -1114,7 +1225,11 @@ restart: * previous writes to rt are comitted to memory * before making rt visible to other CPUS. */ - rcu_assign_pointer(rt_hash_table[hash].chain, rt); + if (rthi) + rcu_assign_pointer(rthi->u.dst.rt_next, rt); + else + rcu_assign_pointer(rt_hash_table[hash].chain, rt); + spin_unlock_bh(rt_hash_lock_addr(hash)); *rp = rt; return 0; @@ -1217,6 +1332,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, || ipv4_is_zeronet(new_gw)) goto reject_redirect; + if (!rt_caching(net)) + goto reject_redirect; + if (!IN_DEV_SHARED_MEDIA(in_dev)) { if (!inet_addr_onlink(in_dev, new_gw, old_gw)) goto reject_redirect; @@ -1267,7 +1385,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, /* Copy all the information. 
*/ *rt = *rth; - INIT_RCU_HEAD(&rt->u.dst.rcu_head); rt->u.dst.__use = 1; atomic_set(&rt->u.dst.__refcnt, 1); rt->u.dst.child = NULL; @@ -1280,7 +1397,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, rt->u.dst.path = &rt->u.dst; rt->u.dst.neighbour = NULL; rt->u.dst.hh = NULL; +#ifdef CONFIG_XFRM rt->u.dst.xfrm = NULL; +#endif rt->rt_genid = rt_genid(net); rt->rt_flags |= RTCF_REDIRECTED; @@ -1324,11 +1443,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, reject_redirect: #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about " - NIPQUAD_FMT " ignored.\n" - " Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n", - NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw), - NIPQUAD(saddr), NIPQUAD(daddr)); + printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n" + " Advised path = %pI4 -> %pI4\n", + &old_gw, dev->name, &new_gw, + &saddr, &daddr); #endif in_dev_put(in_dev); } @@ -1348,9 +1466,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) rt->fl.oif, rt_genid(dev_net(dst->dev))); #if RT_CACHE_DEBUG >= 1 - printk(KERN_DEBUG "ipv4_negative_advice: redirect to " - NIPQUAD_FMT "/%02x dropped\n", - NIPQUAD(rt->rt_dst), rt->fl.fl4_tos); + printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n", + &rt->rt_dst, rt->fl.fl4_tos); #endif rt_del(hash, rt); ret = NULL; @@ -1414,10 +1531,9 @@ void ip_rt_send_redirect(struct sk_buff *skb) if (IN_DEV_LOG_MARTIANS(in_dev) && rt->u.dst.rate_tokens == ip_rt_redirect_number && net_ratelimit()) - printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores " - "redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n", - NIPQUAD(rt->rt_src), rt->rt_iif, - NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway)); + printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", + &rt->rt_src, rt->rt_iif, + &rt->rt_dst, &rt->rt_gateway); #endif } out: @@ -1610,8 +1726,8 @@ static void ipv4_link_failure(struct sk_buff *skb) static int ip_rt_bug(struct sk_buff *skb) { - printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n", - NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr), + printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n", + &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, skb->dev ? skb->dev->name : "?"); kfree_skb(skb); return 0; @@ -1788,9 +1904,8 @@ static void ip_handle_martian_source(struct net_device *dev, * RFC1812 recommendation, if source is martian, * the only hint is MAC header. 
*/ - printk(KERN_WARNING "martian source " NIPQUAD_FMT " from " - NIPQUAD_FMT", on dev %s\n", - NIPQUAD(daddr), NIPQUAD(saddr), dev->name); + printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n", + &daddr, &saddr, dev->name); if (dev->hard_header_len && skb_mac_header_was_set(skb)) { int i; const unsigned char *p = skb_mac_header(skb); @@ -2099,9 +2214,8 @@ martian_destination: RT_CACHE_STAT_INC(in_martian_dst); #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from " - NIPQUAD_FMT ", dev %s\n", - NIPQUAD(daddr), NIPQUAD(saddr), dev->name); + printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n", + &daddr, &saddr, dev->name); #endif e_hostunreach: @@ -2130,6 +2244,10 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, struct net *net; net = dev_net(dev); + + if (!rt_caching(net)) + goto skip_cache; + tos &= IPTOS_RT_MASK; hash = rt_hash(daddr, saddr, iif, rt_genid(net)); @@ -2154,6 +2272,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, } rcu_read_unlock(); +skip_cache: /* Multicast recognition logic is moved from route cache to here. The problem was that too many Ethernet cards have broken/missing hardware multicast filters :-( As result the host on multicasting @@ -2539,6 +2658,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, unsigned hash; struct rtable *rth; + if (!rt_caching(net)) + goto slow_output; + hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); rcu_read_lock_bh(); @@ -2563,6 +2685,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, } rcu_read_unlock_bh(); +slow_output: return ip_route_output_slow(net, rp, flp); } @@ -2995,7 +3118,7 @@ static ctl_table ipv4_route_table[] = { .data = &ipv4_dst_ops.gc_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_MAX_SIZE, @@ -3003,7 +3126,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_max_size, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { /* Deprecated. 
Use gc_min_interval_ms */ @@ -3013,8 +3136,8 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_gc_min_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies, + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies, }, { .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, @@ -3022,8 +3145,8 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_gc_min_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_ms_jiffies, - .strategy = &sysctl_ms_jiffies, + .proc_handler = proc_dointvec_ms_jiffies, + .strategy = sysctl_ms_jiffies, }, { .ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT, @@ -3031,8 +3154,8 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_gc_timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies, + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies, }, { .ctl_name = NET_IPV4_ROUTE_GC_INTERVAL, @@ -3040,8 +3163,8 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_gc_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies, + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies, }, { .ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD, @@ -3049,7 +3172,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_redirect_load, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER, @@ -3057,7 +3180,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_redirect_number, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE, @@ -3065,7 +3188,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_redirect_silence, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_ERROR_COST, @@ -3073,7 +3196,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_error_cost, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_ERROR_BURST, @@ -3081,7 +3204,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_error_burst, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY, @@ -3089,7 +3212,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_gc_elasticity, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES, @@ -3097,8 +3220,8 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_mtu_expires, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies, + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies, }, { .ctl_name = NET_IPV4_ROUTE_MIN_PMTU, @@ -3106,7 +3229,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_min_pmtu, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS, @@ -3114,7 +3237,7 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_min_advmss, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = 
NET_IPV4_ROUTE_SECRET_INTERVAL, @@ -3122,8 +3245,8 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_secret_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &ipv4_sysctl_rt_secret_interval, - .strategy = &ipv4_sysctl_rt_secret_interval_strategy, + .proc_handler = ipv4_sysctl_rt_secret_interval, + .strategy = ipv4_sysctl_rt_secret_interval_strategy, }, { .ctl_name = 0 } }; @@ -3151,8 +3274,8 @@ static struct ctl_table ipv4_route_flush_table[] = { .procname = "flush", .maxlen = sizeof(int), .mode = 0200, - .proc_handler = &ipv4_sysctl_rtcache_flush, - .strategy = &ipv4_sysctl_rtcache_flush_strategy, + .proc_handler = ipv4_sysctl_rtcache_flush, + .strategy = ipv4_sysctl_rtcache_flush_strategy, }, { .ctl_name = 0 }, }; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1bb10df..4710d21 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -195,7 +195,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_timestamps, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_WINDOW_SCALING, @@ -203,7 +203,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_window_scaling, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_SACK, @@ -211,7 +211,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_sack, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE, @@ -219,7 +219,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_retrans_collapse, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_DEFAULT_TTL, @@ -227,8 +227,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_ip_default_ttl, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &ipv4_doint_and_flush, - .strategy = &ipv4_doint_and_flush_strategy, + .proc_handler = ipv4_doint_and_flush, + .strategy = ipv4_doint_and_flush_strategy, .extra2 = &init_net, }, { @@ -237,7 +237,7 @@ static struct ctl_table ipv4_table[] = { .data = &ipv4_config.no_pmtu_disc, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_NONLOCAL_BIND, @@ -245,7 +245,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_ip_nonlocal_bind, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_SYN_RETRIES, @@ -253,7 +253,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_syn_retries, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_SYNACK_RETRIES, @@ -261,7 +261,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_synack_retries, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MAX_ORPHANS, @@ -269,7 +269,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_max_orphans, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MAX_TW_BUCKETS, @@ -277,7 +277,7 @@ static struct ctl_table ipv4_table[] = { .data = &tcp_death_row.sysctl_max_tw_buckets, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = 
&proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_DYNADDR, @@ -285,7 +285,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_ip_dynaddr, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_KEEPALIVE_TIME, @@ -293,8 +293,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_keepalive_time, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_TCP_KEEPALIVE_PROBES, @@ -302,7 +302,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_keepalive_probes, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_KEEPALIVE_INTVL, @@ -310,8 +310,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_keepalive_intvl, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_TCP_RETRIES1, @@ -319,8 +319,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_retries1, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, + .proc_handler = proc_dointvec_minmax, + .strategy = sysctl_intvec, .extra2 = &tcp_retr1_max }, { @@ -329,7 +329,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_retries2, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_FIN_TIMEOUT, @@ -337,8 +337,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_fin_timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, #ifdef CONFIG_SYN_COOKIES { @@ -347,7 +347,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_syncookies, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, #endif { @@ -356,7 +356,7 @@ static struct ctl_table ipv4_table[] = { .data = &tcp_death_row.sysctl_tw_recycle, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_ABORT_ON_OVERFLOW, @@ -364,7 +364,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_abort_on_overflow, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_STDURG, @@ -372,7 +372,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_stdurg, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_RFC1337, @@ -380,7 +380,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_rfc1337, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MAX_SYN_BACKLOG, @@ -388,7 +388,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_max_syn_backlog, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_LOCAL_PORT_RANGE, @@ -396,8 +396,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_local_ports.range, .maxlen = 
sizeof(sysctl_local_ports.range), .mode = 0644, - .proc_handler = &ipv4_local_port_range, - .strategy = &ipv4_sysctl_local_port_range, + .proc_handler = ipv4_local_port_range, + .strategy = ipv4_sysctl_local_port_range, }, #ifdef CONFIG_IP_MULTICAST { @@ -406,7 +406,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_igmp_max_memberships, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, #endif @@ -416,7 +416,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_igmp_max_msf, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_INET_PEER_THRESHOLD, @@ -424,7 +424,7 @@ static struct ctl_table ipv4_table[] = { .data = &inet_peer_threshold, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_INET_PEER_MINTTL, @@ -432,8 +432,8 @@ static struct ctl_table ipv4_table[] = { .data = &inet_peer_minttl, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_INET_PEER_MAXTTL, @@ -441,8 +441,8 @@ static struct ctl_table ipv4_table[] = { .data = &inet_peer_maxttl, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_INET_PEER_GC_MINTIME, @@ -450,8 +450,8 @@ static struct ctl_table ipv4_table[] = { .data = &inet_peer_gc_mintime, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_INET_PEER_GC_MAXTIME, @@ -459,8 +459,8 @@ static struct ctl_table ipv4_table[] = { .data = &inet_peer_gc_maxtime, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies + .proc_handler = proc_dointvec_jiffies, + .strategy = sysctl_jiffies }, { .ctl_name = NET_TCP_ORPHAN_RETRIES, @@ -468,7 +468,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_orphan_retries, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_FACK, @@ -476,7 +476,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_fack, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_REORDERING, @@ -484,7 +484,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_reordering, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_ECN, @@ -492,7 +492,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_ecn, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_DSACK, @@ -500,7 +500,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_dsack, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MEM, @@ -508,7 +508,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_mem, .maxlen = sizeof(sysctl_tcp_mem), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_WMEM, @@ 
-516,7 +516,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_wmem, .maxlen = sizeof(sysctl_tcp_wmem), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_RMEM, @@ -524,7 +524,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_rmem, .maxlen = sizeof(sysctl_tcp_rmem), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_APP_WIN, @@ -532,7 +532,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_app_win, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_ADV_WIN_SCALE, @@ -540,7 +540,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_adv_win_scale, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_TW_REUSE, @@ -548,7 +548,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_tw_reuse, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_FRTO, @@ -556,7 +556,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_frto, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_FRTO_RESPONSE, @@ -564,7 +564,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_frto_response, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_LOW_LATENCY, @@ -572,7 +572,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_low_latency, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_NO_METRICS_SAVE, @@ -580,7 +580,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_nometrics_save, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_MODERATE_RCVBUF, @@ -588,7 +588,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_moderate_rcvbuf, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_TSO_WIN_DIVISOR, @@ -596,15 +596,15 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_tso_win_divisor, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_CONG_CONTROL, .procname = "tcp_congestion_control", .mode = 0644, .maxlen = TCP_CA_NAME_MAX, - .proc_handler = &proc_tcp_congestion_control, - .strategy = &sysctl_tcp_congestion_control, + .proc_handler = proc_tcp_congestion_control, + .strategy = sysctl_tcp_congestion_control, }, { .ctl_name = NET_TCP_ABC, @@ -612,7 +612,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_abc, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_MTU_PROBING, @@ -620,7 +620,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_mtu_probing, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_BASE_MSS, @@ -628,7 +628,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_base_mss, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = 
NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, @@ -636,7 +636,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_workaround_signed_windows, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, #ifdef CONFIG_NET_DMA { @@ -645,7 +645,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_dma_copybreak, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, #endif { @@ -654,7 +654,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_slow_start_after_idle, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, #ifdef CONFIG_NETLABEL { @@ -663,7 +663,7 @@ static struct ctl_table ipv4_table[] = { .data = &cipso_v4_cache_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_CIPSOV4_CACHE_BUCKET_SIZE, @@ -671,7 +671,7 @@ static struct ctl_table ipv4_table[] = { .data = &cipso_v4_cache_bucketsize, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_CIPSOV4_RBM_OPTFMT, @@ -679,7 +679,7 @@ static struct ctl_table ipv4_table[] = { .data = &cipso_v4_rbm_optfmt, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = NET_CIPSOV4_RBM_STRICTVALID, @@ -687,22 +687,22 @@ static struct ctl_table ipv4_table[] = { .data = &cipso_v4_rbm_strictvalid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif /* CONFIG_NETLABEL */ { .procname = "tcp_available_congestion_control", .maxlen = TCP_CA_BUF_MAX, .mode = 0444, - .proc_handler = &proc_tcp_available_congestion_control, + .proc_handler = proc_tcp_available_congestion_control, }, { .ctl_name = NET_TCP_ALLOWED_CONG_CONTROL, .procname = "tcp_allowed_congestion_control", .maxlen = TCP_CA_BUF_MAX, .mode = 0644, - .proc_handler = &proc_allowed_congestion_control, - .strategy = &strategy_allowed_congestion_control, + .proc_handler = proc_allowed_congestion_control, + .strategy = strategy_allowed_congestion_control, }, { .ctl_name = NET_TCP_MAX_SSTHRESH, @@ -710,7 +710,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_max_ssthresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .ctl_name = CTL_UNNUMBERED, @@ -718,8 +718,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_udp_mem, .maxlen = sizeof(sysctl_udp_mem), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, + .proc_handler = proc_dointvec_minmax, + .strategy = sysctl_intvec, .extra1 = &zero }, { @@ -728,8 +728,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_udp_rmem_min, .maxlen = sizeof(sysctl_udp_rmem_min), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, + .proc_handler = proc_dointvec_minmax, + .strategy = sysctl_intvec, .extra1 = &zero }, { @@ -738,8 +738,8 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_udp_wmem_min, .maxlen = sizeof(sysctl_udp_wmem_min), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, + .proc_handler = proc_dointvec_minmax, + .strategy = sysctl_intvec, .extra1 = &zero }, { .ctl_name = 0 } @@ -752,7 +752,7 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_icmp_echo_ignore_all, .maxlen = 
sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, @@ -760,7 +760,7 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_icmp_echo_ignore_broadcasts, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, @@ -768,7 +768,7 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, @@ -776,7 +776,7 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_icmp_errors_use_inbound_ifaddr, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_RATELIMIT, @@ -784,8 +784,8 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_icmp_ratelimit, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_ms_jiffies, - .strategy = &sysctl_ms_jiffies + .proc_handler = proc_dointvec_ms_jiffies, + .strategy = sysctl_ms_jiffies }, { .ctl_name = NET_IPV4_ICMP_RATEMASK, @@ -793,7 +793,15 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_icmp_ratemask, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "rt_cache_rebuild_count", + .data = &init_net.ipv4.sysctl_rt_cache_rebuild_count, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec }, { } }; @@ -827,8 +835,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) &net->ipv4.sysctl_icmp_ratelimit; table[5].data = &net->ipv4.sysctl_icmp_ratemask; + table[6].data = + &net->ipv4.sysctl_rt_cache_rebuild_count; } + net->ipv4.sysctl_rt_cache_rebuild_count = 4; + net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); if (net->ipv4.ipv4_hdr == NULL) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c5aca0b..f60a591 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1680,7 +1680,7 @@ void tcp_set_state(struct sock *sk, int state) inet_put_port(sk); /* fall through */ default: - if (oldstate==TCP_ESTABLISHED) + if (oldstate == TCP_ESTABLISHED) TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); } @@ -1690,7 +1690,7 @@ void tcp_set_state(struct sock *sk, int state) sk->sk_state = state; #ifdef STATE_TRACE - SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]); + SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); #endif } EXPORT_SYMBOL_GPL(tcp_set_state); @@ -2650,7 +2650,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key); void tcp_done(struct sock *sk) { - if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) + if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); tcp_set_state(sk, TCP_CLOSE); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4a1221e..ee467ec 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -1,13 +1,23 @@ /* - * TCP CUBIC: Binary Increase Congestion control for TCP v2.2 + * TCP CUBIC: Binary Increase Congestion control for TCP v2.3 * Home page: * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC * This is from the implementation of CUBIC TCP in 
- * Injong Rhee, Lisong Xu. - * "CUBIC: A New TCP-Friendly High-Speed TCP Variant - * in PFLDnet 2005 + * Sangtae Ha, Injong Rhee and Lisong Xu, + * "CUBIC: A New TCP-Friendly High-Speed TCP Variant" + * in ACM SIGOPS Operating System Review, July 2008. * Available from: - * http://netsrv.csc.ncsu.edu/export/cubic-paper.pdf + * http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf + * + * CUBIC integrates a new slow start algorithm, called HyStart. + * The details of HyStart are presented in + * Sangtae Ha and Injong Rhee, + * "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008. + * Available from: + * http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf + * + * All testing results are available from: + * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing * * Unless CUBIC is enabled and congestion window is large * this behaves the same as the original Reno. @@ -23,12 +33,26 @@ */ #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ +/* Two methods of hybrid slow start */ +#define HYSTART_ACK_TRAIN 0x1 +#define HYSTART_DELAY 0x2 + +/* Number of delay samples for detecting the increase of delay */ +#define HYSTART_MIN_SAMPLES 8 +#define HYSTART_DELAY_MIN (2U<<3) +#define HYSTART_DELAY_MAX (16U<<3) +#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX) + static int fast_convergence __read_mostly = 1; static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ static int initial_ssthresh __read_mostly; static int bic_scale __read_mostly = 41; static int tcp_friendliness __read_mostly = 1; +static int hystart __read_mostly = 1; +static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY; +static int hystart_low_window __read_mostly = 16; + static u32 cube_rtt_scale __read_mostly; static u32 beta_scale __read_mostly; static u64 cube_factor __read_mostly; @@ -44,6 +68,13 @@ module_param(bic_scale, int, 0444); MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)"); module_param(tcp_friendliness, int, 0644); MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness"); +module_param(hystart, int, 0644); +MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm"); +module_param(hystart_detect, int, 0644); +MODULE_PARM_DESC(hystart_detect, "hyrbrid slow start detection mechanisms" + " 1: packet-train 2: delay 3: both packet-train and delay"); +module_param(hystart_low_window, int, 0644); +MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start"); /* BIC TCP Parameters */ struct bictcp { @@ -59,7 +90,13 @@ struct bictcp { u32 ack_cnt; /* number of acks */ u32 tcp_cwnd; /* estimated tcp cwnd */ #define ACK_RATIO_SHIFT 4 - u32 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ + u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ + u8 sample_cnt; /* number of samples to decide curr_rtt */ + u8 found; /* the exit point is found? 
*/ + u32 round_start; /* beginning of each round */ + u32 end_seq; /* end_seq of the round */ + u32 last_jiffies; /* last time when the ACK spacing is close */ + u32 curr_rtt; /* the minimum rtt of current round */ }; static inline void bictcp_reset(struct bictcp *ca) @@ -76,12 +113,28 @@ static inline void bictcp_reset(struct bictcp *ca) ca->delayed_ack = 2 << ACK_RATIO_SHIFT; ca->ack_cnt = 0; ca->tcp_cwnd = 0; + ca->found = 0; +} + +static inline void bictcp_hystart_reset(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct bictcp *ca = inet_csk_ca(sk); + + ca->round_start = ca->last_jiffies = jiffies; + ca->end_seq = tp->snd_nxt; + ca->curr_rtt = 0; + ca->sample_cnt = 0; } static void bictcp_init(struct sock *sk) { bictcp_reset(inet_csk_ca(sk)); - if (initial_ssthresh) + + if (hystart) + bictcp_hystart_reset(sk); + + if (!hystart && initial_ssthresh) tcp_sk(sk)->snd_ssthresh = initial_ssthresh; } @@ -235,9 +288,11 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) if (!tcp_is_cwnd_limited(sk, in_flight)) return; - if (tp->snd_cwnd <= tp->snd_ssthresh) + if (tp->snd_cwnd <= tp->snd_ssthresh) { + if (hystart && after(ack, ca->end_seq)) + bictcp_hystart_reset(sk); tcp_slow_start(tp); - else { + } else { bictcp_update(ca, tp->snd_cwnd); /* In dangerous area, increase slowly. @@ -281,8 +336,45 @@ static u32 bictcp_undo_cwnd(struct sock *sk) static void bictcp_state(struct sock *sk, u8 new_state) { - if (new_state == TCP_CA_Loss) + if (new_state == TCP_CA_Loss) { bictcp_reset(inet_csk_ca(sk)); + bictcp_hystart_reset(sk); + } +} + +static void hystart_update(struct sock *sk, u32 delay) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct bictcp *ca = inet_csk_ca(sk); + + if (!(ca->found & hystart_detect)) { + u32 curr_jiffies = jiffies; + + /* first detection parameter - ack-train detection */ + if (curr_jiffies - ca->last_jiffies <= msecs_to_jiffies(2)) { + ca->last_jiffies = curr_jiffies; + if (curr_jiffies - ca->round_start >= ca->delay_min>>4) + ca->found |= HYSTART_ACK_TRAIN; + } + + /* obtain the minimum delay of more than sampling packets */ + if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { + if (ca->curr_rtt == 0 || ca->curr_rtt > delay) + ca->curr_rtt = delay; + + ca->sample_cnt++; + } else { + if (ca->curr_rtt > ca->delay_min + + HYSTART_DELAY_THRESH(ca->delay_min>>4)) + ca->found |= HYSTART_DELAY; + } + /* + * Either one of two conditions are met, + * we exit from slow start immediately. 
+ */ + if (ca->found & hystart_detect) + tp->snd_ssthresh = tp->snd_cwnd; + } } /* Track delayed acknowledgment ratio using sliding window @@ -291,6 +383,7 @@ static void bictcp_state(struct sock *sk, u8 new_state) static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) { const struct inet_connection_sock *icsk = inet_csk(sk); + const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); u32 delay; @@ -314,6 +407,11 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) /* first time call or link delay decreases */ if (ca->delay_min == 0 || ca->delay_min > delay) ca->delay_min = delay; + + /* hystart triggers when cwnd is larger than some threshold */ + if (hystart && tp->snd_cwnd <= tp->snd_ssthresh && + tp->snd_cwnd >= hystart_low_window) + hystart_update(sk, delay); } static struct tcp_congestion_ops cubictcp = { @@ -372,4 +470,4 @@ module_exit(cubictcp_unregister); MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CUBIC TCP"); -MODULE_VERSION("2.2"); +MODULE_VERSION("2.3"); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d77c0d2..097294b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2336,9 +2336,9 @@ static void DBGUNDO(struct sock *sk, const char *msg) struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { - printk(KERN_DEBUG "Undo %s " NIPQUAD_FMT "/%u c%u l%u ss%u/%u p%u\n", + printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", msg, - NIPQUAD(inet->daddr), ntohs(inet->dport), + &inet->daddr, ntohs(inet->dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); @@ -2346,9 +2346,9 @@ static void DBGUNDO(struct sock *sk, const char *msg) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - printk(KERN_DEBUG "Undo %s " NIP6_FMT "/%u c%u l%u ss%u/%u p%u\n", + printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, - NIP6(np->daddr), ntohs(inet->dport), + &np->daddr, ntohs(inet->dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5c8fa7f..d49233f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1139,10 +1139,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) if (genhash || memcmp(hash_location, newhash, 16) != 0) { if (net_ratelimit()) { - printk(KERN_INFO "MD5 Hash failed for " - "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n", - NIPQUAD(iph->saddr), ntohs(th->source), - NIPQUAD(iph->daddr), ntohs(th->dest), + printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", + &iph->saddr, ntohs(th->source), + &iph->daddr, ntohs(th->dest), genhash ? " tcp_v4_calc_md5_hash failed" : ""); } return 1; @@ -1297,10 +1296,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * to destinations, already remembered * to the moment of synflood. 
*/ - LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open " - "request from " NIPQUAD_FMT "/%u\n", - NIPQUAD(saddr), - ntohs(tcp_hdr(skb)->source)); + LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n", + &saddr, ntohs(tcp_hdr(skb)->source)); goto drop_and_release; } @@ -1877,7 +1874,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) struct inet_connection_sock *icsk; struct hlist_node *node; struct sock *sk = cur; - struct tcp_iter_state* st = seq->private; + struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); if (!sk) { @@ -1963,7 +1960,7 @@ static inline int empty_bucket(struct tcp_iter_state *st) static void *established_get_first(struct seq_file *seq) { - struct tcp_iter_state* st = seq->private; + struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); void *rc = NULL; @@ -2008,7 +2005,7 @@ static void *established_get_next(struct seq_file *seq, void *cur) struct sock *sk = cur; struct inet_timewait_sock *tw; struct hlist_node *node; - struct tcp_iter_state* st = seq->private; + struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); ++st->num; @@ -2067,7 +2064,7 @@ static void *established_get_idx(struct seq_file *seq, loff_t pos) static void *tcp_get_idx(struct seq_file *seq, loff_t pos) { void *rc; - struct tcp_iter_state* st = seq->private; + struct tcp_iter_state *st = seq->private; inet_listen_lock(&tcp_hashinfo); st->state = TCP_SEQ_STATE_LISTENING; @@ -2084,7 +2081,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos) static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) { - struct tcp_iter_state* st = seq->private; + struct tcp_iter_state *st = seq->private; st->state = TCP_SEQ_STATE_LISTENING; st->num = 0; return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; @@ -2093,7 +2090,7 @@ static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { void *rc = NULL; - struct tcp_iter_state* st; + struct tcp_iter_state *st; if (v == SEQ_START_TOKEN) { rc = tcp_get_idx(seq, 0); @@ -2123,7 +2120,7 @@ out: static void tcp_seq_stop(struct seq_file *seq, void *v) { - struct tcp_iter_state* st = seq->private; + struct tcp_iter_state *st = seq->private; switch (st->state) { case TCP_SEQ_STATE_OPENREQ: @@ -2284,7 +2281,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw, static int tcp4_seq_show(struct seq_file *seq, void *v) { - struct tcp_iter_state* st; + struct tcp_iter_state *st; int len; if (v == SEQ_START_TOKEN) { diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 779f2e9..f67effb 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -491,7 +491,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, * as a request_sock. */ -struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, +struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct request_sock **prev) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index ba85d88..a524627 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -42,7 +42,7 @@ /* People can turn this off for buggy TCP's found in printers etc. */ int sysctl_tcp_retrans_collapse __read_mostly = 1; -/* People can turn this on to work with those rare, broken TCPs that +/* People can turn this on to work with those rare, broken TCPs that * interpret the window field as a signed quantity. 
*/ int sysctl_tcp_workaround_signed_windows __read_mostly = 0; @@ -484,7 +484,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, } if (likely(sysctl_tcp_window_scaling)) { opts->ws = tp->rx_opt.rcv_wscale; - if(likely(opts->ws)) + if (likely(opts->ws)) size += TCPOLEN_WSCALE_ALIGNED; } if (likely(sysctl_tcp_sack)) { @@ -526,7 +526,7 @@ static unsigned tcp_synack_options(struct sock *sk, if (likely(ireq->wscale_ok)) { opts->ws = ireq->rcv_wscale; - if(likely(opts->ws)) + if (likely(opts->ws)) size += TCPOLEN_WSCALE_ALIGNED; } if (likely(doing_ts)) { @@ -1172,7 +1172,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, static inline int tcp_minshall_check(const struct tcp_sock *tp) { - return after(tp->snd_sml,tp->snd_una) && + return after(tp->snd_sml, tp->snd_una) && !after(tp->snd_sml, tp->snd_nxt); } diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 7ddc30f..25524d4 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -153,12 +153,11 @@ static int tcpprobe_sprint(char *tbuf, int n) = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); return snprintf(tbuf, n, - "%lu.%09lu " NIPQUAD_FMT ":%u " NIPQUAD_FMT ":%u" - " %d %#x %#x %u %u %u %u\n", + "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n", (unsigned long) tv.tv_sec, (unsigned long) tv.tv_nsec, - NIPQUAD(p->saddr), ntohs(p->sport), - NIPQUAD(p->daddr), ntohs(p->dport), + &p->saddr, ntohs(p->sport), + &p->daddr, ntohs(p->dport), p->length, p->snd_nxt, p->snd_una, p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt); } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 6b6dff1..3df339e 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -171,7 +171,7 @@ static int tcp_write_timeout(struct sock *sk) static void tcp_delack_timer(unsigned long data) { - struct sock *sk = (struct sock*)data; + struct sock *sk = (struct sock *)data; struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); @@ -299,15 +299,15 @@ static void tcp_retransmit_timer(struct sock *sk) #ifdef TCP_DEBUG struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { - LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIPQUAD_FMT ":%u/%u shrinks window %u:%u. Repaired.\n", - NIPQUAD(inet->daddr), ntohs(inet->dport), + LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %pI4:%u/%u shrinks window %u:%u. Repaired.\n", + &inet->daddr, ntohs(inet->dport), inet->num, tp->snd_una, tp->snd_nxt); } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIP6_FMT ":%u/%u shrinks window %u:%u. Repaired.\n", - NIP6(np->daddr), ntohs(inet->dport), + LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %pI6:%u/%u shrinks window %u:%u. 
Repaired.\n", + &np->daddr, ntohs(inet->dport), inet->num, tp->snd_una, tp->snd_nxt); } #endif @@ -396,7 +396,7 @@ out:; static void tcp_write_timer(unsigned long data) { - struct sock *sk = (struct sock*)data; + struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); int event; diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index e03b101..9ec843a 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -83,7 +83,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) else if (!yeah->doing_reno_now) { /* Scalable */ - tp->snd_cwnd_cnt+=yeah->pkts_acked; + tp->snd_cwnd_cnt += yeah->pkts_acked; if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){ if (tp->snd_cwnd < tp->snd_cwnd_clamp) tp->snd_cwnd++; @@ -224,7 +224,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) { reduction = max( reduction, tp->snd_cwnd >> TCP_YEAH_DELTA); } else - reduction = max(tp->snd_cwnd>>1,2U); + reduction = max(tp->snd_cwnd>>1, 2U); yeah->fast_count = 0; yeah->reno_count = max(yeah->reno_count>>1, 2U); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cf02701..54badc9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -81,6 +81,8 @@ #include <asm/uaccess.h> #include <asm/ioctls.h> #include <linux/bootmem.h> +#include <linux/highmem.h> +#include <linux/swap.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> @@ -104,12 +106,8 @@ #include <net/xfrm.h> #include "udp_impl.h" -/* - * Snmp MIB for the UDP layer - */ - -struct hlist_head udp_hash[UDP_HTABLE_SIZE]; -DEFINE_RWLOCK(udp_hash_lock); +struct udp_table udp_table; +EXPORT_SYMBOL(udp_table); int sysctl_udp_mem[3] __read_mostly; int sysctl_udp_rmem_min __read_mostly; @@ -123,7 +121,7 @@ atomic_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); static int udp_lib_lport_inuse(struct net *net, __u16 num, - const struct hlist_head udptable[], + const struct udp_hslot *hslot, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2)) @@ -131,7 +129,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, struct sock *sk2; struct hlist_node *node; - sk_for_each(sk2, node, &udptable[udp_hashfn(net, num)]) + sk_for_each(sk2, node, &hslot->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && sk2->sk_hash == num && @@ -154,12 +152,11 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2 ) ) { - struct hlist_head *udptable = sk->sk_prot->h.udp_hash; + struct udp_hslot *hslot; + struct udp_table *udptable = sk->sk_prot->h.udp_table; int error = 1; struct net *net = sock_net(sk); - write_lock_bh(&udp_hash_lock); - if (!snum) { int low, high, remaining; unsigned rand; @@ -171,26 +168,39 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, rand = net_random(); snum = first = rand % remaining + low; rand |= 1; - while (udp_lib_lport_inuse(net, snum, udptable, sk, - saddr_comp)) { + for (;;) { + hslot = &udptable->hash[udp_hashfn(net, snum)]; + spin_lock_bh(&hslot->lock); + if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) + break; + spin_unlock_bh(&hslot->lock); do { snum = snum + rand; } while (snum < low || snum > high); if (snum == first) goto fail; } - } else if (udp_lib_lport_inuse(net, snum, udptable, sk, saddr_comp)) - goto fail; - + } else { + hslot = &udptable->hash[udp_hashfn(net, snum)]; + spin_lock_bh(&hslot->lock); + if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) + goto fail_unlock; + } inet_sk(sk)->num = snum; 
sk->sk_hash = snum; if (sk_unhashed(sk)) { - sk_add_node(sk, &udptable[udp_hashfn(net, snum)]); + /* + * We need that previous write to sk->sk_hash committed + * before write to sk->next done in following add_node() variant + */ + smp_wmb(); + sk_add_node_rcu(sk, &hslot->head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); } error = 0; +fail_unlock: + spin_unlock_bh(&hslot->lock); fail: - write_unlock_bh(&udp_hash_lock); return error; } @@ -208,63 +218,89 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum) return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal); } +static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, + unsigned short hnum, + __be16 sport, __be32 daddr, __be16 dport, int dif) +{ + int score = -1; + + if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && + !ipv6_only_sock(sk)) { + struct inet_sock *inet = inet_sk(sk); + + score = (sk->sk_family == PF_INET ? 1 : 0); + if (inet->rcv_saddr) { + if (inet->rcv_saddr != daddr) + return -1; + score += 2; + } + if (inet->daddr) { + if (inet->daddr != saddr) + return -1; + score += 2; + } + if (inet->dport) { + if (inet->dport != sport) + return -1; + score += 2; + } + if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if != dif) + return -1; + score += 2; + } + } + return score; +} + /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. -DaveM */ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, - int dif, struct hlist_head udptable[]) + int dif, struct udp_table *udptable) { - struct sock *sk, *result = NULL; - struct hlist_node *node; + struct sock *sk, *result; + struct hlist_node *node, *next; unsigned short hnum = ntohs(dport); - int badness = -1; - - read_lock(&udp_hash_lock); - sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) { - struct inet_sock *inet = inet_sk(sk); - - if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && - !ipv6_only_sock(sk)) { - int score = (sk->sk_family == PF_INET ? 
1 : 0); - if (inet->rcv_saddr) { - if (inet->rcv_saddr != daddr) - continue; - score+=2; - } - if (inet->daddr) { - if (inet->daddr != saddr) - continue; - score+=2; - } - if (inet->dport) { - if (inet->dport != sport) - continue; - score+=2; - } - if (sk->sk_bound_dev_if) { - if (sk->sk_bound_dev_if != dif) - continue; - score+=2; - } - if (score == 9) { - result = sk; - break; - } else if (score > badness) { - result = sk; - badness = score; - } + unsigned int hash = udp_hashfn(net, hnum); + struct udp_hslot *hslot = &udptable->hash[hash]; + int score, badness; + + rcu_read_lock(); +begin: + result = NULL; + badness = -1; + sk_for_each_rcu_safenext(sk, node, &hslot->head, next) { + /* + * lockless reader, and SLAB_DESTROY_BY_RCU items: + * We must check this item was not moved to another chain + */ + if (udp_hashfn(net, sk->sk_hash) != hash) + goto begin; + score = compute_score(sk, net, saddr, hnum, sport, + daddr, dport, dif); + if (score > badness) { + result = sk; + badness = score; } } - if (result) - sock_hold(result); - read_unlock(&udp_hash_lock); + if (result) { + if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) + result = NULL; + else if (unlikely(compute_score(result, net, saddr, hnum, sport, + daddr, dport, dif) < badness)) { + sock_put(result); + goto begin; + } + } + rcu_read_unlock(); return result; } static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, - struct hlist_head udptable[]) + struct udp_table *udptable) { struct sock *sk; const struct iphdr *iph = ip_hdr(skb); @@ -280,7 +316,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { - return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, udp_hash); + return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); } EXPORT_SYMBOL_GPL(udp4_lib_lookup); @@ -324,7 +360,7 @@ found: * to find the appropriate port. 
*/ -void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) +void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) { struct inet_sock *inet; struct iphdr *iph = (struct iphdr*)skb->data; @@ -393,7 +429,7 @@ out: void udp_err(struct sk_buff *skb, u32 info) { - __udp4_lib_err(skb, info, udp_hash); + __udp4_lib_err(skb, info, &udp_table); } /* @@ -934,6 +970,21 @@ int udp_disconnect(struct sock *sk, int flags) return 0; } +void udp_lib_unhash(struct sock *sk) +{ + struct udp_table *udptable = sk->sk_prot->h.udp_table; + unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash); + struct udp_hslot *hslot = &udptable->hash[hash]; + + spin_lock_bh(&hslot->lock); + if (sk_del_node_init_rcu(sk)) { + inet_sk(sk)->num = 0; + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); + } + spin_unlock_bh(&hslot->lock); +} +EXPORT_SYMBOL(udp_lib_unhash); + static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int is_udplite = IS_UDPLITE(sk); @@ -1072,13 +1123,14 @@ drop: static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct udphdr *uh, __be32 saddr, __be32 daddr, - struct hlist_head udptable[]) + struct udp_table *udptable) { struct sock *sk; + struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))]; int dif; - read_lock(&udp_hash_lock); - sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); + spin_lock(&hslot->lock); + sk = sk_head(&hslot->head); dif = skb->dev->ifindex; sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); if (sk) { @@ -1104,7 +1156,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, } while (sknext); } else kfree_skb(skb); - read_unlock(&udp_hash_lock); + spin_unlock(&hslot->lock); return 0; } @@ -1150,7 +1202,7 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, * All we need to do is get the socket, and then do a checksum. */ -int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], +int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct sock *sk; @@ -1218,13 +1270,13 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], return 0; short_packet: - LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n", + LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", proto == IPPROTO_UDPLITE ? "-Lite" : "", - NIPQUAD(saddr), + &saddr, ntohs(uh->source), ulen, skb->len, - NIPQUAD(daddr), + &daddr, ntohs(uh->dest)); goto drop; @@ -1233,11 +1285,11 @@ csum_error: * RFC1122: OK. Discards the bad packet silently (as far as * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ - LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n", + LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", proto == IPPROTO_UDPLITE ? 
"-Lite" : "", - NIPQUAD(saddr), + &saddr, ntohs(uh->source), - NIPQUAD(daddr), + &daddr, ntohs(uh->dest), ulen); drop: @@ -1248,7 +1300,7 @@ drop: int udp_rcv(struct sk_buff *skb) { - return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); + return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); } void udp_destroy_sock(struct sock *sk) @@ -1490,7 +1542,8 @@ struct proto udp_prot = { .sysctl_wmem = &sysctl_udp_wmem_min, .sysctl_rmem = &sysctl_udp_rmem_min, .obj_size = sizeof(struct udp_sock), - .h.udp_hash = udp_hash, + .slab_flags = SLAB_DESTROY_BY_RCU, + .h.udp_table = &udp_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, @@ -1500,20 +1553,23 @@ struct proto udp_prot = { /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS -static struct sock *udp_get_first(struct seq_file *seq) +static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); - for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { + for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { struct hlist_node *node; - sk_for_each(sk, node, state->hashtable + state->bucket) { + struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; + spin_lock_bh(&hslot->lock); + sk_for_each(sk, node, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == state->family) goto found; } + spin_unlock_bh(&hslot->lock); } sk = NULL; found: @@ -1527,20 +1583,18 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) do { sk = sk_next(sk); -try_again: - ; } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); - if (!sk && ++state->bucket < UDP_HTABLE_SIZE) { - sk = sk_head(state->hashtable + state->bucket); - goto try_again; + if (!sk) { + spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); + return udp_get_first(seq, state->bucket + 1); } return sk; } static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) { - struct sock *sk = udp_get_first(seq); + struct sock *sk = udp_get_first(seq, 0); if (sk) while (pos && (sk = udp_get_next(seq, sk)) != NULL) @@ -1549,9 +1603,7 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) } static void *udp_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(udp_hash_lock) { - read_lock(&udp_hash_lock); return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } @@ -1569,9 +1621,11 @@ static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void udp_seq_stop(struct seq_file *seq, void *v) - __releases(udp_hash_lock) { - read_unlock(&udp_hash_lock); + struct udp_iter_state *state = seq->private; + + if (state->bucket < UDP_HTABLE_SIZE) + spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); } static int udp_seq_open(struct inode *inode, struct file *file) @@ -1587,7 +1641,7 @@ static int udp_seq_open(struct inode *inode, struct file *file) s = ((struct seq_file *)file->private_data)->private; s->family = afinfo->family; - s->hashtable = afinfo->hashtable; + s->udp_table = afinfo->udp_table; return err; } @@ -1659,7 +1713,7 @@ int udp4_seq_show(struct seq_file *seq, void *v) static struct udp_seq_afinfo udp4_seq_afinfo = { .name = "udp", .family = AF_INET, - .hashtable = udp_hash, + .udp_table = &udp_table, .seq_fops = { .owner = THIS_MODULE, }, @@ -1694,16 +1748,28 @@ void udp4_proc_exit(void) } #endif /* CONFIG_PROC_FS */ +void __init udp_table_init(struct udp_table *table) +{ + int i; + + for (i = 0; i < UDP_HTABLE_SIZE; i++) { + INIT_HLIST_HEAD(&table->hash[i].head); + spin_lock_init(&table->hash[i].lock); + } +} + void __init udp_init(void) { - unsigned long limit; + unsigned long nr_pages, limit; + udp_table_init(&udp_table); /* Set the pressure threshold up by the same strategy of TCP. It is a * fraction of global memory that is up to 1/2 at 256 MB, decreasing * toward zero with the amount of memory, with a floor of 128 pages. */ - limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); - limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); + nr_pages = totalram_pages - totalhigh_pages; + limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); + limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); limit = max(limit, 128UL); sysctl_udp_mem[0] = limit / 4 * 3; sysctl_udp_mem[1] = limit; @@ -1714,8 +1780,6 @@ void __init udp_init(void) } EXPORT_SYMBOL(udp_disconnect); -EXPORT_SYMBOL(udp_hash); -EXPORT_SYMBOL(udp_hash_lock); EXPORT_SYMBOL(udp_ioctl); EXPORT_SYMBOL(udp_prot); EXPORT_SYMBOL(udp_sendmsg); diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 2e9bad2..9f4a616 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -5,8 +5,8 @@ #include <net/protocol.h> #include <net/inet_common.h> -extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int ); -extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []); +extern int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int ); +extern void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); extern int udp_v4_get_port(struct sock *sk, unsigned short snum); diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 3c80796..c784891 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -12,16 +12,17 @@ */ #include "udp_impl.h" -struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; +struct udp_table udplite_table; +EXPORT_SYMBOL(udplite_table); static int udplite_rcv(struct sk_buff *skb) { - return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE); + return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE); } static void udplite_err(struct sk_buff *skb, u32 info) { - __udp4_lib_err(skb, info, udplite_hash); + __udp4_lib_err(skb, info, &udplite_table); } static struct net_protocol udplite_protocol = { @@ -50,7 +51,8 @@ struct proto udplite_prot = { .unhash = udp_lib_unhash, .get_port = udp_v4_get_port, .obj_size = 
sizeof(struct udp_sock), - .h.udp_hash = udplite_hash, + .slab_flags = SLAB_DESTROY_BY_RCU, + .h.udp_table = &udplite_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, @@ -71,7 +73,7 @@ static struct inet_protosw udplite4_protosw = { static struct udp_seq_afinfo udplite4_seq_afinfo = { .name = "udplite", .family = AF_INET, - .hashtable = udplite_hash, + .udp_table = &udplite_table, .seq_fops = { .owner = THIS_MODULE, }, @@ -108,6 +110,7 @@ static inline int udplite4_proc_init(void) void __init udplite4_register(void) { + udp_table_init(&udplite_table); if (proto_register(&udplite_prot, 1)) goto out_register_err; @@ -126,5 +129,4 @@ out_register_err: printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); } -EXPORT_SYMBOL(udplite_hash); EXPORT_SYMBOL(udplite_prot); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index c63de0a..f9a775b 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -65,7 +65,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy) read_lock_bh(&policy->lock); for (dst = policy->bundles; dst; dst = dst->next) { - struct xfrm_dst *xdst = (struct xfrm_dst*)dst; + struct xfrm_dst *xdst = (struct xfrm_dst *)dst; if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ xdst->u.rt.fl.fl4_dst == fl->fl4_dst && xdst->u.rt.fl.fl4_src == fl->fl4_src && |
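The UDP lookup rework earlier in this patch relies on a subtle invariant: udp_prot and udplite_prot now set SLAB_DESTROY_BY_RCU, so a socket's memory may be freed and reused for a different socket without waiting for a grace period, and __udp4_lib_lookup() therefore runs under rcu_read_lock() with two defensive checks. It restarts the scan if an entry's hash no longer matches the slot being walked, and after picking the best-scoring socket it takes a reference with atomic_inc_not_zero() and re-scores it, restarting if the entry was reused in the meantime. A compact, self-contained sketch of that re-check discipline is below; the types and helpers (my_hslot, my_sock, my_score, my_lookup) are illustrative stand-ins, not the patch's own code.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct my_hslot {
	struct hlist_head head;
	spinlock_t lock;
};

struct my_sock {
	struct hlist_node node;
	unsigned int slot;		/* slot index this socket was hashed into */
	atomic_t refcnt;
	/* ... addresses, ports, bound device ... */
};

/*
 * Returns -1 if "s" does not match the lookup keys, otherwise a goodness
 * score (the patch's compute_score() checks saddr/daddr/ports/ifindex).
 * Elided here: every live entry counts as a weak match.
 */
static int my_score(const struct my_sock *s)
{
	return 0;
}

static struct my_sock *my_lookup(struct my_hslot *hslot, unsigned int slot)
{
	struct my_sock *s, *best;
	struct hlist_node *pos;
	int score, badness;

	rcu_read_lock();
begin:
	best = NULL;
	badness = -1;
	for (pos = rcu_dereference(hslot->head.first); pos;
	     pos = rcu_dereference(pos->next)) {
		s = hlist_entry(pos, struct my_sock, node);
		/*
		 * SLAB_DESTROY_BY_RCU: "s" may have been freed and reused
		 * for a socket that lives on another chain.  If its slot
		 * no longer matches, restart the walk from the head.
		 */
		if (s->slot != slot)
			goto begin;
		score = my_score(s);
		if (score > badness) {
			best = s;
			badness = score;
		}
	}
	if (best) {
		if (!atomic_inc_not_zero(&best->refcnt)) {
			best = NULL;	/* lost the race with the final put */
		} else if (my_score(best) < badness) {
			/* entry was reused for a different socket; retry */
			atomic_dec(&best->refcnt);	/* stand-in for sock_put() */
			goto begin;
		}
	}
	rcu_read_unlock();
	return best;
}

Paying for the occasional restart and the second scoring pass is what makes it safe to drop the global udp_hash_lock from the receive path entirely.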