| Field | Value | Date |
|---|---|---|
| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-18 10:24:36 -0700 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-18 10:24:36 -0700 |
| commit | 485cf925d8b7a6b3c62fe5f1e167f2d0d4edf32a (patch) | |
| tree | 57798f48123a62dd1801f039b676b06913e34e72 /net | |
| parent | 31bdc5dc7666aa2fe04c626cea30fe3c20cf481c (diff) | |
| parent | 3fd8f9e4b6c184d03d340bc86630f700de967fa8 (diff) | |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (24 commits)
[NETFILTER]: xt_connlimit needs to depend on nf_conntrack
[NETFILTER]: ipt_iprange.h must #include <linux/types.h>
[IrDA]: Fix IrDA build failure
[ATM]: nicstar needs virt_to_bus
[NET]: move __dev_addr_discard adjacent to dev_addr_discard for readability
[NET]: merge dev_unicast_discard and dev_mc_discard into one
[NET]: move dev_mc_discard from dev_mcast.c to dev.c
[NETLINK]: negative groups in netlink_setsockopt
[PPPOL2TP]: Reset meta-data in xmit function
[PPPOL2TP]: Fix use-after-free
[PKT_SCHED]: Some typo fixes in net/sched/Kconfig
[XFRM]: Fix crash introduced by struct dst_entry reordering
[TCP]: remove unused argument to cong_avoid op
[ATM]: [idt77252] Rename CONFIG_ATM_IDT77252_SEND_IDLE to not resemble a Kconfig variable
[ATM]: [drivers] ioremap balanced with iounmap
[ATM]: [lanai] sram_test_word() must be __devinit
[ATM]: [nicstar] Replace C code with call to ARRAY_SIZE() macro.
[ATM]: Eliminate dead config variable CONFIG_BR2684_FAST_TRANS.
[ATM]: Replacing kmalloc/memset combination with kzalloc.
[NET]: gen_estimator deadlock fix
...
Diffstat (limited to 'net')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | net/atm/br2684.c | 4 |
| -rw-r--r-- | net/core/dev.c | 38 |
| -rw-r--r-- | net/core/dev_mcast.c | 12 |
| -rw-r--r-- | net/core/gen_estimator.c | 81 |
| -rw-r--r-- | net/ipv4/tcp_bic.c | 2 |
| -rw-r--r-- | net/ipv4/tcp_cong.c | 3 |
| -rw-r--r-- | net/ipv4/tcp_cubic.c | 2 |
| -rw-r--r-- | net/ipv4/tcp_highspeed.c | 2 |
| -rw-r--r-- | net/ipv4/tcp_htcp.c | 2 |
| -rw-r--r-- | net/ipv4/tcp_hybla.c | 4 |
| -rw-r--r-- | net/ipv4/tcp_illinois.c | 2 |
| -rw-r--r-- | net/ipv4/tcp_input.c | 8 |
| -rw-r--r-- | net/ipv4/tcp_lp.c | 5 |
| -rw-r--r-- | net/ipv4/tcp_scalable.c | 2 |
| -rw-r--r-- | net/ipv4/tcp_vegas.c | 6 |
| -rw-r--r-- | net/ipv4/tcp_veno.c | 6 |
| -rw-r--r-- | net/ipv4/tcp_yeah.c | 2 |
| -rw-r--r-- | net/irda/af_irda.c | 2 |
| -rw-r--r-- | net/irda/irda_device.c | 4 |
| -rw-r--r-- | net/irda/iriap.c | 2 |
| -rw-r--r-- | net/irda/irlap.c | 2 |
| -rw-r--r-- | net/irda/irlmp.c | 2 |
| -rw-r--r-- | net/irda/irproc.c | 2 |
| -rw-r--r-- | net/irda/irsysctl.c | 2 |
| -rw-r--r-- | net/irda/irttp.c | 2 |
| -rw-r--r-- | net/netfilter/Kconfig | 1 |
| -rw-r--r-- | net/netlink/af_netlink.c | 5 |
| -rw-r--r-- | net/sched/Kconfig | 6 |
| -rw-r--r-- | net/sched/sch_atm.c | 3 |
| -rw-r--r-- | net/xfrm/xfrm_policy.c | 2 |

30 files changed, 110 insertions, 106 deletions
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index faa6aaf..c0f6861 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -460,11 +460,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
     skb_pull(skb, plen);
     skb_set_mac_header(skb, -ETH_HLEN);
     skb->pkt_type = PACKET_HOST;
-#ifdef CONFIG_BR2684_FAST_TRANS
-    skb->protocol = ((u16 *) skb->data)[-1];
-#else /* some protocols might require this: */
     skb->protocol = br_type_trans(skb, net_dev);
-#endif /* CONFIG_BR2684_FAST_TRANS */
 #else
     skb_pull(skb, plen - ETH_HLEN);
     skb->protocol = eth_type_trans(skb, net_dev);
diff --git a/net/core/dev.c b/net/core/dev.c
index 13a0d9f..6357f54c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2715,20 +2715,6 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
     return 0;
 }
 
-void __dev_addr_discard(struct dev_addr_list **list)
-{
-    struct dev_addr_list *tmp;
-
-    while (*list != NULL) {
-        tmp = *list;
-        *list = tmp->next;
-        if (tmp->da_users > tmp->da_gusers)
-            printk("__dev_addr_discard: address leakage! "
-                   "da_users=%d\n", tmp->da_users);
-        kfree(tmp);
-    }
-}
-
 /**
  * dev_unicast_delete - Release secondary unicast address.
  * @dev: device
@@ -2777,11 +2763,30 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
 }
 EXPORT_SYMBOL(dev_unicast_add);
 
-static void dev_unicast_discard(struct net_device *dev)
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+    struct dev_addr_list *tmp;
+
+    while (*list != NULL) {
+        tmp = *list;
+        *list = tmp->next;
+        if (tmp->da_users > tmp->da_gusers)
+            printk("__dev_addr_discard: address leakage! "
+                   "da_users=%d\n", tmp->da_users);
+        kfree(tmp);
+    }
+}
+
+static void dev_addr_discard(struct net_device *dev)
 {
     netif_tx_lock_bh(dev);
+
     __dev_addr_discard(&dev->uc_list);
     dev->uc_count = 0;
+
+    __dev_addr_discard(&dev->mc_list);
+    dev->mc_count = 0;
+
     netif_tx_unlock_bh(dev);
 }
@@ -3739,8 +3744,7 @@ void unregister_netdevice(struct net_device *dev)
     /*
      * Flush the unicast and multicast chains
      */
-    dev_unicast_discard(dev);
-    dev_mc_discard(dev);
+    dev_addr_discard(dev);
 
     if (dev->uninit)
         dev->uninit(dev);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 235a2a8..99aece1 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -177,18 +177,6 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 }
 EXPORT_SYMBOL(dev_mc_unsync);
 
-/*
- * Discard multicast list when a device is downed
- */
-
-void dev_mc_discard(struct net_device *dev)
-{
-    netif_tx_lock_bh(dev);
-    __dev_addr_discard(&dev->mc_list);
-    dev->mc_count = 0;
-    netif_tx_unlock_bh(dev);
-}
-
 #ifdef CONFIG_PROC_FS
 static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
 {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cc84d8d..590a767 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -79,27 +79,27 @@
 
 struct gen_estimator
 {
-    struct gen_estimator    *next;
+    struct list_head        list;
     struct gnet_stats_basic *bstats;
     struct gnet_stats_rate_est  *rate_est;
     spinlock_t              *stats_lock;
-    unsigned                interval;
     int                     ewma_log;
     u64                     last_bytes;
     u32                     last_packets;
     u32                     avpps;
     u32                     avbps;
+    struct rcu_head         e_rcu;
 };
 
 struct gen_estimator_head
 {
     struct timer_list       timer;
-    struct gen_estimator    *list;
+    struct list_head        list;
 };
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Estimator array lock */
+/* Protects against NULL dereference */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -107,13 +107,17 @@ static void est_timer(unsigned long arg)
     int idx = (int)arg;
     struct gen_estimator *e;
 
-    read_lock(&est_lock);
-    for (e = elist[idx].list; e; e = e->next) {
+    rcu_read_lock();
+    list_for_each_entry_rcu(e, &elist[idx].list, list) {
         u64 nbytes;
         u32 npackets;
         u32 rate;
 
         spin_lock(e->stats_lock);
+        read_lock(&est_lock);
+        if (e->bstats == NULL)
+            goto skip;
+
         nbytes = e->bstats->bytes;
         npackets = e->bstats->packets;
         rate = (nbytes - e->last_bytes)<<(7 - idx);
@@ -125,12 +129,14 @@ static void est_timer(unsigned long arg)
         e->last_packets = npackets;
         e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
         e->rate_est->pps = (e->avpps+0x1FF)>>10;
+skip:
+        read_unlock(&est_lock);
         spin_unlock(e->stats_lock);
     }
 
-    if (elist[idx].list != NULL)
+    if (!list_empty(&elist[idx].list))
         mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
-    read_unlock(&est_lock);
+    rcu_read_unlock();
 }
 
 /**
@@ -147,12 +153,17 @@ static void est_timer(unsigned long arg)
  * &rate_est with the statistics lock grabed during this period.
  *
  * Returns 0 on success or a negative error code.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic *bstats,
-    struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
+              struct gnet_stats_rate_est *rate_est,
+              spinlock_t *stats_lock,
+              struct rtattr *opt)
 {
     struct gen_estimator *est;
     struct gnet_estimator *parm = RTA_DATA(opt);
+    int idx;
 
     if (RTA_PAYLOAD(opt) < sizeof(*parm))
         return -EINVAL;
@@ -164,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
     if (est == NULL)
         return -ENOBUFS;
 
-    est->interval = parm->interval + 2;
+    idx = parm->interval + 2;
     est->bstats = bstats;
     est->rate_est = rate_est;
     est->stats_lock = stats_lock;
@@ -174,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
     est->last_packets = bstats->packets;
     est->avpps = rate_est->pps<<10;
 
-    est->next = elist[est->interval].list;
-    if (est->next == NULL) {
-        init_timer(&elist[est->interval].timer);
-        elist[est->interval].timer.data = est->interval;
-        elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
-        elist[est->interval].timer.function = est_timer;
-        add_timer(&elist[est->interval].timer);
+    if (!elist[idx].timer.function) {
+        INIT_LIST_HEAD(&elist[idx].list);
+        setup_timer(&elist[idx].timer, est_timer, idx);
     }
-    write_lock_bh(&est_lock);
-    elist[est->interval].list = est;
-    write_unlock_bh(&est_lock);
+
+    if (list_empty(&elist[idx].list))
+        mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
+
+    list_add_rcu(&est->list, &elist[idx].list);
     return 0;
 }
+
+static void __gen_kill_estimator(struct rcu_head *head)
+{
+    struct gen_estimator *e = container_of(head,
+                    struct gen_estimator, e_rcu);
+    kfree(e);
+}
+
 /**
  * gen_kill_estimator - remove a rate estimator
  * @bstats: basic statistics
@@ -195,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
  *
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
     struct gnet_stats_rate_est *rate_est)
 {
     int idx;
-    struct gen_estimator *est, **pest;
+    struct gen_estimator *e, *n;
 
     for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
-        int killed = 0;
-        pest = &elist[idx].list;
-        while ((est=*pest) != NULL) {
-            if (est->rate_est != rate_est || est->bstats != bstats) {
-                pest = &est->next;
+
+        /* Skip non initialized indexes */
+        if (!elist[idx].timer.function)
+            continue;
+
+        list_for_each_entry_safe(e, n, &elist[idx].list, list) {
+            if (e->rate_est != rate_est || e->bstats != bstats)
                 continue;
-            }
 
             write_lock_bh(&est_lock);
-            *pest = est->next;
+            e->bstats = NULL;
             write_unlock_bh(&est_lock);
 
-            kfree(est);
-            killed++;
+            list_del_rcu(&e->list);
+            call_rcu(&e->e_rcu, __gen_kill_estimator);
         }
-        if (killed && elist[idx].list == NULL)
-            del_timer(&elist[idx].timer);
     }
 }
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index dd9ef65..519de09 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-                  u32 seq_rtt, u32 in_flight, int data_acked)
+                  u32 in_flight, int data_acked)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1260e52..55fca18 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
 
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-             int flag)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
     struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ebfaac2..d17da30 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-                  u32 seq_rtt, u32 in_flight, int data_acked)
+                  u32 in_flight, int data_acked)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 43d624e5..14a073d 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)
     tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
+static void hstcp_cong_avoid(struct sock *sk, u32 adk,
                  u32 in_flight, int data_acked)
 {
     struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4ba4a7a..632c05a 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
     return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,
                 u32 in_flight, int data_acked)
 {
     struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index e5be351..b3e55cf 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void hybla_cong_avoid(struct sock *sk, u32 ack,
                 u32 in_flight, int flag)
 {
     struct tcp_sock *tp = tcp_sk(sk);
@@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
         return;
 
     if (!ca->hybla_en)
-        return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+        return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
     if (ca->rho == 0)
         hybla_recalc_param(sk);
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index b2b2256..cc5de6f 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,
                     u32 in_flight, int flag)
 {
     struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e5884a..fec8a7a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
         tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_cong_avoid(struct sock *sk, u32 ack,
                u32 in_flight, int good)
 {
     const struct inet_connection_sock *icsk = inet_csk(sk);
-    icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+    icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
     tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         /* Advance CWND, if state allows this. */
         if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
             tcp_may_raise_cwnd(sk, flag))
-            tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
+            tcp_cong_avoid(sk, ack, prior_in_flight, 0);
         tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
     } else {
         if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
-            tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
+            tcp_cong_avoid(sk, ack, prior_in_flight, 1);
     }
 
     if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index e49836c..80e140e 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-                  int flag)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
     struct lp *lp = inet_csk_ca(sk);
 
     if (!(lp->flag & LP_WITHIN_INF))
-        tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+        tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 }
 
 /**
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4624501..be27a33 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,7 +15,7 @@
 #define TCP_SCALABLE_AI_CNT 50U
 #define TCP_SCALABLE_MD_SCALE 3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,
                     u32 in_flight, int flag)
 {
     struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index e218a51..914e030 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
-                 u32 seq_rtt, u32 in_flight, int flag)
+                 u32 in_flight, int flag)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct vegas *vegas = inet_csk_ca(sk);
 
     if (!vegas->doing_vegas_now)
-        return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+        return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
     /* The key players are v_beg_snd_una and v_beg_snd_nxt.
      *
@@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
         /* We don't have enough RTT samples to do the Vegas
          * calculation, so we'll behave like Reno.
          */
-        tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+        tcp_reno_cong_avoid(sk, ack, in_flight, flag);
     } else {
         u32 rtt, target_cwnd, diff;
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ec854cc..7a55ddf 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 
 static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
-                u32 seq_rtt, u32 in_flight, int flag)
+                u32 in_flight, int flag)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct veno *veno = inet_csk_ca(sk);
 
     if (!veno->doing_veno_now)
-        return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+        return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
     /* limited by applications */
     if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
         /* We don't have enough rtt samples to do the Veno
          * calculation, so we'll behave like Reno.
          */
-        tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+        tcp_reno_cong_avoid(sk, ack, in_flight, flag);
     } else {
         u32 rtt, target_cwnd;
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 545ed23..c04b7c6 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
-                u32 seq_rtt, u32 in_flight, int flag)
+                u32 in_flight, int flag)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct yeah *yeah = inet_csk_ca(sk);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index dcd7e32..4c670cf 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2567,7 +2567,7 @@ int __init irsock_init(void)
  *    Remove IrDA protocol
  *
  */
-void __exit irsock_cleanup(void)
+void irsock_cleanup(void)
 {
     sock_unregister(PF_IRDA);
     proto_unregister(&irda_proto);
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 7b5def1..435b563 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -95,14 +95,14 @@ int __init irda_device_init( void)
     return 0;
 }
 
-static void __exit leftover_dongle(void *arg)
+static void leftover_dongle(void *arg)
 {
     struct dongle_reg *reg = arg;
     IRDA_WARNING("IrDA: Dongle type %x not unregistered\n",
              reg->type);
 }
 
-void __exit irda_device_cleanup(void)
+void irda_device_cleanup(void)
 {
     IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 774eb70..ee3889f 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -153,7 +153,7 @@ int __init iriap_init(void)
  *    Initializes the IrIAP layer, called by the module cleanup code in
  *    irmod.c
  */
-void __exit iriap_cleanup(void)
+void iriap_cleanup(void)
 {
     irlmp_unregister_service(service_handle);
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 2fc9f51..3d76aaf 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -95,7 +95,7 @@ int __init irlap_init(void)
     return 0;
 }
 
-void __exit irlap_cleanup(void)
+void irlap_cleanup(void)
 {
     IRDA_ASSERT(irlap != NULL, return;);
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 24a5e3f..7efa930 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -116,7 +116,7 @@ int __init irlmp_init(void)
  *    Remove IrLMP layer
  *
  */
-void __exit irlmp_cleanup(void)
+void irlmp_cleanup(void)
 {
     /* Check for main structure */
     IRDA_ASSERT(irlmp != NULL, return;);
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index d6f9aba..181cb51 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -84,7 +84,7 @@ void __init irda_proc_register(void)
  *    Unregister irda entry in /proc file system
  *
  */
-void __exit irda_proc_unregister(void)
+void irda_proc_unregister(void)
 {
     int i;
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2e968e7..957e04f 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -287,7 +287,7 @@ int __init irda_sysctl_register(void)
  *    Unregister our sysctl interface
  *
  */
-void __exit irda_sysctl_unregister(void)
+void irda_sysctl_unregister(void)
 {
     unregister_sysctl_table(irda_table_header);
 }
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 7f50832a..3d7ab03 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -109,7 +109,7 @@ int __init irttp_init(void)
  *    Called by module destruction/cleanup code
  *
  */
-void __exit irttp_cleanup(void)
+void irttp_cleanup(void)
 {
     /* Check for main structure */
     IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3ac39f1..3599770 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -436,6 +436,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
 config NETFILTER_XT_MATCH_CONNLIMIT
     tristate '"connlimit" match support"'
     depends on NETFILTER_XTABLES
+    depends on NF_CONNTRACK
     ---help---
       This match allows you to match against the number of parallel
       connections to a server per client IP address (or address block).
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a3c8e69..641cfbc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1012,13 +1012,14 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 {
     struct sock *sk = sock->sk;
     struct netlink_sock *nlk = nlk_sk(sk);
-    int val = 0, err;
+    unsigned int val = 0;
+    int err;
 
     if (level != SOL_NETLINK)
         return -ENOPROTOOPT;
 
     if (optlen >= sizeof(int) &&
-        get_user(val, (int __user *)optval))
+        get_user(val, (unsigned int __user *)optval))
         return -EFAULT;
 
     switch (optname) {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index d3f7c3f..8a74cac 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -97,7 +97,7 @@ config NET_SCH_ATM
       select classes of this queuing discipline.  Each class maps
       the flow(s) it is handling to a given virtual circuit.
 
-      See the top of <file:net/sched/sch_atm.c>) for more details.
+      See the top of <file:net/sched/sch_atm.c> for more details.
 
       To compile this code as a module, choose M here: the
       module will be called sch_atm.
@@ -137,7 +137,7 @@ config NET_SCH_SFQ
     tristate "Stochastic Fairness Queueing (SFQ)"
     ---help---
       Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
-      packet scheduling algorithm .
+      packet scheduling algorithm.
 
       See the top of <file:net/sched/sch_sfq.c> for more details.
@@ -306,7 +306,7 @@ config NET_CLS_RSVP6
       is important for real time data such as streaming sound or video.
 
       Say Y here if you want to be able to classify outgoing packets based
-      on their RSVP requests and you are using the IPv6.
+      on their RSVP requests and you are using the IPv6 protocol.
 
       To compile this code as a module, choose M here: the
       module will be called cls_rsvp6.
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 417ec8f..ddc4f2c 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -292,13 +292,12 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
         }
     }
     DPRINTK("atm_tc_change: new id %x\n", classid);
-    flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
+    flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
     DPRINTK("atm_tc_change: flow %p\n", flow);
     if (!flow) {
         error = -ENOBUFS;
         goto err_out;
     }
-    memset(flow, 0, sizeof(*flow));
     flow->filter_list = NULL;
     if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
         flow->q = &noop_qdisc;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 157bfbd..b48f06f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2141,7 +2141,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
         if (last == first)
             break;
 
-        last = last->u.next;
+        last = (struct xfrm_dst *)last->u.dst.next;
         last->child_mtu_cached = mtu;
     }