Diffstat (limited to 'net')
34 files changed, 471 insertions, 188 deletions
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index faa6aaf..c0f6861 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -460,11 +460,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) skb_pull(skb, plen); skb_set_mac_header(skb, -ETH_HLEN); skb->pkt_type = PACKET_HOST; -#ifdef CONFIG_BR2684_FAST_TRANS - skb->protocol = ((u16 *) skb->data)[-1]; -#else /* some protocols might require this: */ skb->protocol = br_type_trans(skb, net_dev); -#endif /* CONFIG_BR2684_FAST_TRANS */ #else skb_pull(skb, plen - ETH_HLEN); skb->protocol = eth_type_trans(skb, net_dev); diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index a786e78..1ea2f86 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -125,7 +125,7 @@ static void br_stp_start(struct net_bridge *br) char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL }; char *envp[] = { NULL }; - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1); + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); if (r == 0) { br->stp_enabled = BR_USER_STP; printk(KERN_INFO "%s: userspace STP started\n", br->dev->name); diff --git a/net/core/dev.c b/net/core/dev.c index 13a0d9f..6357f54c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2715,20 +2715,6 @@ int __dev_addr_add(struct dev_addr_list **list, int *count, return 0; } -void __dev_addr_discard(struct dev_addr_list **list) -{ - struct dev_addr_list *tmp; - - while (*list != NULL) { - tmp = *list; - *list = tmp->next; - if (tmp->da_users > tmp->da_gusers) - printk("__dev_addr_discard: address leakage! " - "da_users=%d\n", tmp->da_users); - kfree(tmp); - } -} - /** * dev_unicast_delete - Release secondary unicast address. * @dev: device @@ -2777,11 +2763,30 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen) } EXPORT_SYMBOL(dev_unicast_add); -static void dev_unicast_discard(struct net_device *dev) +static void __dev_addr_discard(struct dev_addr_list **list) +{ + struct dev_addr_list *tmp; + + while (*list != NULL) { + tmp = *list; + *list = tmp->next; + if (tmp->da_users > tmp->da_gusers) + printk("__dev_addr_discard: address leakage! 
" + "da_users=%d\n", tmp->da_users); + kfree(tmp); + } +} + +static void dev_addr_discard(struct net_device *dev) { netif_tx_lock_bh(dev); + __dev_addr_discard(&dev->uc_list); dev->uc_count = 0; + + __dev_addr_discard(&dev->mc_list); + dev->mc_count = 0; + netif_tx_unlock_bh(dev); } @@ -3739,8 +3744,7 @@ void unregister_netdevice(struct net_device *dev) /* * Flush the unicast and multicast chains */ - dev_unicast_discard(dev); - dev_mc_discard(dev); + dev_addr_discard(dev); if (dev->uninit) dev->uninit(dev); diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c index 235a2a8..99aece1 100644 --- a/net/core/dev_mcast.c +++ b/net/core/dev_mcast.c @@ -177,18 +177,6 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from) } EXPORT_SYMBOL(dev_mc_unsync); -/* - * Discard multicast list when a device is downed - */ - -void dev_mc_discard(struct net_device *dev) -{ - netif_tx_lock_bh(dev); - __dev_addr_discard(&dev->mc_list); - dev->mc_count = 0; - netif_tx_unlock_bh(dev); -} - #ifdef CONFIG_PROC_FS static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos) { diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index cc84d8d..590a767 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -79,27 +79,27 @@ struct gen_estimator { - struct gen_estimator *next; + struct list_head list; struct gnet_stats_basic *bstats; struct gnet_stats_rate_est *rate_est; spinlock_t *stats_lock; - unsigned interval; int ewma_log; u64 last_bytes; u32 last_packets; u32 avpps; u32 avbps; + struct rcu_head e_rcu; }; struct gen_estimator_head { struct timer_list timer; - struct gen_estimator *list; + struct list_head list; }; static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; -/* Estimator array lock */ +/* Protects against NULL dereference */ static DEFINE_RWLOCK(est_lock); static void est_timer(unsigned long arg) @@ -107,13 +107,17 @@ static void est_timer(unsigned long arg) int idx = (int)arg; struct gen_estimator *e; - read_lock(&est_lock); - for (e = elist[idx].list; e; e = e->next) { + rcu_read_lock(); + list_for_each_entry_rcu(e, &elist[idx].list, list) { u64 nbytes; u32 npackets; u32 rate; spin_lock(e->stats_lock); + read_lock(&est_lock); + if (e->bstats == NULL) + goto skip; + nbytes = e->bstats->bytes; npackets = e->bstats->packets; rate = (nbytes - e->last_bytes)<<(7 - idx); @@ -125,12 +129,14 @@ static void est_timer(unsigned long arg) e->last_packets = npackets; e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log; e->rate_est->pps = (e->avpps+0x1FF)>>10; +skip: + read_unlock(&est_lock); spin_unlock(e->stats_lock); } - if (elist[idx].list != NULL) + if (!list_empty(&elist[idx].list)) mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4)); - read_unlock(&est_lock); + rcu_read_unlock(); } /** @@ -147,12 +153,17 @@ static void est_timer(unsigned long arg) * &rate_est with the statistics lock grabed during this period. * * Returns 0 on success or a negative error code. 
+ * + * NOTE: Called under rtnl_mutex */ int gen_new_estimator(struct gnet_stats_basic *bstats, - struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt) + struct gnet_stats_rate_est *rate_est, + spinlock_t *stats_lock, + struct rtattr *opt) { struct gen_estimator *est; struct gnet_estimator *parm = RTA_DATA(opt); + int idx; if (RTA_PAYLOAD(opt) < sizeof(*parm)) return -EINVAL; @@ -164,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats, if (est == NULL) return -ENOBUFS; - est->interval = parm->interval + 2; + idx = parm->interval + 2; est->bstats = bstats; est->rate_est = rate_est; est->stats_lock = stats_lock; @@ -174,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats, est->last_packets = bstats->packets; est->avpps = rate_est->pps<<10; - est->next = elist[est->interval].list; - if (est->next == NULL) { - init_timer(&elist[est->interval].timer); - elist[est->interval].timer.data = est->interval; - elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4); - elist[est->interval].timer.function = est_timer; - add_timer(&elist[est->interval].timer); + if (!elist[idx].timer.function) { + INIT_LIST_HEAD(&elist[idx].list); + setup_timer(&elist[idx].timer, est_timer, idx); } - write_lock_bh(&est_lock); - elist[est->interval].list = est; - write_unlock_bh(&est_lock); + + if (list_empty(&elist[idx].list)) + mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4)); + + list_add_rcu(&est->list, &elist[idx].list); return 0; } +static void __gen_kill_estimator(struct rcu_head *head) +{ + struct gen_estimator *e = container_of(head, + struct gen_estimator, e_rcu); + kfree(e); +} + /** * gen_kill_estimator - remove a rate estimator * @bstats: basic statistics @@ -195,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats, * * Removes the rate estimator specified by &bstats and &rate_est * and deletes the timer. + * + * NOTE: Called under rtnl_mutex */ void gen_kill_estimator(struct gnet_stats_basic *bstats, struct gnet_stats_rate_est *rate_est) { int idx; - struct gen_estimator *est, **pest; + struct gen_estimator *e, *n; for (idx=0; idx <= EST_MAX_INTERVAL; idx++) { - int killed = 0; - pest = &elist[idx].list; - while ((est=*pest) != NULL) { - if (est->rate_est != rate_est || est->bstats != bstats) { - pest = &est->next; + + /* Skip non initialized indexes */ + if (!elist[idx].timer.function) + continue; + + list_for_each_entry_safe(e, n, &elist[idx].list, list) { + if (e->rate_est != rate_est || e->bstats != bstats) continue; - } write_lock_bh(&est_lock); - *pest = est->next; + e->bstats = NULL; write_unlock_bh(&est_lock); - kfree(est); - killed++; + list_del_rcu(&e->list); + call_rcu(&e->e_rcu, __gen_kill_estimator); } - if (killed && elist[idx].list == NULL) - del_timer(&elist[idx].timer); } } diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index dd9ef65..519de09 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) } static void bictcp_cong_avoid(struct sock *sk, u32 ack, - u32 seq_rtt, u32 in_flight, int data_acked) + u32 in_flight, int data_acked) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 1260e52..55fca18 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start); /* This is Jacobson's slow start and congestion avoidance. * SIGCOMM '88, p. 328. 
*/ -void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, - int flag) +void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag) { struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index ebfaac2..d17da30 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk) } static void bictcp_cong_avoid(struct sock *sk, u32 ack, - u32 seq_rtt, u32 in_flight, int data_acked) + u32 in_flight, int data_acked) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index 43d624e5..14a073d 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c @@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk) tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); } -static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt, +static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 in_flight, int data_acked) { struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 4ba4a7a..632c05a 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk) return max((tp->snd_cwnd * ca->beta) >> 7, 2U); } -static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt, u32 in_flight, int data_acked) { struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index e5be351..b3e55cf 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c @@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds) * o Give cwnd a new value based on the model proposed * o remember increments <1 */ -static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag) { struct tcp_sock *tp = tcp_sk(sk); @@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt, return; if (!ca->hybla_en) - return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag); + return tcp_reno_cong_avoid(sk, ack, in_flight, flag); if (ca->rho == 0) hybla_recalc_param(sk); diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index b2b2256..cc5de6f 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state) /* * Increase window in response to successful acknowledgment. 
*/ -static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag) { struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4e5884a..fec8a7a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, tcp_ack_no_tstamp(sk, seq_rtt, flag); } -static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int good) { const struct inet_connection_sock *icsk = inet_csk(sk); - icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good); + icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good); tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; } @@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) /* Advance CWND, if state allows this. */ if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && tcp_may_raise_cwnd(sk, flag)) - tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0); + tcp_cong_avoid(sk, ack, prior_in_flight, 0); tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) - tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1); + tcp_cong_avoid(sk, ack, prior_in_flight, 1); } if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index e49836c..80e140e 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk) * Will only call newReno CA when away from inference. * From TCP-LP's paper, this will be handled in additive increasement. */ -static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, - int flag) +static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag) { struct lp *lp = inet_csk_ca(sk); if (!(lp->flag & LP_WITHIN_INF)) - tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag); + tcp_reno_cong_avoid(sk, ack, in_flight, flag); } /** diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index 4624501..be27a33 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -15,7 +15,7 @@ #define TCP_SCALABLE_AI_CNT 50U #define TCP_SCALABLE_MD_SCALE 3 -static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt, +static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag) { struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index e218a51..914e030 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, - u32 seq_rtt, u32 in_flight, int flag) + u32 in_flight, int flag) { struct tcp_sock *tp = tcp_sk(sk); struct vegas *vegas = inet_csk_ca(sk); if (!vegas->doing_vegas_now) - return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); + return tcp_reno_cong_avoid(sk, ack, in_flight, flag); /* The key players are v_beg_snd_una and v_beg_snd_nxt. * @@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, /* We don't have enough RTT samples to do the Vegas * calculation, so we'll behave like Reno. 
*/ - tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); + tcp_reno_cong_avoid(sk, ack, in_flight, flag); } else { u32 rtt, target_cwnd, diff; diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index ec854cc..7a55ddf 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event) } static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, - u32 seq_rtt, u32 in_flight, int flag) + u32 in_flight, int flag) { struct tcp_sock *tp = tcp_sk(sk); struct veno *veno = inet_csk_ca(sk); if (!veno->doing_veno_now) - return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); + return tcp_reno_cong_avoid(sk, ack, in_flight, flag); /* limited by applications */ if (!tcp_is_cwnd_limited(sk, in_flight)) @@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, /* We don't have enough rtt samples to do the Veno * calculation, so we'll behave like Reno. */ - tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag); + tcp_reno_cong_avoid(sk, ack, in_flight, flag); } else { u32 rtt, target_cwnd; diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 545ed23..c04b7c6 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last) } static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, - u32 seq_rtt, u32 in_flight, int flag) + u32 in_flight, int flag) { struct tcp_sock *tp = tcp_sk(sk); struct yeah *yeah = inet_csk_ca(sk); diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index dcd7e32..4c670cf 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -2567,7 +2567,7 @@ int __init irsock_init(void) * Remove IrDA protocol * */ -void __exit irsock_cleanup(void) +void irsock_cleanup(void) { sock_unregister(PF_IRDA); proto_unregister(&irda_proto); diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index 7b5def1..435b563 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c @@ -95,14 +95,14 @@ int __init irda_device_init( void) return 0; } -static void __exit leftover_dongle(void *arg) +static void leftover_dongle(void *arg) { struct dongle_reg *reg = arg; IRDA_WARNING("IrDA: Dongle type %x not unregistered\n", reg->type); } -void __exit irda_device_cleanup(void) +void irda_device_cleanup(void) { IRDA_DEBUG(4, "%s()\n", __FUNCTION__); diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 774eb70..ee3889f 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -153,7 +153,7 @@ int __init iriap_init(void) * Initializes the IrIAP layer, called by the module cleanup code in * irmod.c */ -void __exit iriap_cleanup(void) +void iriap_cleanup(void) { irlmp_unregister_service(service_handle); diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index 4adaae2..cf30245 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c @@ -36,39 +36,6 @@ hashbin_t *irias_objects; */ struct ias_value irias_missing = { IAS_MISSING, 0, 0, 0, {0}}; -/* - * Function strndup (str, max) - * - * My own kernel version of strndup! - * - * Faster, check boundary... 
Jean II - */ -static char *strndup(char *str, size_t max) -{ - char *new_str; - int len; - - /* Check string */ - if (str == NULL) - return NULL; - /* Check length, truncate */ - len = strlen(str); - if(len > max) - len = max; - - /* Allocate new string */ - new_str = kmalloc(len + 1, GFP_ATOMIC); - if (new_str == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); - return NULL; - } - - /* Copy and truncate */ - memcpy(new_str, str, len); - new_str[len] = '\0'; - - return new_str; -} /* * Function ias_new_object (name, id) @@ -90,7 +57,7 @@ struct ias_object *irias_new_object( char *name, int id) } obj->magic = IAS_OBJECT_MAGIC; - obj->name = strndup(name, IAS_MAX_CLASSNAME); + obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC); if (!obj->name) { IRDA_WARNING("%s(), Unable to allocate name!\n", __FUNCTION__); @@ -360,7 +327,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, } attrib->magic = IAS_ATTRIB_MAGIC; - attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); + attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC); /* Insert value */ attrib->value = irias_new_integer_value(value); @@ -404,7 +371,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, } attrib->magic = IAS_ATTRIB_MAGIC; - attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); + attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC); attrib->value = irias_new_octseq_value( octets, len); if (!attrib->name || !attrib->value) { @@ -446,7 +413,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, } attrib->magic = IAS_ATTRIB_MAGIC; - attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); + attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC); attrib->value = irias_new_string_value(value); if (!attrib->name || !attrib->value) { @@ -506,7 +473,7 @@ struct ias_value *irias_new_string_value(char *string) value->type = IAS_STRING; value->charset = CS_ASCII; - value->t.string = strndup(string, IAS_MAX_STRING); + value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC); if (!value->t.string) { IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); kfree(value); diff --git a/net/irda/irlap.c b/net/irda/irlap.c index 2fc9f51..3d76aaf 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -95,7 +95,7 @@ int __init irlap_init(void) return 0; } -void __exit irlap_cleanup(void) +void irlap_cleanup(void) { IRDA_ASSERT(irlap != NULL, return;); diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 24a5e3f..7efa930 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -116,7 +116,7 @@ int __init irlmp_init(void) * Remove IrLMP layer * */ -void __exit irlmp_cleanup(void) +void irlmp_cleanup(void) { /* Check for main structure */ IRDA_ASSERT(irlmp != NULL, return;); diff --git a/net/irda/irproc.c b/net/irda/irproc.c index d6f9aba..181cb51 100644 --- a/net/irda/irproc.c +++ b/net/irda/irproc.c @@ -84,7 +84,7 @@ void __init irda_proc_register(void) * Unregister irda entry in /proc file system * */ -void __exit irda_proc_unregister(void) +void irda_proc_unregister(void) { int i; diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index 2e968e7..957e04f 100644 --- a/net/irda/irsysctl.c +++ b/net/irda/irsysctl.c @@ -287,7 +287,7 @@ int __init irda_sysctl_register(void) * Unregister our sysctl interface * */ -void __exit irda_sysctl_unregister(void) +void irda_sysctl_unregister(void) { unregister_sysctl_table(irda_table_header); } diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 7f50832a..3d7ab03 100644 --- 
a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -109,7 +109,7 @@ int __init irttp_init(void) * Called by module destruction/cleanup code * */ -void __exit irttp_cleanup(void) +void irttp_cleanup(void) { /* Check for main structure */ IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 3ac39f1..3599770 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -436,6 +436,7 @@ config NETFILTER_XT_MATCH_CONNBYTES config NETFILTER_XT_MATCH_CONNLIMIT tristate '"connlimit" match support"' depends on NETFILTER_XTABLES + depends on NF_CONNTRACK ---help--- This match allows you to match against the number of parallel connections to a server per client IP address (or address block). diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a3c8e69..5681ce3 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -62,6 +62,7 @@ #include <net/netlink.h> #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) +#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long)) struct netlink_sock { /* struct sock has to be the first member of netlink_sock */ @@ -314,10 +315,12 @@ netlink_update_listeners(struct sock *sk) unsigned long mask; unsigned int i; - for (i = 0; i < NLGRPSZ(tbl->groups)/sizeof(unsigned long); i++) { + for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { mask = 0; - sk_for_each_bound(sk, node, &tbl->mc_list) - mask |= nlk_sk(sk)->groups[i]; + sk_for_each_bound(sk, node, &tbl->mc_list) { + if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) + mask |= nlk_sk(sk)->groups[i]; + } tbl->listeners[i] = mask; } /* this function is only called with the netlink table "grabbed", which @@ -555,26 +558,37 @@ netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) nlk->subscriptions = subscriptions; } -static int netlink_alloc_groups(struct sock *sk) +static int netlink_realloc_groups(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); unsigned int groups; + unsigned long *new_groups; int err = 0; - netlink_lock_table(); + netlink_table_grab(); + groups = nl_table[sk->sk_protocol].groups; - if (!nl_table[sk->sk_protocol].registered) + if (!nl_table[sk->sk_protocol].registered) { err = -ENOENT; - netlink_unlock_table(); + goto out_unlock; + } - if (err) - return err; + if (nlk->ngroups >= groups) + goto out_unlock; - nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL); - if (nlk->groups == NULL) - return -ENOMEM; + new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC); + if (new_groups == NULL) { + err = -ENOMEM; + goto out_unlock; + } + memset((char*)new_groups + NLGRPSZ(nlk->ngroups), 0, + NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups)); + + nlk->groups = new_groups; nlk->ngroups = groups; - return 0; + out_unlock: + netlink_table_ungrab(); + return err; } static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) @@ -591,11 +605,9 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len if (nladdr->nl_groups) { if (!netlink_capable(sock, NL_NONROOT_RECV)) return -EPERM; - if (nlk->groups == NULL) { - err = netlink_alloc_groups(sk); - if (err) - return err; - } + err = netlink_realloc_groups(sk); + if (err) + return err; } if (nlk->pid) { @@ -839,10 +851,18 @@ retry: int netlink_has_listeners(struct sock *sk, unsigned int group) { int res = 0; + unsigned long *listeners; BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET)); + + rcu_read_lock(); + listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); + if (group - 1 < 
nl_table[sk->sk_protocol].groups) - res = test_bit(group - 1, nl_table[sk->sk_protocol].listeners); + res = test_bit(group - 1, listeners); + + rcu_read_unlock(); + return res; } EXPORT_SYMBOL_GPL(netlink_has_listeners); @@ -1007,18 +1027,36 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) read_unlock(&nl_table_lock); } +/* must be called with netlink table grabbed */ +static void netlink_update_socket_mc(struct netlink_sock *nlk, + unsigned int group, + int is_new) +{ + int old, new = !!is_new, subscriptions; + + old = test_bit(group - 1, nlk->groups); + subscriptions = nlk->subscriptions - old + new; + if (new) + __set_bit(group - 1, nlk->groups); + else + __clear_bit(group - 1, nlk->groups); + netlink_update_subscriptions(&nlk->sk, subscriptions); + netlink_update_listeners(&nlk->sk); +} + static int netlink_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); - int val = 0, err; + unsigned int val = 0; + int err; if (level != SOL_NETLINK) return -ENOPROTOOPT; if (optlen >= sizeof(int) && - get_user(val, (int __user *)optval)) + get_user(val, (unsigned int __user *)optval)) return -EFAULT; switch (optname) { @@ -1031,27 +1069,16 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, break; case NETLINK_ADD_MEMBERSHIP: case NETLINK_DROP_MEMBERSHIP: { - unsigned int subscriptions; - int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0; - if (!netlink_capable(sock, NL_NONROOT_RECV)) return -EPERM; - if (nlk->groups == NULL) { - err = netlink_alloc_groups(sk); - if (err) - return err; - } + err = netlink_realloc_groups(sk); + if (err) + return err; if (!val || val - 1 >= nlk->ngroups) return -EINVAL; netlink_table_grab(); - old = test_bit(val - 1, nlk->groups); - subscriptions = nlk->subscriptions - old + new; - if (new) - __set_bit(val - 1, nlk->groups); - else - __clear_bit(val - 1, nlk->groups); - netlink_update_subscriptions(sk, subscriptions); - netlink_update_listeners(sk); + netlink_update_socket_mc(nlk, val, + optname == NETLINK_ADD_MEMBERSHIP); netlink_table_ungrab(); err = 0; break; @@ -1327,6 +1354,71 @@ out_sock_release: return NULL; } +/** + * netlink_change_ngroups - change number of multicast groups + * + * This changes the number of multicast groups that are available + * on a certain netlink family. Note that it is not possible to + * change the number of groups to below 32. Also note that it does + * not implicitly call netlink_clear_multicast_users() when the + * number of groups is reduced. + * + * @sk: The kernel netlink socket, as returned by netlink_kernel_create(). + * @groups: The new number of groups. 
+ */ +int netlink_change_ngroups(struct sock *sk, unsigned int groups) +{ + unsigned long *listeners, *old = NULL; + struct netlink_table *tbl = &nl_table[sk->sk_protocol]; + int err = 0; + + if (groups < 32) + groups = 32; + + netlink_table_grab(); + if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { + listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC); + if (!listeners) { + err = -ENOMEM; + goto out_ungrab; + } + old = tbl->listeners; + memcpy(listeners, old, NLGRPSZ(tbl->groups)); + rcu_assign_pointer(tbl->listeners, listeners); + } + tbl->groups = groups; + + out_ungrab: + netlink_table_ungrab(); + synchronize_rcu(); + kfree(old); + return err; +} +EXPORT_SYMBOL(netlink_change_ngroups); + +/** + * netlink_clear_multicast_users - kick off multicast listeners + * + * This function removes all listeners from the given group. + * @ksk: The kernel netlink socket, as returned by + * netlink_kernel_create(). + * @group: The multicast group to clear. + */ +void netlink_clear_multicast_users(struct sock *ksk, unsigned int group) +{ + struct sock *sk; + struct hlist_node *node; + struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; + + netlink_table_grab(); + + sk_for_each_bound(sk, node, &tbl->mc_list) + netlink_update_socket_mc(nlk_sk(sk), group, 0); + + netlink_table_ungrab(); +} +EXPORT_SYMBOL(netlink_clear_multicast_users); + void netlink_set_nonroot(int protocol, unsigned int flags) { if ((unsigned int)protocol < MAX_LINKS) diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index b9ab62f..e146531 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -3,6 +3,7 @@ * * Authors: Jamal Hadi Salim * Thomas Graf <tgraf@suug.ch> + * Johannes Berg <johannes@sipsolutions.net> */ #include <linux/module.h> @@ -13,6 +14,7 @@ #include <linux/string.h> #include <linux/skbuff.h> #include <linux/mutex.h> +#include <linux/bitmap.h> #include <net/sock.h> #include <net/genetlink.h> @@ -42,6 +44,16 @@ static void genl_unlock(void) #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) static struct list_head family_ht[GENL_FAM_TAB_SIZE]; +/* + * Bitmap of multicast groups that are currently in use. + * + * To avoid an allocation at boot of just one unsigned long, + * declare it global instead. + * Bit 0 is marked as already used since group 0 is invalid. + */ +static unsigned long mc_group_start = 0x1; +static unsigned long *mc_groups = &mc_group_start; +static unsigned long mc_groups_longs = 1; static int genl_ctrl_event(int event, void *data); @@ -116,6 +128,114 @@ static inline u16 genl_generate_id(void) return id_gen_idx; } +static struct genl_multicast_group notify_grp; + +/** + * genl_register_mc_group - register a multicast group + * + * Registers the specified multicast group and notifies userspace + * about the new group. + * + * Returns 0 on success or a negative error code. + * + * @family: The generic netlink family the group shall be registered for. + * @grp: The group to register, must have a name. 
+ */ +int genl_register_mc_group(struct genl_family *family, + struct genl_multicast_group *grp) +{ + int id; + unsigned long *new_groups; + int err; + + BUG_ON(grp->name[0] == '\0'); + + genl_lock(); + + /* special-case our own group */ + if (grp == ¬ify_grp) + id = GENL_ID_CTRL; + else + id = find_first_zero_bit(mc_groups, + mc_groups_longs * BITS_PER_LONG); + + + if (id >= mc_groups_longs * BITS_PER_LONG) { + size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long); + + if (mc_groups == &mc_group_start) { + new_groups = kzalloc(nlen, GFP_KERNEL); + if (!new_groups) { + err = -ENOMEM; + goto out; + } + mc_groups = new_groups; + *mc_groups = mc_group_start; + } else { + new_groups = krealloc(mc_groups, nlen, GFP_KERNEL); + if (!new_groups) { + err = -ENOMEM; + goto out; + } + mc_groups = new_groups; + mc_groups[mc_groups_longs] = 0; + } + mc_groups_longs++; + } + + err = netlink_change_ngroups(genl_sock, + sizeof(unsigned long) * NETLINK_GENERIC); + if (err) + goto out; + + grp->id = id; + set_bit(id, mc_groups); + list_add_tail(&grp->list, &family->mcast_groups); + grp->family = family; + + genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp); + out: + genl_unlock(); + return 0; +} +EXPORT_SYMBOL(genl_register_mc_group); + +/** + * genl_unregister_mc_group - unregister a multicast group + * + * Unregisters the specified multicast group and notifies userspace + * about it. All current listeners on the group are removed. + * + * Note: It is not necessary to unregister all multicast groups before + * unregistering the family, unregistering the family will cause + * all assigned multicast groups to be unregistered automatically. + * + * @family: Generic netlink family the group belongs to. + * @grp: The group to unregister, must have been registered successfully + * previously. 
+ */ +void genl_unregister_mc_group(struct genl_family *family, + struct genl_multicast_group *grp) +{ + BUG_ON(grp->family != family); + genl_lock(); + netlink_clear_multicast_users(genl_sock, grp->id); + clear_bit(grp->id, mc_groups); + list_del(&grp->list); + genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp); + grp->id = 0; + grp->family = NULL; + genl_unlock(); +} + +static void genl_unregister_mc_groups(struct genl_family *family) +{ + struct genl_multicast_group *grp, *tmp; + + list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list) + genl_unregister_mc_group(family, grp); +} + /** * genl_register_ops - register generic netlink operations * @family: generic netlink family @@ -216,6 +336,7 @@ int genl_register_family(struct genl_family *family) goto errout; INIT_LIST_HEAD(&family->ops_list); + INIT_LIST_HEAD(&family->mcast_groups); genl_lock(); @@ -275,6 +396,8 @@ int genl_unregister_family(struct genl_family *family) { struct genl_family *rc; + genl_unregister_mc_groups(family); + genl_lock(); list_for_each_entry(rc, genl_family_chain(family->id), family_list) { @@ -410,6 +533,67 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, nla_nest_end(skb, nla_ops); } + if (!list_empty(&family->mcast_groups)) { + struct genl_multicast_group *grp; + struct nlattr *nla_grps; + int idx = 1; + + nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); + if (nla_grps == NULL) + goto nla_put_failure; + + list_for_each_entry(grp, &family->mcast_groups, list) { + struct nlattr *nest; + + nest = nla_nest_start(skb, idx++); + if (nest == NULL) + goto nla_put_failure; + + NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); + NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, + grp->name); + + nla_nest_end(skb, nest); + } + nla_nest_end(skb, nla_grps); + } + + return genlmsg_end(skb, hdr); + +nla_put_failure: + return genlmsg_cancel(skb, hdr); +} + +static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, + u32 seq, u32 flags, struct sk_buff *skb, + u8 cmd) +{ + void *hdr; + struct nlattr *nla_grps; + struct nlattr *nest; + + hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd); + if (hdr == NULL) + return -1; + + NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name); + NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id); + + nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); + if (nla_grps == NULL) + goto nla_put_failure; + + nest = nla_nest_start(skb, 1); + if (nest == NULL) + goto nla_put_failure; + + NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); + NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, + grp->name); + + nla_nest_end(skb, nest); + nla_nest_end(skb, nla_grps); + return genlmsg_end(skb, hdr); nla_put_failure: @@ -453,8 +637,8 @@ errout: return skb->len; } -static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid, - int seq, u8 cmd) +static struct sk_buff *ctrl_build_family_msg(struct genl_family *family, + u32 pid, int seq, u8 cmd) { struct sk_buff *skb; int err; @@ -472,6 +656,25 @@ static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid, return skb; } +static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp, + u32 pid, int seq, u8 cmd) +{ + struct sk_buff *skb; + int err; + + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (skb == NULL) + return ERR_PTR(-ENOBUFS); + + err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd); + if (err < 0) { + nlmsg_free(skb); + return ERR_PTR(err); + } + + return skb; +} + static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = { 
[CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 }, [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING, @@ -501,8 +704,8 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info) goto errout; } - msg = ctrl_build_msg(res, info->snd_pid, info->snd_seq, - CTRL_CMD_NEWFAMILY); + msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq, + CTRL_CMD_NEWFAMILY); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto errout; @@ -523,7 +726,15 @@ static int genl_ctrl_event(int event, void *data) switch (event) { case CTRL_CMD_NEWFAMILY: case CTRL_CMD_DELFAMILY: - msg = ctrl_build_msg(data, 0, 0, event); + msg = ctrl_build_family_msg(data, 0, 0, event); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL); + break; + case CTRL_CMD_NEWMCAST_GRP: + case CTRL_CMD_DELMCAST_GRP: + msg = ctrl_build_mcgrp_msg(data, 0, 0, event); if (IS_ERR(msg)) return PTR_ERR(msg); @@ -541,6 +752,10 @@ static struct genl_ops genl_ctrl_ops = { .policy = ctrl_policy, }; +static struct genl_multicast_group notify_grp = { + .name = "notify", +}; + static int __init genl_init(void) { int i, err; @@ -557,11 +772,17 @@ static int __init genl_init(void) goto errout_register; netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); - genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID, - genl_rcv, NULL, THIS_MODULE); + + /* we'll bump the group number right afterwards */ + genl_sock = netlink_kernel_create(NETLINK_GENERIC, 0, genl_rcv, + NULL, THIS_MODULE); if (genl_sock == NULL) panic("GENL: Cannot initialize generic netlink\n"); + err = genl_register_mc_group(&genl_ctrl, ¬ify_grp); + if (err < 0) + goto errout_register; + return 0; errout_register: diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index f3986d4..db3395b 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c @@ -187,7 +187,7 @@ static ssize_t rfkill_claim_store(struct device *dev, static struct device_attribute rfkill_dev_attrs[] = { __ATTR(name, S_IRUGO, rfkill_name_show, NULL), __ATTR(type, S_IRUGO, rfkill_type_show, NULL), - __ATTR(state, S_IRUGO, rfkill_state_show, rfkill_state_store), + __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), __ATTR_NULL }; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index d3f7c3f..8a74cac 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -97,7 +97,7 @@ config NET_SCH_ATM select classes of this queuing discipline. Each class maps the flow(s) it is handling to a given virtual circuit. - See the top of <file:net/sched/sch_atm.c>) for more details. + See the top of <file:net/sched/sch_atm.c> for more details. To compile this code as a module, choose M here: the module will be called sch_atm. @@ -137,7 +137,7 @@ config NET_SCH_SFQ tristate "Stochastic Fairness Queueing (SFQ)" ---help--- Say Y here if you want to use the Stochastic Fairness Queueing (SFQ) - packet scheduling algorithm . + packet scheduling algorithm. See the top of <file:net/sched/sch_sfq.c> for more details. @@ -306,7 +306,7 @@ config NET_CLS_RSVP6 is important for real time data such as streaming sound or video. Say Y here if you want to be able to classify outgoing packets based - on their RSVP requests and you are using the IPv6. + on their RSVP requests and you are using the IPv6 protocol. To compile this code as a module, choose M here: the module will be called cls_rsvp6. 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 417ec8f..ddc4f2c 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -292,13 +292,12 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, } } DPRINTK("atm_tc_change: new id %x\n", classid); - flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL); + flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL); DPRINTK("atm_tc_change: flow %p\n", flow); if (!flow) { error = -ENOBUFS; goto err_out; } - memset(flow, 0, sizeof(*flow)); flow->filter_list = NULL; if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid))) flow->q = &noop_qdisc; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 157bfbd..b48f06f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2141,7 +2141,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, if (last == first) break; - last = last->u.next; + last = (struct xfrm_dst *)last->u.dst.next; last->child_mtu_cached = mtu; } |
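
The largest functional addition in this series is per-family multicast group management for generic netlink: genl_register_mc_group()/genl_unregister_mc_group(), backed by netlink_change_ngroups() and netlink_clear_multicast_users() so the listener bitmaps can grow at runtime. As a rough illustration of how a caller is expected to use the new interface — the "foo" family, the "events" group and foo_notify() below are made-up names for this sketch, not part of the patch — a minimal user might look like this:

```c
#include <linux/module.h>
#include <net/genetlink.h>

/* Hypothetical example family; "foo", "events" and foo_notify() are
 * illustrative names only and are not defined anywhere in this patch. */
static struct genl_family foo_family = {
	.id      = GENL_ID_GENERATE,
	.name    = "foo",
	.version = 1,
	.maxattr = 0,
};

static struct genl_multicast_group foo_events_grp = {
	.name = "events",
};

static int __init foo_init(void)
{
	int err;

	err = genl_register_family(&foo_family);
	if (err)
		return err;

	/* Allocates a group id, grows the netlink listener bitmap if
	 * needed and announces CTRL_CMD_NEWMCAST_GRP to userspace. */
	err = genl_register_mc_group(&foo_family, &foo_events_grp);
	if (err)
		genl_unregister_family(&foo_family);
	return err;
}

/* Send an already-built genetlink message to every socket bound to the
 * group; foo_events_grp.id is only valid once registration succeeded. */
static void foo_notify(struct sk_buff *msg)
{
	genlmsg_multicast(msg, 0, foo_events_grp.id, GFP_KERNEL);
}
```

Note that unregistering the family tears the groups down implicitly — genl_unregister_family() now calls genl_unregister_mc_groups() — so an explicit genl_unregister_mc_group() call is only needed when a group should disappear while the family stays registered.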