Diffstat (limited to 'net')
39 files changed, 496 insertions, 158 deletions
diff --git a/net/802/psnap.c b/net/802/psnap.c index 21cde8f..db6baf7 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -147,7 +147,6 @@ struct datalink_proto *register_snap_client(const unsigned char *desc, out: spin_unlock_bh(&snap_lock); - synchronize_net(); return proto; } diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index c3408de..9da07e3 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -118,11 +118,6 @@ extern void vlan_netlink_fini(void); extern struct rtnl_link_ops vlan_link_ops; -static inline int is_vlan_dev(struct net_device *dev) -{ - return dev->priv_flags & IFF_802_1Q_VLAN; -} - extern int vlan_net_id; struct proc_dir_entry; diff --git a/net/atm/proc.c b/net/atm/proc.c index f85da077..be3afde 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -191,7 +191,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc) { struct sock *sk = sk_atm(vcc); - seq_printf(seq, "%p ", vcc); + seq_printf(seq, "%pK ", vcc); if (!vcc->dev) seq_printf(seq, "Unassigned "); else @@ -218,7 +218,7 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) { if (!vcc->dev) seq_printf(seq, sizeof(void *) == 4 ? - "N/A@%p%10s" : "N/A@%p%2s", vcc, ""); + "N/A@%pK%10s" : "N/A@%pK%2s", vcc, ""); else seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, vcc->vci); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index e1f5ec7..3fa1231 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -117,6 +117,10 @@ static struct dst_ops fake_dst_ops = { * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. */ +static const u32 br_dst_default_metrics[RTAX_MAX] = { + [RTAX_MTU - 1] = 1500, +}; + void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; @@ -124,7 +128,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) atomic_set(&rt->dst.__refcnt, 1); rt->dst.dev = br->dev; rt->dst.path = &rt->dst; - dst_metric_set(&rt->dst, RTAX_MTU, 1500); + dst_init_metrics(&rt->dst, br_dst_default_metrics, true); rt->dst.flags = DST_NOXFRM; rt->dst.ops = &fake_dst_ops; } diff --git a/net/can/bcm.c b/net/can/bcm.c index cced806..184a657 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -165,9 +165,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; - seq_printf(m, ">>> socket %p", sk->sk_socket); - seq_printf(m, " / sk %p", sk); - seq_printf(m, " / bo %p", bo); + seq_printf(m, ">>> socket %pK", sk->sk_socket); + seq_printf(m, " / sk %pK", sk); + seq_printf(m, " / bo %pK", bo); seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); seq_printf(m, " <<<\n"); diff --git a/net/core/dev.c b/net/core/dev.c index bcb05cb..c7e305d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1308,6 +1308,13 @@ void dev_disable_lro(struct net_device *dev) { u32 flags; + /* + * If we're trying to disable lro on a vlan device + * use the underlying physical device instead + */ + if (is_vlan_dev(dev)) + dev = vlan_dev_real_dev(dev); + if (dev->ethtool_ops && dev->ethtool_ops->get_flags) flags = dev->ethtool_ops->get_flags(dev); else @@ -5954,7 +5961,10 @@ EXPORT_SYMBOL(free_netdev); void synchronize_net(void) { might_sleep(); - synchronize_rcu(); + if (rtnl_is_locked()) + synchronize_rcu_expedited(); + else + synchronize_rcu(); } EXPORT_SYMBOL(synchronize_net); diff --git a/net/core/dst.c b/net/core/dst.c index 81a4fa1..9ccca03 100644 --- a/net/core/dst.c +++ 
b/net/core/dst.c @@ -315,7 +315,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) { unsigned long prev, new; - new = (unsigned long) dst_default_metrics; + new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; prev = cmpxchg(&dst->_metrics, old, new); if (prev == old) kfree(__DST_METRICS_PTR(old)); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 3911586..008dc70 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -602,6 +602,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, skip: idx++; } + rcu_read_unlock(); cb->args[1] = idx; rules_ops_put(ops); diff --git a/net/core/filter.c b/net/core/filter.c index 0eb8c44..0e3622f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -350,7 +350,9 @@ load_b: continue; } default: - WARN_ON(1); + WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", + fentry->code, fentry->jt, + fentry->jf, fentry->k); return 0; } } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d1644e3..2d56cb9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -850,6 +850,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, struct nlattr *attr, *af_spec; struct rtnl_af_ops *af_ops; + ASSERT_RTNL(); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE; @@ -1876,6 +1877,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) int min_len; int family; int type; + int err; type = nlh->nlmsg_type; if (type > RTM_MAX) @@ -1902,8 +1904,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (dumpit == NULL) return -EOPNOTSUPP; + __rtnl_unlock(); rtnl = net->rtnl; - return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); + err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); + rtnl_lock(); + return err; } memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *))); @@ -1975,7 +1980,7 @@ static int __net_init rtnetlink_net_init(struct net *net) { struct sock *sk; sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, - rtnetlink_rcv, NULL, THIS_MODULE); + rtnetlink_rcv, &rtnl_mutex, THIS_MODULE); if (!sk) return -ENOMEM; net->rtnl = sk; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 672e476..f1d27f6 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1155,20 +1155,18 @@ static void igmp_group_dropped(struct ip_mc_list *im) if (!in_dev->dead) { if (IGMP_V1_SEEN(in_dev)) - goto done; + return; if (IGMP_V2_SEEN(in_dev)) { if (reporter) igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE); - goto done; + return; } /* IGMPv3 */ igmpv3_add_delrec(in_dev, im); igmp_ifc_event(in_dev); } -done: #endif - ip_mc_clear_src(im); } static void igmp_group_added(struct ip_mc_list *im) @@ -1305,6 +1303,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) *ip = i->next_rcu; in_dev->mc_count--; igmp_group_dropped(i); + ip_mc_clear_src(i); if (!in_dev->dead) ip_rt_multicast_event(in_dev); @@ -1414,7 +1413,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev) in_dev->mc_list = i->next_rcu; in_dev->mc_count--; - igmp_group_dropped(i); + /* We've dropped the groups in ip_mc_down already */ + ip_mc_clear_src(i); ip_ma_put(i); } } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 1f3bb11..9aaa671 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -137,9 +137,6 @@ static void ping_v4_unhash(struct sock *sk) struct inet_sock *isk = inet_sk(sk); pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { - struct 
hlist_nulls_head *hslot; - - hslot = ping_hashslot(&ping_table, sock_net(sk), isk->inet_num); write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sock_put(sk); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 11e17804..c9893d4 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -979,7 +979,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) srcp = inet->inet_num; seq_printf(seq, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3c8d9b6..a7d6671 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2371,7 +2371,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req, int ttd = req->expires - jiffies; seq_printf(f, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n", i, ireq->loc_addr, ntohs(inet_sk(sk)->inet_sport), @@ -2426,7 +2426,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " - "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n", + "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n", i, src, srcp, dest, destp, sk->sk_state, tp->write_seq - tp->snd_una, rx_queue, @@ -2461,7 +2461,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw, srcp = ntohs(tw->tw_sport); seq_printf(f, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n", i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw, len); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 599374f..abca870 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2090,7 +2090,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ae64984..cc7313b 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1240,7 +1240,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) srcp = inet_sk(sp)->inet_num; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8683664..d1fd287 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2036,7 +2036,7 @@ static void get_openreq6(struct seq_file *seq, seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], @@ -2087,7 +2087,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X 
%08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, @@ -2129,7 +2129,7 @@ static void get_timewait6_sock(struct seq_file *seq, seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index fc0c42a..41f8c9c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1391,7 +1391,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket srcp = ntohs(inet->inet_sport); seq_printf(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", bucket, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index a6770a0..4fe1db12 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -241,7 +241,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb) __be32 spi; spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr); - return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; + return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi); } static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, diff --git a/net/key/af_key.c b/net/key/af_key.c index d62401c..8f92cf8 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3656,7 +3656,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v) if (v == SEQ_START_TOKEN) seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); else - seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", + seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), sk_rmem_alloc_get(s), diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 7dfbe71..49d4f86 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -384,11 +384,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, int i; enum nl80211_channel_type orig_ct; + clear_bit(SDATA_STATE_RUNNING, &sdata->state); + if (local->scan_sdata == sdata) ieee80211_scan_cancel(local); - clear_bit(SDATA_STATE_RUNNING, &sdata->state); - /* * Stop TX on this interface first. 
*/ diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 0d7b08d..866f269 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -752,11 +752,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); - /* mac80211 doesn't support more than 1 channel */ - for (i = 0; i < hw->wiphy->n_iface_combinations; i++) - if (hw->wiphy->iface_combinations[i].num_different_channels > 1) + /* + * mac80211 doesn't support more than 1 channel, and also not more + * than one IBSS interface + */ + for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { + const struct ieee80211_iface_combination *c; + int j; + + c = &hw->wiphy->iface_combinations[i]; + + if (c->num_different_channels > 1) return -EINVAL; + for (j = 0; j < c->n_limits; j++) + if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && + c->limits[j].max > 1) + return -EINVAL; + } + #ifndef CONFIG_MAC80211_MESH /* mesh depends on Kconfig, but drivers should set it if they want */ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); @@ -1076,6 +1090,8 @@ static void __exit ieee80211_exit(void) ieee80211s_stop(); ieee80211_iface_exit(); + + rcu_barrier(); } diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index e7c5fdd..249e733 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -120,6 +120,7 @@ struct mesh_path { * buckets * @mean_chain_len: maximum average length for the hash buckets' list, if it is * reached, the table will grow + * rcu_head: RCU head to free the table */ struct mesh_table { /* Number of buckets will be 2^N */ @@ -132,6 +133,8 @@ struct mesh_table { int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); int size_order; int mean_chain_len; + + struct rcu_head rcu_head; }; /* Recent multicast cache */ @@ -286,10 +289,6 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; } -#define for_each_mesh_entry(x, p, node, i) \ - for (i = 0; i <= x->hash_mask; i++) \ - hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) - void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 83ce48e..0d2faac 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -36,8 +36,8 @@ struct mpath_node { struct mesh_path *mpath; }; -static struct mesh_table *mesh_paths; -static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ +static struct mesh_table __rcu *mesh_paths; +static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; @@ -48,17 +48,40 @@ int mesh_paths_generation; static DEFINE_RWLOCK(pathtbl_resize_lock); +static inline struct mesh_table *resize_dereference_mesh_paths(void) +{ + return rcu_dereference_protected(mesh_paths, + lockdep_is_held(&pathtbl_resize_lock)); +} + +static inline struct mesh_table *resize_dereference_mpp_paths(void) +{ + return rcu_dereference_protected(mpp_paths, + lockdep_is_held(&pathtbl_resize_lock)); +} + +/* + * CAREFUL -- "tbl" must not be an expression, + * in particular not an rcu_dereference(), since + * it's used twice. So it is illegal to do + * for_each_mesh_entry(rcu_dereference(...), ...) 
+ */ +#define for_each_mesh_entry(tbl, p, node, i) \ + for (i = 0; i <= tbl->hash_mask; i++) \ + hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) + + static struct mesh_table *mesh_table_alloc(int size_order) { int i; struct mesh_table *newtbl; - newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL); + newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); if (!newtbl) return NULL; newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * - (1 << size_order), GFP_KERNEL); + (1 << size_order), GFP_ATOMIC); if (!newtbl->hash_buckets) { kfree(newtbl); @@ -66,7 +89,7 @@ static struct mesh_table *mesh_table_alloc(int size_order) } newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * - (1 << size_order), GFP_KERNEL); + (1 << size_order), GFP_ATOMIC); if (!newtbl->hashwlock) { kfree(newtbl->hash_buckets); kfree(newtbl); @@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) */ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl = rcu_dereference(mesh_paths); struct mpath_node *node; struct hlist_node *p; int i; int j = 0; - for_each_mesh_entry(mesh_paths, p, node, i) { + for_each_mesh_entry(tbl, p, node, i) { if (sdata && node->mpath->sdata != sdata) continue; if (j++ == idx) { @@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; + struct mesh_table *tbl; struct mesh_path *mpath, *new_mpath; struct mpath_node *node, *new_node; struct hlist_head *bucket; @@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) spin_lock_init(&new_mpath->state_lock); init_timer(&new_mpath->timer); - hash_idx = mesh_table_hash(dst, sdata, mesh_paths); - bucket = &mesh_paths->hash_buckets[hash_idx]; + tbl = resize_dereference_mesh_paths(); + + hash_idx = mesh_table_hash(dst, sdata, tbl); + bucket = &tbl->hash_buckets[hash_idx]; - spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_lock_bh(&tbl->hashwlock[hash_idx]); err = -EEXIST; hlist_for_each_entry(node, n, bucket, list) { @@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) } hlist_add_head_rcu(&new_node->list, bucket); - if (atomic_inc_return(&mesh_paths->entries) >= - mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) + if (atomic_inc_return(&tbl->entries) >= + tbl->mean_chain_len * (tbl->hash_mask + 1)) grow = 1; mesh_paths_generation++; - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); if (grow) { set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); @@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) return 0; err_exists: - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); kfree(new_node); err_node_alloc: @@ -370,58 +397,59 @@ err_path_alloc: return err; } +static void mesh_table_free_rcu(struct rcu_head *rcu) +{ + struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head); + + mesh_table_free(tbl, false); +} + void mesh_mpath_table_grow(void) { struct mesh_table *oldtbl, *newtbl; - rcu_read_lock(); - newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1); - if (!newtbl) - return; write_lock_bh(&pathtbl_resize_lock); - oldtbl = mesh_paths; - if (mesh_table_grow(mesh_paths, newtbl) < 0) { - rcu_read_unlock(); + 
oldtbl = resize_dereference_mesh_paths(); + newtbl = mesh_table_alloc(oldtbl->size_order + 1); + if (!newtbl) + goto out; + if (mesh_table_grow(oldtbl, newtbl) < 0) { __mesh_table_free(newtbl); - write_unlock_bh(&pathtbl_resize_lock); - return; + goto out; } - rcu_read_unlock(); rcu_assign_pointer(mesh_paths, newtbl); - write_unlock_bh(&pathtbl_resize_lock); - synchronize_rcu(); - mesh_table_free(oldtbl, false); + call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); + + out: + write_unlock_bh(&pathtbl_resize_lock); } void mesh_mpp_table_grow(void) { struct mesh_table *oldtbl, *newtbl; - rcu_read_lock(); - newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1); - if (!newtbl) - return; write_lock_bh(&pathtbl_resize_lock); - oldtbl = mpp_paths; - if (mesh_table_grow(mpp_paths, newtbl) < 0) { - rcu_read_unlock(); + oldtbl = resize_dereference_mpp_paths(); + newtbl = mesh_table_alloc(oldtbl->size_order + 1); + if (!newtbl) + goto out; + if (mesh_table_grow(oldtbl, newtbl) < 0) { __mesh_table_free(newtbl); - write_unlock_bh(&pathtbl_resize_lock); - return; + goto out; } - rcu_read_unlock(); rcu_assign_pointer(mpp_paths, newtbl); - write_unlock_bh(&pathtbl_resize_lock); + call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); - synchronize_rcu(); - mesh_table_free(oldtbl, false); + out: + write_unlock_bh(&pathtbl_resize_lock); } int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; + struct mesh_table *tbl; struct mesh_path *mpath, *new_mpath; struct mpath_node *node, *new_node; struct hlist_head *bucket; @@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) new_mpath->exp_time = jiffies; spin_lock_init(&new_mpath->state_lock); - hash_idx = mesh_table_hash(dst, sdata, mpp_paths); - bucket = &mpp_paths->hash_buckets[hash_idx]; + tbl = resize_dereference_mpp_paths(); - spin_lock_bh(&mpp_paths->hashwlock[hash_idx]); + hash_idx = mesh_table_hash(dst, sdata, tbl); + bucket = &tbl->hash_buckets[hash_idx]; + + spin_lock_bh(&tbl->hashwlock[hash_idx]); err = -EEXIST; hlist_for_each_entry(node, n, bucket, list) { @@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) } hlist_add_head_rcu(&new_node->list, bucket); - if (atomic_inc_return(&mpp_paths->entries) >= - mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) + if (atomic_inc_return(&tbl->entries) >= + tbl->mean_chain_len * (tbl->hash_mask + 1)) grow = 1; - spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); if (grow) { set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); @@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) return 0; err_exists: - spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); kfree(new_node); err_node_alloc: @@ -502,6 +532,7 @@ err_path_alloc: */ void mesh_plink_broken(struct sta_info *sta) { + struct mesh_table *tbl; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; struct mpath_node *node; @@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta) int i; rcu_read_lock(); - for_each_mesh_entry(mesh_paths, p, node, i) { + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { mpath = node->mpath; spin_lock_bh(&mpath->state_lock); - if (mpath->next_hop == 
sta && + if (rcu_dereference(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { mpath->flags &= ~MESH_PATH_ACTIVE; @@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta) */ void mesh_path_flush_by_nexthop(struct sta_info *sta) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_node *p; int i; - for_each_mesh_entry(mesh_paths, p, node, i) { + rcu_read_lock(); + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { mpath = node->mpath; - if (mpath->next_hop == sta) + if (rcu_dereference(mpath->next_hop) == sta) mesh_path_del(mpath->dst, mpath->sdata); } + rcu_read_unlock(); } void mesh_path_flush(struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_node *p; int i; - for_each_mesh_entry(mesh_paths, p, node, i) { + rcu_read_lock(); + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { mpath = node->mpath; if (mpath->sdata == sdata) mesh_path_del(mpath->dst, mpath->sdata); } + rcu_read_unlock(); } static void mesh_path_node_reclaim(struct rcu_head *rp) @@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) */ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_head *bucket; @@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) int err = 0; read_lock_bh(&pathtbl_resize_lock); - hash_idx = mesh_table_hash(addr, sdata, mesh_paths); - bucket = &mesh_paths->hash_buckets[hash_idx]; + tbl = resize_dereference_mesh_paths(); + hash_idx = mesh_table_hash(addr, sdata, tbl); + bucket = &tbl->hash_buckets[hash_idx]; - spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_lock_bh(&tbl->hashwlock[hash_idx]); hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - memcmp(addr, mpath->dst, ETH_ALEN) == 0) { + memcmp(addr, mpath->dst, ETH_ALEN) == 0) { spin_lock_bh(&mpath->state_lock); mpath->flags |= MESH_PATH_RESOLVING; hlist_del_rcu(&node->list); call_rcu(&node->rcu, mesh_path_node_reclaim); - atomic_dec(&mesh_paths->entries); + atomic_dec(&tbl->entries); spin_unlock_bh(&mpath->state_lock); goto enddel; } @@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) err = -ENXIO; enddel: mesh_paths_generation++; - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); return err; } @@ -719,8 +761,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) struct mpath_node *node = hlist_entry(p, struct mpath_node, list); mpath = node->mpath; hlist_del_rcu(p); - if (free_leafs) + if (free_leafs) { + del_timer_sync(&mpath->timer); kfree(mpath); + } kfree(node); } @@ -745,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) int mesh_pathtbl_init(void) { - mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); - if (!mesh_paths) + struct mesh_table *tbl_path, *tbl_mpp; + + tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + if (!tbl_path) return -ENOMEM; - mesh_paths->free_node = &mesh_path_node_free; - mesh_paths->copy_node = &mesh_path_node_copy; - mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; + tbl_path->free_node = &mesh_path_node_free; + tbl_path->copy_node = &mesh_path_node_copy; + tbl_path->mean_chain_len = MEAN_CHAIN_LEN; - mpp_paths = 
mesh_table_alloc(INIT_PATHS_SIZE_ORDER); - if (!mpp_paths) { - mesh_table_free(mesh_paths, true); + tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + if (!tbl_mpp) { + mesh_table_free(tbl_path, true); return -ENOMEM; } - mpp_paths->free_node = &mesh_path_node_free; - mpp_paths->copy_node = &mesh_path_node_copy; - mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; + tbl_mpp->free_node = &mesh_path_node_free; + tbl_mpp->copy_node = &mesh_path_node_copy; + tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; + + /* Need no locking since this is during init */ + RCU_INIT_POINTER(mesh_paths, tbl_path); + RCU_INIT_POINTER(mpp_paths, tbl_mpp); return 0; } void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_node *p; int i; - read_lock_bh(&pathtbl_resize_lock); - for_each_mesh_entry(mesh_paths, p, node, i) { + rcu_read_lock(); + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { if (node->mpath->sdata != sdata) continue; mpath = node->mpath; spin_lock_bh(&mpath->state_lock); if ((!(mpath->flags & MESH_PATH_RESOLVING)) && (!(mpath->flags & MESH_PATH_FIXED)) && - time_after(jiffies, - mpath->exp_time + MESH_PATH_EXPIRE)) { + time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { spin_unlock_bh(&mpath->state_lock); mesh_path_del(mpath->dst, mpath->sdata); } else spin_unlock_bh(&mpath->state_lock); } - read_unlock_bh(&pathtbl_resize_lock); + rcu_read_unlock(); } void mesh_pathtbl_unregister(void) { - mesh_table_free(mesh_paths, true); - mesh_table_free(mpp_paths, true); + /* no need for locking during exit path */ + mesh_table_free(rcu_dereference_raw(mesh_paths), true); + mesh_table_free(rcu_dereference_raw(mpp_paths), true); } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index d20046b..27af672 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -719,6 +719,11 @@ void ieee80211_scan_work(struct work_struct *work) * without scheduling a new work */ do { + if (!ieee80211_sdata_running(sdata)) { + aborted = true; + goto out_complete; + } + switch (local->next_scan_state) { case SCAN_DECISION: /* if no more bands/channels left, complete scan */ diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5fe4f3b..6ef64ad 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1985,7 +1985,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v) struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); - seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n", + seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", s, s->sk_protocol, nlk->pid, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 549527b..925f715 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2706,7 +2706,7 @@ static int packet_seq_show(struct seq_file *seq, void *v) const struct packet_sock *po = pkt_sk(s); seq_printf(seq, - "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", + "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), s->sk_type, diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 8c5bfce..ab07711 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -607,7 +607,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v) struct pn_sock *pn = pn_sk(sk); seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " - "%d %p %d%n", + "%d %pK %d%n", sk->sk_protocol, pn->sobject, pn->dobject, pn->resource, sk->sk_state, sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), 
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 48464ca..78efe89 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig @@ -33,3 +33,12 @@ config RFKILL_REGULATOR To compile this driver as a module, choose M here: the module will be called rfkill-regulator. + +config RFKILL_GPIO + tristate "GPIO RFKILL driver" + depends on RFKILL && GPIOLIB && HAVE_CLK + default n + help + If you say yes here you get support of a generic gpio RFKILL + driver. The platform should fill in the appropriate fields in the + rfkill_gpio_platform_data structure and pass that to the driver. diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile index d9a5a58..3117687 100644 --- a/net/rfkill/Makefile +++ b/net/rfkill/Makefile @@ -6,3 +6,4 @@ rfkill-y += core.o rfkill-$(CONFIG_RFKILL_INPUT) += input.o obj-$(CONFIG_RFKILL) += rfkill.o obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o +obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c new file mode 100644 index 0000000..256c5dd --- /dev/null +++ b/net/rfkill/rfkill-gpio.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2011, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include <linux/gpio.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/rfkill.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/slab.h> + +#include <linux/rfkill-gpio.h> + +enum rfkill_gpio_clk_state { + UNSPECIFIED = 0, + PWR_ENABLED, + PWR_DISABLED +}; + +#define PWR_CLK_SET(_RF, _EN) \ + ((_RF)->pwr_clk_enabled = (!(_EN) ? 
PWR_ENABLED : PWR_DISABLED)) +#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED) +#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED) + +struct rfkill_gpio_data { + struct rfkill_gpio_platform_data *pdata; + struct rfkill *rfkill_dev; + char *reset_name; + char *shutdown_name; + enum rfkill_gpio_clk_state pwr_clk_enabled; + struct clk *pwr_clk; +}; + +static int rfkill_gpio_set_power(void *data, bool blocked) +{ + struct rfkill_gpio_data *rfkill = data; + + if (blocked) { + if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) + gpio_direction_output(rfkill->pdata->shutdown_gpio, 0); + if (gpio_is_valid(rfkill->pdata->reset_gpio)) + gpio_direction_output(rfkill->pdata->reset_gpio, 0); + if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) + clk_disable(rfkill->pwr_clk); + } else { + if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill)) + clk_enable(rfkill->pwr_clk); + if (gpio_is_valid(rfkill->pdata->reset_gpio)) + gpio_direction_output(rfkill->pdata->reset_gpio, 1); + if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) + gpio_direction_output(rfkill->pdata->shutdown_gpio, 1); + } + + if (rfkill->pwr_clk) + PWR_CLK_SET(rfkill, blocked); + + return 0; +} + +static const struct rfkill_ops rfkill_gpio_ops = { + .set_block = rfkill_gpio_set_power, +}; + +static int rfkill_gpio_probe(struct platform_device *pdev) +{ + struct rfkill_gpio_data *rfkill; + struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; + int ret = 0; + int len = 0; + + if (!pdata) { + pr_warn("%s: No platform data specified\n", __func__); + return -EINVAL; + } + + /* make sure at-least one of the GPIO is defined and that + * a name is specified for this instance */ + if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) && + !gpio_is_valid(pdata->shutdown_gpio))) { + pr_warn("%s: invalid platform data\n", __func__); + return -EINVAL; + } + + rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); + if (!rfkill) + return -ENOMEM; + + rfkill->pdata = pdata; + + len = strlen(pdata->name); + rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL); + if (!rfkill->reset_name) { + ret = -ENOMEM; + goto fail_alloc; + } + + rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL); + if (!rfkill->shutdown_name) { + ret = -ENOMEM; + goto fail_reset_name; + } + + snprintf(rfkill->reset_name, len + 6 , "%s_reset", pdata->name); + snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", pdata->name); + + if (pdata->power_clk_name) { + rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name); + if (IS_ERR(rfkill->pwr_clk)) { + pr_warn("%s: can't find pwr_clk.\n", __func__); + goto fail_shutdown_name; + } + } + + if (gpio_is_valid(pdata->reset_gpio)) { + ret = gpio_request(pdata->reset_gpio, rfkill->reset_name); + if (ret) { + pr_warn("%s: failed to get reset gpio.\n", __func__); + goto fail_clock; + } + } + + if (gpio_is_valid(pdata->shutdown_gpio)) { + ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name); + if (ret) { + pr_warn("%s: failed to get shutdown gpio.\n", __func__); + goto fail_reset; + } + } + + rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type, + &rfkill_gpio_ops, rfkill); + if (!rfkill->rfkill_dev) + goto fail_shutdown; + + ret = rfkill_register(rfkill->rfkill_dev); + if (ret < 0) + goto fail_rfkill; + + platform_set_drvdata(pdev, rfkill); + + dev_info(&pdev->dev, "%s device registered.\n", pdata->name); + + return 0; + +fail_rfkill: + rfkill_destroy(rfkill->rfkill_dev); +fail_shutdown: + if (gpio_is_valid(pdata->shutdown_gpio)) + gpio_free(pdata->shutdown_gpio); +fail_reset: + 
if (gpio_is_valid(pdata->reset_gpio)) + gpio_free(pdata->reset_gpio); +fail_clock: + if (rfkill->pwr_clk) + clk_put(rfkill->pwr_clk); +fail_shutdown_name: + kfree(rfkill->shutdown_name); +fail_reset_name: + kfree(rfkill->reset_name); +fail_alloc: + kfree(rfkill); + + return ret; +} + +static int rfkill_gpio_remove(struct platform_device *pdev) +{ + struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev); + + rfkill_unregister(rfkill->rfkill_dev); + rfkill_destroy(rfkill->rfkill_dev); + if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) + gpio_free(rfkill->pdata->shutdown_gpio); + if (gpio_is_valid(rfkill->pdata->reset_gpio)) + gpio_free(rfkill->pdata->reset_gpio); + if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) + clk_disable(rfkill->pwr_clk); + if (rfkill->pwr_clk) + clk_put(rfkill->pwr_clk); + kfree(rfkill->shutdown_name); + kfree(rfkill->reset_name); + kfree(rfkill); + + return 0; +} + +static struct platform_driver rfkill_gpio_driver = { + .probe = rfkill_gpio_probe, + .remove = __devexit_p(rfkill_gpio_remove), + .driver = { + .name = "rfkill_gpio", + .owner = THIS_MODULE, + }, +}; + +static int __init rfkill_gpio_init(void) +{ + return platform_driver_register(&rfkill_gpio_driver); +} + +static void __exit rfkill_gpio_exit(void) +{ + platform_driver_unregister(&rfkill_gpio_driver); +} + +module_init(rfkill_gpio_init); +module_exit(rfkill_gpio_exit); + +MODULE_DESCRIPTION("gpio rfkill"); +MODULE_AUTHOR("NVIDIA"); +MODULE_LICENSE("GPL"); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 7ef87f9..b6ea6af 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash; - sfq_index x; + sfq_index x, qlen; struct sfq_slot *slot; int uninitialized_var(ret); @@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (++sch->q.qlen <= q->limit) return NET_XMIT_SUCCESS; + qlen = slot->qlen; sfq_drop(sch); - return NET_XMIT_CN; -} - -static struct sk_buff * -sfq_peek(struct Qdisc *sch) -{ - struct sfq_sched_data *q = qdisc_priv(sch); - - /* No active slots */ - if (q->tail == NULL) - return NULL; - - return q->slots[q->tail->next].skblist_next; + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ + return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS; } static struct sk_buff * @@ -702,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = { .priv_size = sizeof(struct sfq_sched_data), .enqueue = sfq_enqueue, .dequeue = sfq_dequeue, - .peek = sfq_peek, + .peek = qdisc_peek_dequeued, .drop = sfq_drop, .init = sfq_init, .reset = sfq_reset, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 1a21c57..525f97c 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -64,6 +64,7 @@ /* Forward declarations for internal functions. */ static void sctp_assoc_bh_rcv(struct work_struct *work); static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); /* Keep track of the new idr low so that we don't re-use association id * numbers too fast. It is protected by they idr spin lock is in the @@ -446,6 +447,9 @@ void sctp_association_free(struct sctp_association *asoc) /* Free any cached ASCONF_ACK chunk. */ sctp_assoc_free_asconf_acks(asoc); + /* Free the ASCONF queue. */ + sctp_assoc_free_asconf_queue(asoc); + /* Free any cached ASCONF chunk. 
*/ if (asoc->addip_last_asconf) sctp_chunk_free(asoc->addip_last_asconf); @@ -1578,6 +1582,18 @@ retry: return error; } +/* Free the ASCONF queue */ +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc) +{ + struct sctp_chunk *asconf; + struct sctp_chunk *tmp; + + list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) { + list_del_init(&asconf->list); + sctp_chunk_free(asconf); + } +} + /* Free asconf_ack cache */ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) { diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 61aacfb..05a6ce2 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -212,7 +212,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) sctp_for_each_hentry(epb, node, &head->chain) { ep = sctp_ep(epb); sk = epb->sk; - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, + seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, sctp_sk(sk)->type, sk->sk_state, hash, epb->bind_addr.port, sock_i_uid(sk), sock_i_ino(sk)); @@ -316,7 +316,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) assoc = sctp_assoc(epb); sk = epb->sk; seq_printf(seq, - "%8p %8p %-3d %-3d %-2d %-4d " + "%8pK %8pK %-3d %-3d %-2d %-4d " "%4d %8d %8d %7d %5lu %-5d %5d ", assoc, sk, sctp_sk(sk)->type, sk->sk_state, assoc->state, hash, diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b1d75be..0722a25 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2254,7 +2254,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) struct unix_sock *u = unix_sk(s); unix_state_lock(s); - seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", + seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", s, atomic_read(&s->sk_refcnt), 0, diff --git a/net/wireless/core.h b/net/wireless/core.h index bf0fb40..3dce1f1 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -245,6 +245,7 @@ struct cfg80211_event { u16 status; } cr; struct { + struct ieee80211_channel *channel; u8 bssid[ETH_ALEN]; const u8 *req_ie; const u8 *resp_ie; @@ -392,7 +393,9 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); -void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, +void __cfg80211_roamed(struct wireless_dev *wdev, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2222ce08..ec83f41 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3294,8 +3294,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_scan_request *request; - struct cfg80211_ssid *ssid; - struct ieee80211_channel *channel; struct nlattr *attr; struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_channels, i; @@ -3342,8 +3340,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) return -EINVAL; request = kzalloc(sizeof(*request) - + sizeof(*ssid) * n_ssids - + sizeof(channel) * n_channels + + sizeof(*request->ssids) * n_ssids + + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); if (!request) return -ENOMEM; @@ -3449,8 +3447,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, struct 
cfg80211_sched_scan_request *request; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; - struct cfg80211_ssid *ssid; - struct ieee80211_channel *channel; struct nlattr *attr; struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_channels, i; @@ -3507,8 +3503,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, return -EINVAL; request = kzalloc(sizeof(*request) - + sizeof(*ssid) * n_ssids - + sizeof(channel) * n_channels + + sizeof(*request->ssids) * n_ssids + + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); if (!request) return -ENOMEM; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index e17b0be..b7b6ff8 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -250,7 +250,8 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) if (wdev->conn->params.privacy) capa |= WLAN_CAPABILITY_PRIVACY; - bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid, + bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, + wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, @@ -470,7 +471,10 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, } if (!bss) - bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, + bss = cfg80211_get_bss(wdev->wiphy, + wdev->conn ? wdev->conn->params.channel : + NULL, + bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); @@ -538,7 +542,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, } EXPORT_SYMBOL(cfg80211_connect_result); -void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, +void __cfg80211_roamed(struct wireless_dev *wdev, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len) { @@ -565,7 +571,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; - bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, + bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); @@ -603,7 +609,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, #endif } -void cfg80211_roamed(struct net_device *dev, const u8 *bssid, +void cfg80211_roamed(struct net_device *dev, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) { @@ -619,6 +627,7 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid, return; ev->type = EVENT_ROAMED; + ev->rm.channel = channel; memcpy(ev->rm.bssid, bssid, ETH_ALEN); ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); ev->rm.req_ie_len = req_ie_len; diff --git a/net/wireless/util.c b/net/wireless/util.c index f0536d4..4d7b83f 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -746,7 +746,7 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev) NULL); break; case EVENT_ROAMED: - __cfg80211_roamed(wdev, ev->rm.bssid, + __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid, ev->rm.req_ie, ev->rm.req_ie_len, ev->rm.resp_ie, ev->rm.resp_ie_len); break; |
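
The single most widespread change in the series above is the conversion of "%p" to "%pK" in the /proc and seq_file socket dumps (atm, can/bcm, ipv4/ipv6 raw/tcp/udp, af_key, netlink, packet, phonet, sctp, af_unix). A minimal sketch of that pattern follows; it is not taken from the diff itself, and the function and object names are invented for illustration:

#include <linux/seq_file.h>

/* Sketch of a seq_file ->show callback using %pK rather than %p. */
static int example_show(struct seq_file *seq, void *v)
{
	void *obj = v;	/* whatever kernel object the iterator handed us */

	/*
	 * With the kernel.kptr_restrict sysctl set to a non-zero value,
	 * %pK prints zeros for readers lacking CAP_SYSLOG instead of the
	 * real kernel address, while plain %p would always print it.
	 */
	seq_printf(seq, "object at %pK\n", obj);
	return 0;
}

The conversions in the diff leave the output layout otherwise unchanged, so existing parsers of these /proc files simply see zeroed addresses when the administrator restricts pointer exposure.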
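
Similarly, the net/mac80211/mesh_pathtbl.c hunks mark the global mesh_paths/mpp_paths pointers as __rcu and free a replaced table from an RCU callback instead of blocking in synchronize_rcu(). Below is a hedged sketch of that publish-then-defer-free idiom, using invented names (demo_table, demo_tbl, demo_table_grow) rather than the actual mesh structures:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_table {
	int nbuckets;
	struct rcu_head rcu_head;	/* lets us free via call_rcu() */
};

static struct demo_table __rcu *demo_tbl;
static DEFINE_SPINLOCK(demo_resize_lock);

static void demo_table_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct demo_table, rcu_head));
}

static int demo_table_grow(void)
{
	struct demo_table *oldtbl, *newtbl;

	spin_lock_bh(&demo_resize_lock);
	oldtbl = rcu_dereference_protected(demo_tbl,
					   lockdep_is_held(&demo_resize_lock));
	/* allocation happens under the lock, hence GFP_ATOMIC */
	newtbl = kzalloc(sizeof(*newtbl), GFP_ATOMIC);
	if (!newtbl) {
		spin_unlock_bh(&demo_resize_lock);
		return -ENOMEM;
	}
	newtbl->nbuckets = oldtbl->nbuckets * 2;
	/* ... copy entries from oldtbl to newtbl ... */

	rcu_assign_pointer(demo_tbl, newtbl);	/* publish the new table */
	call_rcu(&oldtbl->rcu_head, demo_table_free_rcu);
	spin_unlock_bh(&demo_resize_lock);
	return 0;
}

Deferring the free with call_rcu() means the writer never has to sleep while holding the resize lock, which matches what the mesh_pathtbl.c hunks above do: embed an rcu_head in struct mesh_table and drop the synchronize_rcu() calls from the grow paths.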