From d0df9eecf9f61f70fd847656f5eb113e06e46a03 Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Fri, 13 May 2011 14:16:07 -0700 Subject: mac80211: Deactivate mesh path timers when freeing nodes Mesh paths are deleted via mesh_path_del() which properly deactivates the timer associated with a mesh path. But if paths were deleted by mesh_table_free(..., true), the timers would not be deactivated. This fixes that case. Reported-by: Johannes Berg Signed-off-by: Javier Cardona Signed-off-by: John W. Linville --- net/mac80211/mesh_pathtbl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 0aa96cd..f775202 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -719,8 +719,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) struct mpath_node *node = hlist_entry(p, struct mpath_node, list); mpath = node->mpath; hlist_del_rcu(p); - if (free_leafs) + if (free_leafs) { + del_timer_sync(&mpath->timer); kfree(mpath); + } kfree(node); } -- cgit v1.1 From 8e621fc90b42fa2ca4ff65dd8d9cb21723e47837 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 14 May 2011 01:55:23 +0200 Subject: mac80211: verify IBSS in interface combinations Drivers shouldn't attempt to advertise support for more than one IBSS interface since mac80211 doesn't support that. Check and return an error from ieee80211_register_hw() in that case. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/main.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 0d7b08d..96ab130 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -752,11 +752,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); - /* mac80211 doesn't support more than 1 channel */ - for (i = 0; i < hw->wiphy->n_iface_combinations; i++) - if (hw->wiphy->iface_combinations[i].num_different_channels > 1) + /* + * mac80211 doesn't support more than 1 channel, and also not more + * than one IBSS interface + */ + for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { + const struct ieee80211_iface_combination *c; + int j; + + c = &hw->wiphy->iface_combinations[i]; + + if (c->num_different_channels > 1) return -EINVAL; + for (j = 0; j < c->n_limits; j++) + if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && + c->limits[j].max > 1) + return -EINVAL; + } + #ifndef CONFIG_MAC80211_MESH /* mesh depends on Kconfig, but drivers should set it if they want */ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); -- cgit v1.1 From c29acf201007a6d73223f864f52406eb5ba19933 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Sat, 14 May 2011 09:43:28 +0530 Subject: mac80211: abort scan_work immediately when the device goes down As long as no delay is required between channel changes, the scan work proceeds without scheduling new work. In that case, we cannot abort the scan work when the card is unplugged. This patch completes the scan immediately whenever the device goes down. Reviewed-by: Johannes Berg Signed-off-by: Rajkumar Manoharan Signed-off-by: John W. 
Linville --- net/mac80211/iface.c | 4 ++-- net/mac80211/scan.c | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 0d00ac9..dee30ae 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -384,11 +384,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, int i; enum nl80211_channel_type orig_ct; + clear_bit(SDATA_STATE_RUNNING, &sdata->state); + if (local->scan_sdata == sdata) ieee80211_scan_cancel(local); - clear_bit(SDATA_STATE_RUNNING, &sdata->state); - /* * Stop TX on this interface first. */ diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index d20046b..27af672 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -719,6 +719,11 @@ void ieee80211_scan_work(struct work_struct *work) * without scheduling a new work */ do { + if (!ieee80211_sdata_running(sdata)) { + aborted = true; + goto out_complete; + } + switch (local->next_scan_state) { case SCAN_DECISION: /* if no more bands/channels left, complete scan */ -- cgit v1.1 From d07c7cf49ae7c488e778c4d668f4cc10bd2fa971 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 14 May 2011 11:04:51 +0200 Subject: mac80211: add missing rcu_barrier mac80211 uses call_rcu() with functions that are defined in the module, so it must use rcu_barrier() at module exit time. Luckily, this seems to not be a problem in practice as module unload and unregistration takes a long time and probably does multiple synchronize_rcu(). Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 96ab130..866f269 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1090,6 +1090,8 @@ static void __exit ieee80211_exit(void) ieee80211s_stop(); ieee80211_iface_exit(); + + rcu_barrier(); } -- cgit v1.1 From 1928ecab620907a0953f811316d05f367f3f4dba Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 14 May 2011 11:00:52 +0200 Subject: mac80211: fix and simplify mesh locking The locking in mesh_{mpath,mpp}_table_grow not only has an rcu_read_unlock() missing, it's also racy (though really only technically since it's invoked from a single function only) since it obtains the new size of the table without any locking, so two invocations of the function could attempt the same resize. Additionally, it uses synchronize_rcu() which is rather expensive and can be avoided trivially here. Modify the functions to only use the table lock and use call_rcu() instead of synchronize_rcu(). Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/mesh.h | 3 +++ net/mac80211/mesh_pathtbl.c | 44 ++++++++++++++++++++++---------------------- 2 files changed, 25 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index e7c5fdd..eb733c0 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -120,6 +120,7 @@ struct mesh_path { * buckets * @mean_chain_len: maximum average length for the hash buckets' list, if it is * reached, the table will grow + * rcu_head: RCU head to free the table */ struct mesh_table { /* Number of buckets will be 2^N */ @@ -132,6 +133,8 @@ struct mesh_table { int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); int size_order; int mean_chain_len; + + struct rcu_head rcu_head; }; /* Recent multicast cache */ diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index f775202..7402136 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -370,52 +370,52 @@ err_path_alloc: return err; } +static void mesh_table_free_rcu(struct rcu_head *rcu) +{ + struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head); + + mesh_table_free(tbl, false); +} + void mesh_mpath_table_grow(void) { struct mesh_table *oldtbl, *newtbl; - rcu_read_lock(); - newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1); - if (!newtbl) - return; write_lock_bh(&pathtbl_resize_lock); + newtbl = mesh_table_alloc(mesh_paths->size_order + 1); + if (!newtbl) + goto out; oldtbl = mesh_paths; if (mesh_table_grow(mesh_paths, newtbl) < 0) { - rcu_read_unlock(); __mesh_table_free(newtbl); - write_unlock_bh(&pathtbl_resize_lock); - return; + goto out; } - rcu_read_unlock(); rcu_assign_pointer(mesh_paths, newtbl); - write_unlock_bh(&pathtbl_resize_lock); - synchronize_rcu(); - mesh_table_free(oldtbl, false); + call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); + + out: + write_unlock_bh(&pathtbl_resize_lock); } void mesh_mpp_table_grow(void) { struct mesh_table *oldtbl, *newtbl; - rcu_read_lock(); - newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1); - if (!newtbl) - return; write_lock_bh(&pathtbl_resize_lock); + newtbl = mesh_table_alloc(mpp_paths->size_order + 1); + if (!newtbl) + goto out; oldtbl = mpp_paths; if (mesh_table_grow(mpp_paths, newtbl) < 0) { - rcu_read_unlock(); __mesh_table_free(newtbl); - write_unlock_bh(&pathtbl_resize_lock); - return; + goto out; } - rcu_read_unlock(); rcu_assign_pointer(mpp_paths, newtbl); - write_unlock_bh(&pathtbl_resize_lock); + call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); - synchronize_rcu(); - mesh_table_free(oldtbl, false); + out: + write_unlock_bh(&pathtbl_resize_lock); } int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) -- cgit v1.1 From 349eb8cf45aadd35836fdfde75b3265a01b2aaa1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 14 May 2011 11:56:16 +0200 Subject: mac80211: annotate and fix RCU in mesh code This adds proper RCU annotations to the mesh path table code, and fixes a number of bugs in the code that I found while checking the sparse warnings I got as a result of the annotations. Some things like the changes in mesh_path_add() or mesh_pathtbl_init() only serve to shut up sparse, but other changes like the changes surrounding the for_each_mesh_entry() macro fix real RCU bugs in the code. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/mesh.h | 4 -- net/mac80211/mesh_pathtbl.c | 154 +++++++++++++++++++++++++++++--------------- 2 files changed, 102 insertions(+), 56 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index eb733c0..249e733 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -289,10 +289,6 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; } -#define for_each_mesh_entry(x, p, node, i) \ - for (i = 0; i <= x->hash_mask; i++) \ - hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) - void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 7402136..2bda0ac 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -36,8 +36,8 @@ struct mpath_node { struct mesh_path *mpath; }; -static struct mesh_table *mesh_paths; -static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ +static struct mesh_table __rcu *mesh_paths; +static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; @@ -48,6 +48,29 @@ int mesh_paths_generation; static DEFINE_RWLOCK(pathtbl_resize_lock); +static inline struct mesh_table *resize_dereference_mesh_paths(void) +{ + return rcu_dereference_protected(mesh_paths, + lockdep_is_held(&pathtbl_resize_lock)); +} + +static inline struct mesh_table *resize_dereference_mpp_paths(void) +{ + return rcu_dereference_protected(mpp_paths, + lockdep_is_held(&pathtbl_resize_lock)); +} + +/* + * CAREFUL -- "tbl" must not be an expression, + * in particular not an rcu_dereference(), since + * it's used twice. So it is illegal to do + * for_each_mesh_entry(rcu_dereference(...), ...) 
+ */ +#define for_each_mesh_entry(tbl, p, node, i) \ + for (i = 0; i <= tbl->hash_mask; i++) \ + hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) + + static struct mesh_table *mesh_table_alloc(int size_order) { int i; @@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) */ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl = rcu_dereference(mesh_paths); struct mpath_node *node; struct hlist_node *p; int i; int j = 0; - for_each_mesh_entry(mesh_paths, p, node, i) { + for_each_mesh_entry(tbl, p, node, i) { if (sdata && node->mpath->sdata != sdata) continue; if (j++ == idx) { @@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; + struct mesh_table *tbl; struct mesh_path *mpath, *new_mpath; struct mpath_node *node, *new_node; struct hlist_head *bucket; @@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) spin_lock_init(&new_mpath->state_lock); init_timer(&new_mpath->timer); - hash_idx = mesh_table_hash(dst, sdata, mesh_paths); - bucket = &mesh_paths->hash_buckets[hash_idx]; + tbl = resize_dereference_mesh_paths(); - spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); + hash_idx = mesh_table_hash(dst, sdata, tbl); + bucket = &tbl->hash_buckets[hash_idx]; + + spin_lock_bh(&tbl->hashwlock[hash_idx]); err = -EEXIST; hlist_for_each_entry(node, n, bucket, list) { @@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) } hlist_add_head_rcu(&new_node->list, bucket); - if (atomic_inc_return(&mesh_paths->entries) >= - mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) + if (atomic_inc_return(&tbl->entries) >= + tbl->mean_chain_len * (tbl->hash_mask + 1)) grow = 1; mesh_paths_generation++; - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); if (grow) { set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); @@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) return 0; err_exists: - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); kfree(new_node); err_node_alloc: @@ -382,11 +409,11 @@ void mesh_mpath_table_grow(void) struct mesh_table *oldtbl, *newtbl; write_lock_bh(&pathtbl_resize_lock); - newtbl = mesh_table_alloc(mesh_paths->size_order + 1); + oldtbl = resize_dereference_mesh_paths(); + newtbl = mesh_table_alloc(oldtbl->size_order + 1); if (!newtbl) goto out; - oldtbl = mesh_paths; - if (mesh_table_grow(mesh_paths, newtbl) < 0) { + if (mesh_table_grow(oldtbl, newtbl) < 0) { __mesh_table_free(newtbl); goto out; } @@ -403,11 +430,11 @@ void mesh_mpp_table_grow(void) struct mesh_table *oldtbl, *newtbl; write_lock_bh(&pathtbl_resize_lock); - newtbl = mesh_table_alloc(mpp_paths->size_order + 1); + oldtbl = resize_dereference_mpp_paths(); + newtbl = mesh_table_alloc(oldtbl->size_order + 1); if (!newtbl) goto out; - oldtbl = mpp_paths; - if (mesh_table_grow(mpp_paths, newtbl) < 0) { + if (mesh_table_grow(oldtbl, newtbl) < 0) { __mesh_table_free(newtbl); goto out; } @@ -422,6 +449,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; + struct mesh_table *tbl; struct mesh_path 
*mpath, *new_mpath; struct mpath_node *node, *new_node; struct hlist_head *bucket; @@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) new_mpath->exp_time = jiffies; spin_lock_init(&new_mpath->state_lock); - hash_idx = mesh_table_hash(dst, sdata, mpp_paths); - bucket = &mpp_paths->hash_buckets[hash_idx]; + tbl = resize_dereference_mpp_paths(); - spin_lock_bh(&mpp_paths->hashwlock[hash_idx]); + hash_idx = mesh_table_hash(dst, sdata, tbl); + bucket = &tbl->hash_buckets[hash_idx]; + + spin_lock_bh(&tbl->hashwlock[hash_idx]); err = -EEXIST; hlist_for_each_entry(node, n, bucket, list) { @@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) } hlist_add_head_rcu(&new_node->list, bucket); - if (atomic_inc_return(&mpp_paths->entries) >= - mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) + if (atomic_inc_return(&tbl->entries) >= + tbl->mean_chain_len * (tbl->hash_mask + 1)) grow = 1; - spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); if (grow) { set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); @@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) return 0; err_exists: - spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); kfree(new_node); err_node_alloc: @@ -502,6 +532,7 @@ err_path_alloc: */ void mesh_plink_broken(struct sta_info *sta) { + struct mesh_table *tbl; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; struct mpath_node *node; @@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta) int i; rcu_read_lock(); - for_each_mesh_entry(mesh_paths, p, node, i) { + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { mpath = node->mpath; spin_lock_bh(&mpath->state_lock); - if (mpath->next_hop == sta && + if (rcu_dereference(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { mpath->flags &= ~MESH_PATH_ACTIVE; @@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta) */ void mesh_path_flush_by_nexthop(struct sta_info *sta) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_node *p; int i; - for_each_mesh_entry(mesh_paths, p, node, i) { + rcu_read_lock(); + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { mpath = node->mpath; - if (mpath->next_hop == sta) + if (rcu_dereference(mpath->next_hop) == sta) mesh_path_del(mpath->dst, mpath->sdata); } + rcu_read_unlock(); } void mesh_path_flush(struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_node *p; int i; - for_each_mesh_entry(mesh_paths, p, node, i) { + rcu_read_lock(); + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { mpath = node->mpath; if (mpath->sdata == sdata) mesh_path_del(mpath->dst, mpath->sdata); } + rcu_read_unlock(); } static void mesh_path_node_reclaim(struct rcu_head *rp) @@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) */ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_head *bucket; @@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) int err = 0; 
read_lock_bh(&pathtbl_resize_lock); - hash_idx = mesh_table_hash(addr, sdata, mesh_paths); - bucket = &mesh_paths->hash_buckets[hash_idx]; + tbl = resize_dereference_mesh_paths(); + hash_idx = mesh_table_hash(addr, sdata, tbl); + bucket = &tbl->hash_buckets[hash_idx]; - spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_lock_bh(&tbl->hashwlock[hash_idx]); hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - memcmp(addr, mpath->dst, ETH_ALEN) == 0) { + memcmp(addr, mpath->dst, ETH_ALEN) == 0) { spin_lock_bh(&mpath->state_lock); mpath->flags |= MESH_PATH_RESOLVING; hlist_del_rcu(&node->list); call_rcu(&node->rcu, mesh_path_node_reclaim); - atomic_dec(&mesh_paths->entries); + atomic_dec(&tbl->entries); spin_unlock_bh(&mpath->state_lock); goto enddel; } @@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) err = -ENXIO; enddel: mesh_paths_generation++; - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); + spin_unlock_bh(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); return err; } @@ -747,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) int mesh_pathtbl_init(void) { - mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); - if (!mesh_paths) + struct mesh_table *tbl_path, *tbl_mpp; + + tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + if (!tbl_path) return -ENOMEM; - mesh_paths->free_node = &mesh_path_node_free; - mesh_paths->copy_node = &mesh_path_node_copy; - mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; + tbl_path->free_node = &mesh_path_node_free; + tbl_path->copy_node = &mesh_path_node_copy; + tbl_path->mean_chain_len = MEAN_CHAIN_LEN; - mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); - if (!mpp_paths) { - mesh_table_free(mesh_paths, true); + tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + if (!tbl_mpp) { + mesh_table_free(tbl_path, true); return -ENOMEM; } - mpp_paths->free_node = &mesh_path_node_free; - mpp_paths->copy_node = &mesh_path_node_copy; - mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; + tbl_mpp->free_node = &mesh_path_node_free; + tbl_mpp->copy_node = &mesh_path_node_copy; + tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; + + /* Need no locking since this is during init */ + RCU_INIT_POINTER(mesh_paths, tbl_path); + RCU_INIT_POINTER(mpp_paths, tbl_mpp); return 0; } void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { + struct mesh_table *tbl; struct mesh_path *mpath; struct mpath_node *node; struct hlist_node *p; int i; - read_lock_bh(&pathtbl_resize_lock); - for_each_mesh_entry(mesh_paths, p, node, i) { + rcu_read_lock(); + tbl = rcu_dereference(mesh_paths); + for_each_mesh_entry(tbl, p, node, i) { if (node->mpath->sdata != sdata) continue; mpath = node->mpath; spin_lock_bh(&mpath->state_lock); if ((!(mpath->flags & MESH_PATH_RESOLVING)) && (!(mpath->flags & MESH_PATH_FIXED)) && - time_after(jiffies, - mpath->exp_time + MESH_PATH_EXPIRE)) { + time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { spin_unlock_bh(&mpath->state_lock); mesh_path_del(mpath->dst, mpath->sdata); } else spin_unlock_bh(&mpath->state_lock); } - read_unlock_bh(&pathtbl_resize_lock); + rcu_read_unlock(); } void mesh_pathtbl_unregister(void) { - mesh_table_free(mesh_paths, true); - mesh_table_free(mpp_paths, true); + /* no need for locking during exit path */ + mesh_table_free(rcu_dereference_raw(mesh_paths), true); + mesh_table_free(rcu_dereference_raw(mpp_paths), true); } -- cgit v1.1 From ed9d01026f156db2d638cbb045231c7a8fde877d Mon Sep 17 
00:00:00 2001 From: Jouni Malinen Date: Mon, 16 May 2011 19:40:15 +0300 Subject: cfg80211: Use consistent BSS matching between scan and sme cfg80211 scan code adds separate BSS entries if the same BSS shows up on multiple channels. However, sme implementation does not use the frequency when fetching the BSS entry. Fix this by adding channel information to cfg80211_roamed() and include it in cfg80211_get_bss() calls. Please note that drivers using cfg80211_roamed() need to be modified to fully implement this fix. This commit includes only minimal changes to avoid compilation issues; it maintains the old (broken) behavior for most drivers. ath6kl was the only one that I could test, so I updated it to provide the operating frequency in the roamed event. Signed-off-by: Jouni Malinen Signed-off-by: John W. Linville --- net/wireless/core.h | 5 ++++- net/wireless/sme.c | 19 ++++++++++++++----- net/wireless/util.c | 2 +- 3 files changed, 19 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/wireless/core.h b/net/wireless/core.h index bf0fb40..3dce1f1 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -245,6 +245,7 @@ struct cfg80211_event { u16 status; } cr; struct { + struct ieee80211_channel *channel; u8 bssid[ETH_ALEN]; const u8 *req_ie; const u8 *resp_ie; @@ -392,7 +393,9 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); -void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, +void __cfg80211_roamed(struct wireless_dev *wdev, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/sme.c b/net/wireless/sme.c index e17b0be..b7b6ff8 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -250,7 +250,8 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) if (wdev->conn->params.privacy) capa |= WLAN_CAPABILITY_PRIVACY; - bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid, + bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, + wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, @@ -470,7 +471,10 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, } if (!bss) - bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, + bss = cfg80211_get_bss(wdev->wiphy, + wdev->conn ? 
wdev->conn->params.channel : + NULL, + bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); @@ -538,7 +542,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, } EXPORT_SYMBOL(cfg80211_connect_result); -void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, +void __cfg80211_roamed(struct wireless_dev *wdev, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len) { @@ -565,7 +571,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; - bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, + bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); @@ -603,7 +609,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, #endif } -void cfg80211_roamed(struct net_device *dev, const u8 *bssid, +void cfg80211_roamed(struct net_device *dev, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) { @@ -619,6 +627,7 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid, return; ev->type = EVENT_ROAMED; + ev->rm.channel = channel; memcpy(ev->rm.bssid, bssid, ETH_ALEN); ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); ev->rm.req_ie_len = req_ie_len; diff --git a/net/wireless/util.c b/net/wireless/util.c index f0536d4..4d7b83f 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -746,7 +746,7 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev) NULL); break; case EVENT_ROAMED: - __cfg80211_roamed(wdev, ev->rm.bssid, + __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid, ev->rm.req_ie, ev->rm.req_ie_len, ev->rm.resp_ie, ev->rm.resp_ie_len); break; -- cgit v1.1 From 7176ba23f8b589b1df3229574ff46fb904ce9ec5 Mon Sep 17 00:00:00 2001 From: Rhyland Klein Date: Mon, 16 May 2011 14:41:48 -0700 Subject: net: rfkill: add generic gpio rfkill driver This adds a new generic gpio rfkill driver to support rfkill switches which are controlled by gpios. The driver also supports passing in data about the clock for the radio, so that when rfkill is blocking, it can disable the clock. This driver assumes platform data is passed from the board files to configure it for specific devices. Original-patch-by: Anantha Idapalapati Signed-off-by: Rhyland Klein Signed-off-by: John W. Linville --- net/rfkill/Kconfig | 9 ++ net/rfkill/Makefile | 1 + net/rfkill/rfkill-gpio.c | 227 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 237 insertions(+) create mode 100644 net/rfkill/rfkill-gpio.c (limited to 'net') diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 48464ca..78efe89 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig @@ -33,3 +33,12 @@ config RFKILL_REGULATOR To compile this driver as a module, choose M here: the module will be called rfkill-regulator. + +config RFKILL_GPIO + tristate "GPIO RFKILL driver" + depends on RFKILL && GPIOLIB && HAVE_CLK + default n + help + If you say yes here you get support of a generic gpio RFKILL + driver. The platform should fill in the appropriate fields in the + rfkill_gpio_platform_data structure and pass that to the driver. 
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile index d9a5a58..3117687 100644 --- a/net/rfkill/Makefile +++ b/net/rfkill/Makefile @@ -6,3 +6,4 @@ rfkill-y += core.o rfkill-$(CONFIG_RFKILL_INPUT) += input.o obj-$(CONFIG_RFKILL) += rfkill.o obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o +obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c new file mode 100644 index 0000000..256c5dd --- /dev/null +++ b/net/rfkill/rfkill-gpio.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2011, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +enum rfkill_gpio_clk_state { + UNSPECIFIED = 0, + PWR_ENABLED, + PWR_DISABLED +}; + +#define PWR_CLK_SET(_RF, _EN) \ + ((_RF)->pwr_clk_enabled = (!(_EN) ? PWR_ENABLED : PWR_DISABLED)) +#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED) +#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED) + +struct rfkill_gpio_data { + struct rfkill_gpio_platform_data *pdata; + struct rfkill *rfkill_dev; + char *reset_name; + char *shutdown_name; + enum rfkill_gpio_clk_state pwr_clk_enabled; + struct clk *pwr_clk; +}; + +static int rfkill_gpio_set_power(void *data, bool blocked) +{ + struct rfkill_gpio_data *rfkill = data; + + if (blocked) { + if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) + gpio_direction_output(rfkill->pdata->shutdown_gpio, 0); + if (gpio_is_valid(rfkill->pdata->reset_gpio)) + gpio_direction_output(rfkill->pdata->reset_gpio, 0); + if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) + clk_disable(rfkill->pwr_clk); + } else { + if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill)) + clk_enable(rfkill->pwr_clk); + if (gpio_is_valid(rfkill->pdata->reset_gpio)) + gpio_direction_output(rfkill->pdata->reset_gpio, 1); + if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) + gpio_direction_output(rfkill->pdata->shutdown_gpio, 1); + } + + if (rfkill->pwr_clk) + PWR_CLK_SET(rfkill, blocked); + + return 0; +} + +static const struct rfkill_ops rfkill_gpio_ops = { + .set_block = rfkill_gpio_set_power, +}; + +static int rfkill_gpio_probe(struct platform_device *pdev) +{ + struct rfkill_gpio_data *rfkill; + struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; + int ret = 0; + int len = 0; + + if (!pdata) { + pr_warn("%s: No platform data specified\n", __func__); + return -EINVAL; + } + + /* make sure at-least one of the GPIO is defined and that + * a name is specified for this instance */ + if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) && + !gpio_is_valid(pdata->shutdown_gpio))) { + pr_warn("%s: invalid platform data\n", __func__); + return -EINVAL; + } + + rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); + if (!rfkill) + return -ENOMEM; + + rfkill->pdata = pdata; + + len = 
strlen(pdata->name); + rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL); + if (!rfkill->reset_name) { + ret = -ENOMEM; + goto fail_alloc; + } + + rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL); + if (!rfkill->shutdown_name) { + ret = -ENOMEM; + goto fail_reset_name; + } + + snprintf(rfkill->reset_name, len + 6 , "%s_reset", pdata->name); + snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", pdata->name); + + if (pdata->power_clk_name) { + rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name); + if (IS_ERR(rfkill->pwr_clk)) { + pr_warn("%s: can't find pwr_clk.\n", __func__); + goto fail_shutdown_name; + } + } + + if (gpio_is_valid(pdata->reset_gpio)) { + ret = gpio_request(pdata->reset_gpio, rfkill->reset_name); + if (ret) { + pr_warn("%s: failed to get reset gpio.\n", __func__); + goto fail_clock; + } + } + + if (gpio_is_valid(pdata->shutdown_gpio)) { + ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name); + if (ret) { + pr_warn("%s: failed to get shutdown gpio.\n", __func__); + goto fail_reset; + } + } + + rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type, + &rfkill_gpio_ops, rfkill); + if (!rfkill->rfkill_dev) + goto fail_shutdown; + + ret = rfkill_register(rfkill->rfkill_dev); + if (ret < 0) + goto fail_rfkill; + + platform_set_drvdata(pdev, rfkill); + + dev_info(&pdev->dev, "%s device registered.\n", pdata->name); + + return 0; + +fail_rfkill: + rfkill_destroy(rfkill->rfkill_dev); +fail_shutdown: + if (gpio_is_valid(pdata->shutdown_gpio)) + gpio_free(pdata->shutdown_gpio); +fail_reset: + if (gpio_is_valid(pdata->reset_gpio)) + gpio_free(pdata->reset_gpio); +fail_clock: + if (rfkill->pwr_clk) + clk_put(rfkill->pwr_clk); +fail_shutdown_name: + kfree(rfkill->shutdown_name); +fail_reset_name: + kfree(rfkill->reset_name); +fail_alloc: + kfree(rfkill); + + return ret; +} + +static int rfkill_gpio_remove(struct platform_device *pdev) +{ + struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev); + + rfkill_unregister(rfkill->rfkill_dev); + rfkill_destroy(rfkill->rfkill_dev); + if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) + gpio_free(rfkill->pdata->shutdown_gpio); + if (gpio_is_valid(rfkill->pdata->reset_gpio)) + gpio_free(rfkill->pdata->reset_gpio); + if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) + clk_disable(rfkill->pwr_clk); + if (rfkill->pwr_clk) + clk_put(rfkill->pwr_clk); + kfree(rfkill->shutdown_name); + kfree(rfkill->reset_name); + kfree(rfkill); + + return 0; +} + +static struct platform_driver rfkill_gpio_driver = { + .probe = rfkill_gpio_probe, + .remove = __devexit_p(rfkill_gpio_remove), + .driver = { + .name = "rfkill_gpio", + .owner = THIS_MODULE, + }, +}; + +static int __init rfkill_gpio_init(void) +{ + return platform_driver_register(&rfkill_gpio_driver); +} + +static void __exit rfkill_gpio_exit(void) +{ + platform_driver_unregister(&rfkill_gpio_driver); +} + +module_init(rfkill_gpio_init); +module_exit(rfkill_gpio_exit); + +MODULE_DESCRIPTION("gpio rfkill"); +MODULE_AUTHOR("NVIDIA"); +MODULE_LICENSE("GPL"); -- cgit v1.1 From d676ff493d9dfb8b34892214665028a8c85e2056 Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Tue, 17 May 2011 16:13:34 -0700 Subject: mac80211: Don't sleep when growing the mesh path After commit 1928ecab620907a0953f811316d05f367f3f4dba (mac80211: fix and simplify mesh locking) mesh table allocation is performed with the pathtbl_resize_lock taken. Under those conditions one should not sleep. This patch makes the allocations GFP_ATOMIC to prevent that. 
Signed-off-by: Javier Cardona Signed-off-by: John W. Linville --- net/mac80211/mesh_pathtbl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 2bda0ac..51f6fe8 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -76,12 +76,12 @@ static struct mesh_table *mesh_table_alloc(int size_order) int i; struct mesh_table *newtbl; - newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL); + newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); if (!newtbl) return NULL; newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * - (1 << size_order), GFP_KERNEL); + (1 << size_order), GFP_ATOMIC); if (!newtbl->hash_buckets) { kfree(newtbl); @@ -89,7 +89,7 @@ static struct mesh_table *mesh_table_alloc(int size_order) } newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * - (1 << size_order), GFP_KERNEL); + (1 << size_order), GFP_ATOMIC); if (!newtbl->hashwlock) { kfree(newtbl->hash_buckets); kfree(newtbl); -- cgit v1.1 From a2cd43c52aa5c676b03d575177536e05ac672c75 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Wed, 18 May 2011 11:42:03 +0300 Subject: nl80211: remove some stack variables in trigger_scan and start_sched_scan Some stack variables (name *ssid and *channel) are only used to define the size of the memory block that needs to be allocated for the request structure in the nl80211_trigger_scan() and nl80211_start_sched_scan() functions. This is unnecessary because the sizes of the actual elements in the structure can be used instead. Signed-off-by: Luciano Coelho Signed-off-by: John W. Linville --- net/wireless/nl80211.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2222ce08..ec83f41 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3294,8 +3294,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_scan_request *request; - struct cfg80211_ssid *ssid; - struct ieee80211_channel *channel; struct nlattr *attr; struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_channels, i; @@ -3342,8 +3340,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) return -EINVAL; request = kzalloc(sizeof(*request) - + sizeof(*ssid) * n_ssids - + sizeof(channel) * n_channels + + sizeof(*request->ssids) * n_ssids + + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); if (!request) return -ENOMEM; @@ -3449,8 +3447,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, struct cfg80211_sched_scan_request *request; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; - struct cfg80211_ssid *ssid; - struct ieee80211_channel *channel; struct nlattr *attr; struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_channels, i; @@ -3507,8 +3503,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, return -EINVAL; request = kzalloc(sizeof(*request) - + sizeof(*ssid) * n_ssids - + sizeof(channel) * n_channels + + sizeof(*request->ssids) * n_ssids + + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); if (!request) return -ENOMEM; -- cgit v1.1 From 19a76fa9593bad778dabeeec1f6c2df6effe9ca3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 May 2011 22:23:00 +0000 Subject: net: ping: cleanups ping_v4_unhash() 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit net/ipv4/ping.c: In function ‘ping_v4_unhash’: net/ipv4/ping.c:140:28: warning: variable ‘hslot’ set but not used Signed-off-by: Eric Dumazet CC: Vasiliy Kulikov Acked-by: Vasiliy Kulikov Signed-off-by: David S. Miller --- net/ipv4/ping.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 1f3bb11..9aaa671 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -137,9 +137,6 @@ static void ping_v4_unhash(struct sock *sk) struct inet_sock *isk = inet_sk(sk); pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { - struct hlist_nulls_head *hslot; - - hslot = ping_hashslot(&ping_table, sock_net(sk), isk->inet_num); write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sock_put(sk); -- cgit v1.1 From 418f275ed5b03c48e0e6c3401466660dfe894f76 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 May 2011 22:41:48 +0000 Subject: snap: remove one synchronize_net() No need to wait for an rcu grace period after list insertion. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/802/psnap.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net') diff --git a/net/802/psnap.c b/net/802/psnap.c index 21cde8f..db6baf7 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -147,7 +147,6 @@ struct datalink_proto *register_snap_client(const unsigned char *desc, out: spin_unlock_bh(&snap_lock); - synchronize_net(); return proto; } -- cgit v1.1 From 8efa885406359af300d46910642b50ca82c0fe47 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 May 2011 11:02:42 +0000 Subject: sch_sfq: avoid giving spurious NET_XMIT_CN signals While chasing a possible net_sched bug, I found that IP fragments have little chance to pass a congested SFQ qdisc: - Say SFQ qdisc is full because one flow is non-responsive. - ip_fragment() wants to send two fragments belonging to an idle flow. - sfq_enqueue() queues the first packet, but sees the queue limit reached: - sfq_enqueue() drops one packet from 'big consumer', and returns NET_XMIT_CN. - ip_fragment() cancels remaining fragments. This patch restores fairness, making sure we return NET_XMIT_CN only if we dropped a packet from the same flow. Signed-off-by: Eric Dumazet CC: Patrick McHardy CC: Jarek Poplawski CC: Jamal Hadi Salim CC: Stephen Hemminger Signed-off-by: David S. Miller --- net/sched/sch_sfq.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 7ef87f9..b1d00f8 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash; - sfq_index x; + sfq_index x, qlen; struct sfq_slot *slot; int uninitialized_var(ret); @@ -405,8 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (++sch->q.qlen <= q->limit) return NET_XMIT_SUCCESS; + qlen = slot->qlen; sfq_drop(sch); - return NET_XMIT_CN; + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ + return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS; } static struct sk_buff * -- cgit v1.1 From 6c4a5cb219520c7bc937ee186ca53f03733bd09f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 21 May 2011 07:48:40 +0000 Subject: net: filter: Use WARN_RATELIMIT A mis-configured filter can spam the logs with lots of stack traces. 
Rate-limit the warnings and add printout of the bogus filter information. Original-patch-by: Ben Greear Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/core/filter.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index 0eb8c44..0e3622f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -350,7 +350,9 @@ load_b: continue; } default: - WARN_ON(1); + WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", + fentry->code, fentry->jt, + fentry->jf, fentry->k); return 0; } } -- cgit v1.1 From 6ac3f6649223d916bbdf1e823926f8f3b34b5d99 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 24 May 2011 01:11:51 -0400 Subject: ipv6: Fix return of xfrm6_tunnel_rcv() Like ipv4, just return xfrm6_rcv_spi()'s return value directly. Signed-off-by: David S. Miller --- net/ipv6/xfrm6_tunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index a6770a0..4fe1db12 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -241,7 +241,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb) __be32 spi; spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr); - return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; + return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi); } static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, -- cgit v1.1 From 71338aa7d050c86d8765cd36e46be514fb0ebbce Mon Sep 17 00:00:00 2001 From: Dan Rosenberg Date: Mon, 23 May 2011 12:17:35 +0000 Subject: net: convert %p usage to %pK The %pK format specifier is designed to hide exposed kernel pointers, specifically via /proc interfaces. Exposing these pointers provides an easy target for kernel write vulnerabilities, since they reveal the locations of writable structures containing easily triggerable function pointers. The behavior of %pK depends on the kptr_restrict sysctl. If kptr_restrict is set to 0, no deviation from the standard %p behavior occurs. If kptr_restrict is set to 1, the default, if the current user (intended to be a reader via seq_printf(), etc.) does not have CAP_SYSLOG (currently in the LSM tree), kernel pointers using %pK are printed as 0's. If kptr_restrict is set to 2, kernel pointers using %pK are printed as 0's regardless of privileges. Replacing with 0's was chosen over the default "(null)", which cannot be parsed by userland %p, which expects "(nil)". The supporting code for kptr_restrict and %pK are currently in the -mm tree. This patch converts users of %p in net/ to %pK. Cases of printing pointers to the syslog are not covered, since this would eliminate useful information for postmortem debugging and the reading of the syslog is already optionally protected by the dmesg_restrict sysctl. Signed-off-by: Dan Rosenberg Cc: James Morris Cc: Eric Dumazet Cc: Thomas Graf Cc: Eugene Teo Cc: Kees Cook Cc: Ingo Molnar Cc: David S. Miller Cc: Peter Zijlstra Cc: Eric Paris Signed-off-by: Andrew Morton Signed-off-by: David S. 
Miller --- net/atm/proc.c | 4 ++-- net/can/bcm.c | 6 +++--- net/ipv4/raw.c | 2 +- net/ipv4/tcp_ipv4.c | 6 +++--- net/ipv4/udp.c | 2 +- net/ipv6/raw.c | 2 +- net/ipv6/tcp_ipv6.c | 6 +++--- net/ipv6/udp.c | 2 +- net/key/af_key.c | 2 +- net/netlink/af_netlink.c | 2 +- net/packet/af_packet.c | 2 +- net/phonet/socket.c | 2 +- net/sctp/proc.c | 4 ++-- net/unix/af_unix.c | 2 +- 14 files changed, 22 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/atm/proc.c b/net/atm/proc.c index f85da077..be3afde 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -191,7 +191,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc) { struct sock *sk = sk_atm(vcc); - seq_printf(seq, "%p ", vcc); + seq_printf(seq, "%pK ", vcc); if (!vcc->dev) seq_printf(seq, "Unassigned "); else @@ -218,7 +218,7 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) { if (!vcc->dev) seq_printf(seq, sizeof(void *) == 4 ? - "N/A@%p%10s" : "N/A@%p%2s", vcc, ""); + "N/A@%pK%10s" : "N/A@%pK%2s", vcc, ""); else seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, vcc->vci); diff --git a/net/can/bcm.c b/net/can/bcm.c index cced806..184a657 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -165,9 +165,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; - seq_printf(m, ">>> socket %p", sk->sk_socket); - seq_printf(m, " / sk %p", sk); - seq_printf(m, " / bo %p", bo); + seq_printf(m, ">>> socket %pK", sk->sk_socket); + seq_printf(m, " / sk %pK", sk); + seq_printf(m, " / bo %pK", bo); seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); seq_printf(m, " <<<\n"); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 11e17804..c9893d4 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -979,7 +979,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) srcp = inet->inet_num; seq_printf(seq, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3c8d9b6..a7d6671 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2371,7 +2371,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req, int ttd = req->expires - jiffies; seq_printf(f, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n", i, ireq->loc_addr, ntohs(inet_sk(sk)->inet_sport), @@ -2426,7 +2426,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " - "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n", + "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n", i, src, srcp, dest, destp, sk->sk_state, tp->write_seq - tp->snd_una, rx_queue, @@ -2461,7 +2461,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw, srcp = ntohs(tw->tw_sport); seq_printf(f, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n", i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw, len); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 599374f..abca870 100644 --- 
a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2090,7 +2090,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ae64984..cc7313b 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1240,7 +1240,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) srcp = inet_sk(sp)->inet_num; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8683664..d1fd287 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2036,7 +2036,7 @@ static void get_openreq6(struct seq_file *seq, seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], @@ -2087,7 +2087,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, @@ -2129,7 +2129,7 @@ static void get_timewait6_sock(struct seq_file *seq, seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index fc0c42a..41f8c9c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1391,7 +1391,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket srcp = ntohs(inet->inet_sport); seq_printf(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", bucket, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/key/af_key.c b/net/key/af_key.c index d62401c..8f92cf8 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3656,7 +3656,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v) if (v == SEQ_START_TOKEN) seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); else - seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", + seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), sk_rmem_alloc_get(s), diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5fe4f3b..6ef64ad 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1985,7 +1985,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v) struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); - seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n", + seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", s, 
s->sk_protocol, nlk->pid, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 549527b..925f715 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2706,7 +2706,7 @@ static int packet_seq_show(struct seq_file *seq, void *v) const struct packet_sock *po = pkt_sk(s); seq_printf(seq, - "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", + "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), s->sk_type, diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 8c5bfce..ab07711 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -607,7 +607,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v) struct pn_sock *pn = pn_sk(sk); seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " - "%d %p %d%n", + "%d %pK %d%n", sk->sk_protocol, pn->sobject, pn->dobject, pn->resource, sk->sk_state, sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 61aacfb..05a6ce2 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -212,7 +212,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) sctp_for_each_hentry(epb, node, &head->chain) { ep = sctp_ep(epb); sk = epb->sk; - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, + seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, sctp_sk(sk)->type, sk->sk_state, hash, epb->bind_addr.port, sock_i_uid(sk), sock_i_ino(sk)); @@ -316,7 +316,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) assoc = sctp_assoc(epb); sk = epb->sk; seq_printf(seq, - "%8p %8p %-3d %-3d %-2d %-4d " + "%8pK %8pK %-3d %-3d %-2d %-4d " "%4d %8d %8d %7d %5lu %-5d %5d ", assoc, sk, sctp_sk(sk)->type, sk->sk_state, assoc->state, hash, diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b1d75be..0722a25 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2254,7 +2254,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) struct unix_sock *u = unix_sk(s); unix_state_lock(s); - seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", + seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", s, atomic_read(&s->sk_refcnt), 0, -- cgit v1.1 From be3fc413da9eb17cce0991f214ab019d16c88c41 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 May 2011 23:07:32 +0000 Subject: net: use synchronize_rcu_expedited() synchronize_rcu() is very slow in various situations (HZ=100, CONFIG_NO_HZ=y, CONFIG_PREEMPT=n) Extract from my (mostly idle) 8 core machine : synchronize_rcu() in 99985 us synchronize_rcu() in 79982 us synchronize_rcu() in 87612 us synchronize_rcu() in 79827 us synchronize_rcu() in 109860 us synchronize_rcu() in 98039 us synchronize_rcu() in 89841 us synchronize_rcu() in 79842 us synchronize_rcu() in 80151 us synchronize_rcu() in 119833 us synchronize_rcu() in 99858 us synchronize_rcu() in 73999 us synchronize_rcu() in 79855 us synchronize_rcu() in 79853 us When we hold RTNL mutex, we would like to spend some cpu cycles but not block too long other processes waiting for this mutex. We also want to setup/dismantle network features as fast as possible at boot/shutdown time. This patch makes synchronize_net() call the expedited version if RTNL is locked. synchronize_rcu_expedited() typical delay is about 20 us on my machine. 
synchronize_rcu_expedited() in 18 us synchronize_rcu_expedited() in 18 us synchronize_rcu_expedited() in 18 us synchronize_rcu_expedited() in 18 us synchronize_rcu_expedited() in 20 us synchronize_rcu_expedited() in 16 us synchronize_rcu_expedited() in 20 us synchronize_rcu_expedited() in 18 us synchronize_rcu_expedited() in 18 us Signed-off-by: Eric Dumazet CC: Paul E. McKenney CC: Ben Greear Reviewed-by: Paul E. McKenney Signed-off-by: David S. Miller --- net/core/dev.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index bcb05cb..ec11d75 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5954,7 +5954,10 @@ EXPORT_SYMBOL(free_netdev); void synchronize_net(void) { might_sleep(); - synchronize_rcu(); + if (rtnl_is_locked()) + synchronize_rcu_expedited(); + else + synchronize_rcu(); } EXPORT_SYMBOL(synchronize_net); -- cgit v1.1 From 24cf3af3fed5edcf90bc2a0ed181e6ce1513d2dc Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Mon, 23 May 2011 23:15:05 +0000 Subject: igmp: call ip_mc_clear_src() only when we have no users of ip_mc_list In igmp_group_dropped() we call ip_mc_clear_src(), which resets the number of source filters per mulitcast. However, igmp_group_dropped() is also called on NETDEV_DOWN, NETDEV_PRE_TYPE_CHANGE and NETDEV_UNREGISTER, which means that the group might get added back on NETDEV_UP, NETDEV_REGISTER and NETDEV_POST_TYPE_CHANGE respectively, leaving us with broken source filters. To fix that, we must clear the source filters only when there are no users in the ip_mc_list, i.e. in ip_mc_dec_group() and on device destroy. Acked-by: David L Stevens Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller --- net/ipv4/igmp.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 672e476..f1d27f6 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1155,20 +1155,18 @@ static void igmp_group_dropped(struct ip_mc_list *im) if (!in_dev->dead) { if (IGMP_V1_SEEN(in_dev)) - goto done; + return; if (IGMP_V2_SEEN(in_dev)) { if (reporter) igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE); - goto done; + return; } /* IGMPv3 */ igmpv3_add_delrec(in_dev, im); igmp_ifc_event(in_dev); } -done: #endif - ip_mc_clear_src(im); } static void igmp_group_added(struct ip_mc_list *im) @@ -1305,6 +1303,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) *ip = i->next_rcu; in_dev->mc_count--; igmp_group_dropped(i); + ip_mc_clear_src(i); if (!in_dev->dead) ip_rt_multicast_event(in_dev); @@ -1414,7 +1413,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev) in_dev->mc_list = i->next_rcu; in_dev->mc_count--; - igmp_group_dropped(i); + /* We've dropped the groups in ip_mc_down already */ + ip_mc_clear_src(i); ip_ma_put(i); } } -- cgit v1.1 From b30c516f875004f025f4d10147bde28c5e98466b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 24 May 2011 13:29:50 -0400 Subject: net: fix __dst_destroy_metrics_generic() dst_default_metrics is readonly, we dont want to kfree() it later. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dst.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/dst.c b/net/core/dst.c index 81a4fa1..9ccca03 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -315,7 +315,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) { unsigned long prev, new; - new = (unsigned long) dst_default_metrics; + new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; prev = cmpxchg(&dst->_metrics, old, new); if (prev == old) kfree(__DST_METRICS_PTR(old)); -- cgit v1.1 From 33eb9873a283a2076f2b5628813d5365ca420ea9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 24 May 2011 13:32:18 -0400 Subject: bridge: initialize fake_rtable metrics bridge netfilter code uses a fake_rtable, and we must init its _metric field or risk NULL dereference later. Ref: https://bugzilla.kernel.org/show_bug.cgi?id=35672 Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/bridge/br_netfilter.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index e1f5ec7..3fa1231 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -117,6 +117,10 @@ static struct dst_ops fake_dst_ops = { * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. */ +static const u32 br_dst_default_metrics[RTAX_MAX] = { + [RTAX_MTU - 1] = 1500, +}; + void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; @@ -124,7 +128,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) atomic_set(&rt->dst.__refcnt, 1); rt->dst.dev = br->dev; rt->dst.path = &rt->dst; - dst_metric_set(&rt->dst, RTAX_MTU, 1500); + dst_init_metrics(&rt->dst, br_dst_default_metrics, true); rt->dst.flags = DST_NOXFRM; rt->dst.ops = &fake_dst_ops; } -- cgit v1.1 From 6dcbbe25dcc9bd2bdeb4f685f8fb874ffc10e6be Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Tue, 24 May 2011 08:31:08 +0000 Subject: net: move is_vlan_dev into public header file (v2) Migrate is_vlan_dev() to if_vlan.h so that core networkig can use it Signed-off-by: Neil Horman CC: davem@davemloft.net CC: bhutchings@solarflare.com Signed-off-by: David S. Miller --- net/8021q/vlan.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'net') diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index c3408de..9da07e3 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -118,11 +118,6 @@ extern void vlan_netlink_fini(void); extern struct rtnl_link_ops vlan_link_ops; -static inline int is_vlan_dev(struct net_device *dev) -{ - return dev->priv_flags & IFF_802_1Q_VLAN; -} - extern int vlan_net_id; struct proc_dir_entry; -- cgit v1.1 From f11970e383acd6f505f492f1bc07fb1a4d884829 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Tue, 24 May 2011 08:31:09 +0000 Subject: net: make dev_disable_lro use physical device if passed a vlan dev (v2) If the device passed into dev_disable_lro is a vlan, then repoint the dev poniter so that we actually modify the underlying physical device. Signed-of-by: Neil Horman CC: davem@davemloft.net CC: bhutchings@solarflare.com Signed-off-by: David S. 
Miller --- net/core/dev.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index ec11d75..c7e305d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1308,6 +1308,13 @@ void dev_disable_lro(struct net_device *dev) { u32 flags; + /* + * If we're trying to disable lro on a vlan device + * use the underlying physical device instead + */ + if (is_vlan_dev(dev)) + dev = vlan_dev_real_dev(dev); + if (dev->ethtool_ops && dev->ethtool_ops->get_flags) flags = dev->ethtool_ops->get_flags(dev); else -- cgit v1.1 From 8b4472cc13136d04727e399c6fdadf58d2218b0a Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 24 May 2011 21:48:02 +0000 Subject: sctp: fix memory leak of the ASCONF queue when free asoc If an ASCONF chunk is outstanding, then the following ASCONF chunk will be queued for later transmission. But when we free the asoc, we forget to free the ASCONF queue at the same time, this will cause memory leak. Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- net/sctp/associola.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'net') diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 1a21c57..525f97c 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -64,6 +64,7 @@ /* Forward declarations for internal functions. */ static void sctp_assoc_bh_rcv(struct work_struct *work); static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); /* Keep track of the new idr low so that we don't re-use association id * numbers too fast. It is protected by they idr spin lock is in the @@ -446,6 +447,9 @@ void sctp_association_free(struct sctp_association *asoc) /* Free any cached ASCONF_ACK chunk. */ sctp_assoc_free_asconf_acks(asoc); + /* Free the ASCONF queue. */ + sctp_assoc_free_asconf_queue(asoc); + /* Free any cached ASCONF chunk. */ if (asoc->addip_last_asconf) sctp_chunk_free(asoc->addip_last_asconf); @@ -1578,6 +1582,18 @@ retry: return error; } +/* Free the ASCONF queue */ +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc) +{ + struct sctp_chunk *asconf; + struct sctp_chunk *tmp; + + list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) { + list_del_init(&asconf->list); + sctp_chunk_free(asconf); + } +} + /* Free asconf_ack cache */ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) { -- cgit v1.1 From 07bd8df5df4369487812bf85a237322ff3569b77 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 25 May 2011 04:40:11 +0000 Subject: sch_sfq: fix peek() implementation Since commit eeaeb068f139 (sch_sfq: allow big packets and be fair), sfq_peek() can return a different skb that would be normally dequeued by sfq_dequeue() [ if current slot->allot is negative ] Use generic qdisc_peek_dequeued() instead of custom implementation, to get consistent result. Signed-off-by: Eric Dumazet CC: Jarek Poplawski CC: Patrick McHardy CC: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- net/sched/sch_sfq.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'net') diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index b1d00f8..b6ea6af 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -414,18 +414,6 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } static struct sk_buff * -sfq_peek(struct Qdisc *sch) -{ - struct sfq_sched_data *q = qdisc_priv(sch); - - /* No active slots */ - if (q->tail == NULL) - return NULL; - - return q->slots[q->tail->next].skblist_next; -} - -static struct sk_buff * sfq_dequeue(struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); @@ -706,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = { .priv_size = sizeof(struct sfq_sched_data), .enqueue = sfq_enqueue, .dequeue = sfq_dequeue, - .peek = sfq_peek, + .peek = qdisc_peek_dequeued, .drop = sfq_drop, .init = sfq_init, .reset = sfq_reset, -- cgit v1.1 From 2907c35ff64708065e5a7fd54e8ded8263eb3074 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 25 May 2011 07:34:04 +0000 Subject: net: hold rtnl again in dump callbacks Commit e67f88dd12f6 (dont hold rtnl mutex during netlink dump callbacks) missed fact that rtnl_fill_ifinfo() must be called with rtnl held. Because of possible deadlocks between two mutexes (cb_mutex and rtnl), its not easy to solve this problem, so revert this part of the patch. It also forgot one rcu_read_unlock() in FIB dump_rules() Add one ASSERT_RTNL() in rtnl_fill_ifinfo() to remind us the rule. Signed-off-by: Eric Dumazet CC: Patrick McHardy CC: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/fib_rules.c | 1 + net/core/rtnetlink.c | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 3911586..008dc70 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -602,6 +602,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, skip: idx++; } + rcu_read_unlock(); cb->args[1] = idx; rules_ops_put(ops); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d1644e3..2d56cb9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -850,6 +850,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, struct nlattr *attr, *af_spec; struct rtnl_af_ops *af_ops; + ASSERT_RTNL(); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE; @@ -1876,6 +1877,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) int min_len; int family; int type; + int err; type = nlh->nlmsg_type; if (type > RTM_MAX) @@ -1902,8 +1904,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (dumpit == NULL) return -EOPNOTSUPP; + __rtnl_unlock(); rtnl = net->rtnl; - return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); + err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); + rtnl_lock(); + return err; } memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *))); @@ -1975,7 +1980,7 @@ static int __net_init rtnetlink_net_init(struct net *net) { struct sock *sk; sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, - rtnetlink_rcv, NULL, THIS_MODULE); + rtnetlink_rcv, &rtnl_mutex, THIS_MODULE); if (!sk) return -ENOMEM; net->rtnl = sk; -- cgit v1.1