From 83723d60717f8da0f53f91cf42a845ed56c09662 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 10 Jan 2011 20:11:38 +0100
Subject: netfilter: x_tables: don't block BH while reading counters

Using "iptables -L" with a lot of rules incurs too large a BH latency.
Jesper mentioned ~6 ms and worried about frame drops.

Switch to a per-cpu seqlock scheme, so that taking a snapshot of the
counters doesn't need to block BH (neither on this cpu nor on other cpus).

This adds two increments of the seqlock sequence per ipt_do_table() call,
a reasonable cost for allowing "iptables -L" to run without blocking BH
processing.

Reported-by: Jesper Dangaard Brouer
Signed-off-by: Eric Dumazet
CC: Patrick McHardy
Acked-by: Stephen Hemminger
Acked-by: Jesper Dangaard Brouer
Signed-off-by: Pablo Neira Ayuso
---
 include/linux/netfilter/x_tables.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 742bec0..6712e71 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
--
cgit v1.1
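The reader side of this scheme is not in the hunk above (it lives in the
per-protocol get_counters() functions, which this patch view is limited
away from). The sketch below shows the retry pattern a counter snapshot
can use against the per-cpu seqlock: it is a minimal sketch, assuming the
post-patch xt_info_locks definition, and snapshot_entry_counters() with
its arguments is an illustrative helper name, not kernel API.

/*
 * Minimal sketch of reading one entry's counters without blocking BH.
 * Packets keep updating the counters; if a writer raced with us the
 * sequence changes and we simply retry the read.
 */
#include <linux/seqlock.h>
#include <linux/percpu.h>
#include <linux/netfilter/x_tables.h>

static void snapshot_entry_counters(unsigned int cpu,
				    const struct xt_counters *src,
				    struct xt_counters *dst)
{
	seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
	unsigned int start;
	u64 bcnt, pcnt;

	do {
		/* No spin_lock_bh() here: just sample and retry on races. */
		start = read_seqbegin(lock);
		bcnt = src->bcnt;
		pcnt = src->pcnt;
	} while (read_seqretry(lock, start));

	dst->bcnt += bcnt;
	dst->pcnt += pcnt;
}

Because ipt_do_table() bumps the sequence around its counter updates (via
xt_info_rdlock_bh/xt_info_rdunlock_bh above), the snapshot loop converges
quickly and BH stays enabled for the whole table walk.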
From 2f46e07995734a363608e974a82fd05d5b610750 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Tue, 11 Jan 2011 23:55:51 +0100
Subject: netfilter: ebtables: make broute table work again

The broute table init hook sets up the "br_should_route_hook" pointer,
which then gets called from br_input.

commit a386f99025f13b32502fe5dedf223c20d7283826
(bridge: add proper RCU annotation to should_route_hook)
introduced a typedef, and then changed this to:

	br_should_route_hook_t *rhook;
	[..]
	rhook = rcu_dereference(br_should_route_hook);
	if (*rhook(skb))

The problem is that "br_should_route_hook" contains the address of the
function, so calling *rhook() results in a kernel panic.

Signed-off-by: Florian Westphal
Acked-by: Eric Dumazet
Signed-off-by: Pablo Neira Ayuso
---
 include/linux/if_bridge.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index f7e73c3..dd3f201 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -103,7 +103,7 @@ struct __fdb_entry {
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int,
					    void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
--
cgit v1.1
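The one-character difference in the typedef is easiest to see outside the
kernel. Below is a small user-space sketch; struct sk_buff and ebt_broute()
are stand-ins for the kernel definitions. With the function typedef,
br_should_route_hook_t * is an ordinary function pointer, so storing a
function's address in it and calling through it works; the old pointer
typedef made the hook variable a pointer to a function pointer, and the
extra dereference treated code bytes as an address.

/* User-space illustration of the typedef fix; the struct and the hook
 * function below are stand-ins, not the real kernel definitions. */
#include <stdio.h>

struct sk_buff { int len; };

/* After the fix: the typedef names the function type itself... */
typedef int br_should_route_hook_t(struct sk_buff *skb);

static int ebt_broute(struct sk_buff *skb)
{
	return skb->len != 0;	/* dummy routing decision */
}

/* ...so "br_should_route_hook_t *" is a plain function pointer that can
 * directly hold ebt_broute's address. */
static br_should_route_hook_t *br_should_route_hook = ebt_broute;

int main(void)
{
	struct sk_buff skb = { .len = 42 };
	br_should_route_hook_t *rhook = br_should_route_hook;

	/* For a function pointer, (*rhook)(&skb) and rhook(&skb) are the
	 * same call.  With the old pointer typedef, rhook would have been
	 * a pointer to a function pointer, and dereferencing a slot that
	 * actually holds a function address reads garbage -> crash. */
	if (rhook && (*rhook)(&skb))
		printf("broute hook requested routing\n");
	return 0;
}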