author	Eric Dumazet <eric.dumazet@gmail.com>	2011-04-04 17:04:03 +0200
committer	Patrick McHardy <kaber@trash.net>	2011-04-04 17:04:03 +0200
commit	7f5c6d4f665bb57a19a34ce1fb16cc708c04f219 (patch)
tree	e804faa506bbf9edcfd1fdadb2ab3749f58836cd /net/ipv6
parent	8f7b01a178b8e6a7b663a1bbaa1710756d67b69b (diff)
netfilter: get rid of atomic ops in fast path
We currently use a percpu spinlock to 'protect' rule bytes/packets
counters, after various attempts to use RCU instead.

Lately we added a seqlock so that get_counters() can run without
blocking BH or 'writers'. But we really only need the seqcount in it.

The spinlock itself is only ever taken by the current/owner cpu, so we
can remove it completely.

This cleans up the API, using correct 'writer' vs 'reader' semantics.

At replace time, the get_counters() call makes sure all cpus are done
using the old table.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Jan Engelhardt <jengelh@medozas.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
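For context: the xt_write_recseq_begin()/xt_write_recseq_end() helpers used
below were added by the same patch to include/linux/netfilter/x_tables.h,
which is outside the net/ipv6 diff shown here. Reconstructed roughly, they
look like the following sketch (treat details as approximate rather than
authoritative); the low-order bit of the per-cpu sequence doubles as a
'writer active' flag, which is what makes nesting on the same cpu safe:

	/* Sketch of the companion helpers from
	 * include/linux/netfilter/x_tables.h as introduced by this patch. */
	DECLARE_PER_CPU(seqcount_t, xt_recseq);

	/* Begin a packet-processing 'write' section. Preemption and
	 * softirqs must already be disabled. Returns 1 normally, 0 if
	 * this cpu is already inside a section (recursion). */
	static inline unsigned int xt_write_recseq_begin(void)
	{
		unsigned int addend;

		/* Low order bit of sequence is set if we already called
		 * xt_write_recseq_begin() on this cpu. */
		addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

		/* Like write_seqcount_begin(), but addend is 0 or 1; no
		 * test and conditional jump, since addend is almost
		 * always 1. */
		__this_cpu_add(xt_recseq.sequence, addend);
		smp_wmb();

		return addend;
	}

	/* End a write section; @addend is the value returned by the
	 * matching xt_write_recseq_begin(). */
	static inline void xt_write_recseq_end(unsigned int addend)
	{
		smp_wmb();
		__this_cpu_add(xt_recseq.sequence, addend);
	}

get_counters() can then spin on read_seqcount_retry() against each cpu's
xt_recseq until any in-flight writer section has finished, which is how the
replace path knows all cpus are done with the old table.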
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/netfilter/ip6_tables.c	19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0b2af9b..ec7cf57 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
unsigned int *stackptr, origptr, cpu;
const struct xt_table_info *private;
struct xt_action_param acpar;
+ unsigned int addend;
/* Initialization */
indev = in ? in->name : nulldevname;
@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- xt_info_rdlock_bh();
+ local_bh_disable();
+ addend = xt_write_recseq_begin();
private = table->private;
cpu = smp_processor_id();
table_base = private->entries[cpu];
@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
} while (!acpar.hotdrop);
*stackptr = origptr;
- xt_info_rdunlock_bh();
+
+ xt_write_recseq_end(addend);
+ local_bh_enable();
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
unsigned int i;
for_each_possible_cpu(cpu) {
- seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+ seqcount_t *s = &per_cpu(xt_recseq, cpu);
i = 0;
xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
unsigned int start;
do {
- start = read_seqbegin(lock);
+ start = read_seqcount_begin(s);
bcnt = iter->counters.bcnt;
pcnt = iter->counters.pcnt;
- } while (read_seqretry(lock, start));
+ } while (read_seqcount_retry(s, start));
ADD_COUNTER(counters[i], bcnt, pcnt);
++i;
@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
int ret = 0;
const void *loc_cpu_entry;
struct ip6t_entry *iter;
+ unsigned int addend;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;
@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
i = 0;
/* Choose the copy that is on our node */
curcpu = smp_processor_id();
- xt_info_wrlock(curcpu);
+ addend = xt_write_recseq_begin();
loc_cpu_entry = private->entries[curcpu];
xt_entry_foreach(iter, loc_cpu_entry, private->size) {
ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
++i;
}
- xt_info_wrunlock(curcpu);
+ xt_write_recseq_end(addend);
unlock_up_free:
local_bh_enable();
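A note on why the begin/end pair returns and consumes an addend:
ip6t_do_table() can re-enter itself on the same cpu (for example when a
target emits a packet that traverses netfilter again), and the nested call
must not close the outer write section. Because the low-order bit of the
sequence encodes 'writer active', the inner call adds 0 and the sequence
stays odd until the outermost end. A standalone illustration of just that
arithmetic (plain C with hypothetical names, no kernel dependencies):

	#include <stdio.h>

	static unsigned int sequence; /* stands in for this cpu's xt_recseq */

	static unsigned int recseq_begin(void)
	{
		/* addend is 1 on first entry (sequence even),
		 * 0 on recursion (sequence already odd) */
		unsigned int addend = (sequence + 1) & 1;
		sequence += addend;
		return addend;
	}

	static void recseq_end(unsigned int addend)
	{
		sequence += addend;
	}

	int main(void)
	{
		unsigned int outer = recseq_begin(); /* 0 -> 1: writer active */
		unsigned int inner = recseq_begin(); /* recursion: addend 0 */
		recseq_end(inner);                   /* still 1: readers wait */
		recseq_end(outer);                   /* 1 -> 2: section over */
		printf("outer=%u inner=%u seq=%u\n", outer, inner, sequence);
		return 0;
	}

Readers (get_counters()) retry while the sequence is odd or has changed, so
they only ever observe counter pairs from outside any write section.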