author		Eric Dumazet <dada1@cosmosbay.com>	2007-02-08 14:16:46 -0800
committer	David S. Miller <davem@davemloft.net>	2007-02-08 14:16:46 -0800
commit		dbca9b2750e3b1ee6f56a616160ccfc12e8b161f
tree		bf84c0acb5495fc95bc616d32b8af946f2e3bca9
parent		eac3731bd04c7131478722a3c148b78774553116
[NET]: change layout of ehash table
The ehash table layout is currently this one:

First half of the table is used by sockets not in TIME_WAIT state.
Second half of it is used by sockets in TIME_WAIT state.

This is not optimal because, for a given hash or socket, the two chain heads
are located in separate cache lines.
Moreover, the locks of the second half are never used.
If, instead of this halving, we use two list heads in inet_ehash_bucket instead
of only one, we can probably avoid one cache miss and reduce RAM usage,
particularly if sizeof(rwlock_t) is big (various CONFIG_DEBUG_SPINLOCK /
CONFIG_DEBUG_LOCK_ALLOC settings). So we still halve the table, but we keep
related chains together to speed up lookups and socket state changes.
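
To make the cache-line argument concrete, here is a small userspace sketch
(stand-in types and sizes only, not the kernel definitions; the real structures
are in the diff below). It models the old halved table against the new
two-chain bucket and prints how far apart the two chain heads for one hash sit
in each layout:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for rwlock_t and hlist_head, for illustration only. */
struct fake_hlist_head { void *first; };

/* Old layout: one chain per bucket; the TIME_WAIT chain for hash h
 * lives in a second bucket at index (h + ehash_size). */
struct old_bucket {
	unsigned int           lock;
	struct fake_hlist_head chain;
};

/* New layout: both chains live in the same bucket. */
struct new_bucket {
	unsigned int           lock;
	struct fake_hlist_head chain;
	struct fake_hlist_head twchain;
};

int main(void)
{
	unsigned int ehash_size = 65536, hash = 12345;
	struct old_bucket *oldtab = calloc(2 * ehash_size, sizeof(*oldtab));
	struct new_bucket *newtab = calloc(ehash_size, sizeof(*newtab));

	/* Old scheme: the two chain heads are ehash_size buckets apart,
	 * so a lookup that checks both takes an extra cache miss. */
	printf("old layout: chain heads %td bytes apart\n",
	       (char *)&oldtab[hash + ehash_size].chain - (char *)&oldtab[hash].chain);

	/* New scheme: twchain sits right next to chain in the same bucket. */
	printf("new layout: chain heads %td bytes apart\n",
	       (char *)&newtab[hash].twchain - (char *)&newtab[hash].chain);

	free(oldtab);
	free(newtab);
	return 0;
}

With the 16-byte stand-in buckets this prints 1048576 vs 8: the TIME_WAIT head
moves from a megabyte away into the same cache line as the regular chain.
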
In this patch I did not try to align struct inet_ehash_bucket, but a future
patch could try to give this structure a convenient size (a power of two, or a
multiple of L1_CACHE_SIZE).
I guess the rwlock will just vanish as soon as RCU is plugged into ehash :), so
maybe we don't need to scratch our heads over aligning the bucket...
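
As a rough, purely hypothetical illustration of the size question (userspace
stand-ins; real sizes depend on the architecture and on the debug options
mentioned above):

#include <stdio.h>

/* Stand-ins for rwlock_t and hlist_head, for illustration only. */
struct fake_hlist_head { void *first; };

struct fake_ehash_bucket {
	unsigned int           lock;	/* a non-debug rwlock_t is 4 bytes here */
	struct fake_hlist_head chain;
	struct fake_hlist_head twchain;
};

int main(void)
{
	/* Typically prints 24 on a 64-bit build: neither a power of two
	 * nor a multiple of a 32/64/128 byte L1_CACHE_SIZE, so padding
	 * the bucket to 32 bytes would fit exactly two buckets per
	 * 64-byte cache line, with none straddling a line boundary. */
	printf("sizeof(bucket) = %zu\n", sizeof(struct fake_ehash_bucket));
	return 0;
}
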
Note: in case struct inet_ehash_bucket is not a power of two, we could
probably change alloc_large_system_hash() (in case it uses __get_free_pages())
to free the unused space. It currently allocates a big zone, but the last
quarter of it could be freed (the sizing sketch below illustrates this). Again,
this should be a temporary 'problem'.
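
As a standalone illustration of that note (the 24-byte bucket size and the 2^9
page order are assumptions, not values taken from this patch), this sketch
repeats the dccp_init()-style sizing from the diff below and shows how much of
the allocated zone a non-power-of-two bucket size leaves unused:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long bucket_size = 24;	/* assumed bucket size on a 64-bit build */
	unsigned long ehash_order = 9;	/* arbitrary example: 2^9 pages */
	unsigned long zone = (1UL << ehash_order) * PAGE_SIZE;

	/* Same rounding as dccp_init() below: take as many buckets as fit,
	 * then round the count down to a power of two. */
	unsigned long ehash_size = zone / bucket_size;
	while (ehash_size & (ehash_size - 1))
		ehash_size--;

	unsigned long used = ehash_size * bucket_size;
	printf("zone %lu bytes, used %lu bytes (%.0f%%)\n",
	       zone, used, 100.0 * used / zone);
	return 0;
}

With these numbers it prints 75%, i.e. the last quarter of the zone is indeed
wasted unless it is given back.
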
Patch tested on IPv4 TCP only, but should be OK for IPv6 and DCCP.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/inet_hashtables.h	10
-rw-r--r--	net/dccp/proto.c	4
-rw-r--r--	net/ipv4/inet_diag.c	2
-rw-r--r--	net/ipv4/inet_hashtables.c	2
-rw-r--r--	net/ipv4/inet_timewait_sock.c	4
-rw-r--r--	net/ipv4/tcp.c	7
-rw-r--r--	net/ipv4/tcp_ipv4.c	4
-rw-r--r--	net/ipv6/inet6_hashtables.c	4
8 files changed, 19 insertions, 18 deletions
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 34cc76e..d27ee8c 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -34,12 +34,13 @@
 #include <asm/byteorder.h>
 
 /* This is for all connections with a full identity, no wildcards.
- * New scheme, half the table is for TIME_WAIT, the other half is
- * for the rest.  I'll experiment with dynamic table growth later.
+ * One chain is dedicated to TIME_WAIT sockets.
+ * I'll experiment with dynamic table growth later.
  */
 struct inet_ehash_bucket {
 	rwlock_t	  lock;
 	struct hlist_head chain;
+	struct hlist_head twchain;
 };
 
 /* There are a few simple rules, which allow for local port reuse by
@@ -97,8 +98,7 @@ struct inet_hashinfo {
 	 *
 	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
 	 *
-	 * First half of the table is for sockets not in TIME_WAIT, second half
-	 * is for TIME_WAIT sockets only.
+	 * TIME_WAIT sockets use a separate chain (twchain).
 	 */
 	struct inet_ehash_bucket	*ehash;
 
@@ -369,7 +369,7 @@ static inline struct sock *
 	}
 
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
+	sk_for_each(sk, node, &head->twchain) {
 		if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
 			goto hit;
 	}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 63b3fa2..4843856 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1024,7 +1024,6 @@ static int __init dccp_init(void)
 	do {
 		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
 					sizeof(struct inet_ehash_bucket);
-		dccp_hashinfo.ehash_size >>= 1;
 		while (dccp_hashinfo.ehash_size &
 		       (dccp_hashinfo.ehash_size - 1))
 			dccp_hashinfo.ehash_size--;
@@ -1037,9 +1036,10 @@ static int __init dccp_init(void)
 		goto out_free_bind_bucket_cachep;
 	}
 
-	for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
+	for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
 		rwlock_init(&dccp_hashinfo.ehash[i].lock);
 		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
+		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
 	}
 
 	bhash_order = ehash_order;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9cd53ad..8aa7d51 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -778,7 +778,7 @@ next_normal:
 			struct inet_timewait_sock *tw;
 
 			inet_twsk_for_each(tw, node,
-				    &hashinfo->ehash[i + hashinfo->ehash_size].chain) {
+				    &head->twchain) {
 
 				if (num < s_num)
 					goto next_dying;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8c79c8a..150ace1 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -212,7 +212,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) {
+	sk_for_each(sk2, node, &head->twchain) {
 		tw = inet_twsk(sk2);
 
 		if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 9f414e3..a73cf93 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -78,8 +78,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	if (__sk_del_node_init(sk))
 		sock_prot_dec_use(sk->sk_prot);
 
-	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
-	inet_twsk_add_node(tw, &(ehead + hashinfo->ehash_size)->chain);
+	/* Step 3: Hash TW into TIMEWAIT chain. */
+	inet_twsk_add_node(tw, &ehead->twchain);
 	atomic_inc(&tw->tw_refcnt);
 
 	write_unlock(&ehead->lock);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b67e0dd..5bd43d7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2415,10 +2415,11 @@ void __init tcp_init(void)
 					&tcp_hashinfo.ehash_size,
 					NULL,
 					0);
-	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
-	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
+	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
 		rwlock_init(&tcp_hashinfo.ehash[i].lock);
 		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
+		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
 	}
 
 	tcp_hashinfo.bhash =
@@ -2475,7 +2476,7 @@ void __init tcp_init(void)
 
 	printk(KERN_INFO "TCP: Hash tables configured "
 	       "(established %d bind %d)\n",
-	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
+	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
 
 	tcp_register_congestion_control(&tcp_reno);
 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 383e4b5..f51d640 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2051,7 +2051,7 @@ static void *established_get_first(struct seq_file *seq)
 		}
 		st->state = TCP_SEQ_STATE_TIME_WAIT;
 		inet_twsk_for_each(tw, node,
-				   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
+				   &tcp_hashinfo.ehash[st->bucket].twchain) {
 			if (tw->tw_family != st->family) {
 				continue;
 			}
@@ -2107,7 +2107,7 @@ get_tw:
 		}
 
 		st->state = TCP_SEQ_STATE_TIME_WAIT;
-		tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
+		tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
 		goto get_tw;
 found:
 		cur = sk;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b7e5bae..e611169 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -79,7 +79,7 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
 			goto hit; /* You sunk my battleship! */
 	}
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
+	sk_for_each(sk, node, &head->twchain) {
 		const struct inet_timewait_sock *tw = inet_twsk(sk);
 
 		if(*((__portpair *)&(tw->tw_dport)) == ports &&
@@ -183,7 +183,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) {
+	sk_for_each(sk2, node, &head->twchain) {
 		const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
 
 		tw = inet_twsk(sk2);