path: root/net/ipv4
author     Jens Låås <jens.laas@its.uu.se>        2009-08-28 23:57:15 -0700
committer  David S. Miller <davem@davemloft.net>  2009-08-28 23:57:15 -0700
commit     80b71b80df14d885f7e50e115c1348398f418759 (patch)
tree       f5b58c73d1b2ca7639a8e0513b6c68a992b05484 /net/ipv4
parent     8945a808f7d5efd21fa9fb6055d2dd7887bdd9d8 (diff)
fib_trie: resize rework
Here is a rework and cleanup of the resize function. We had some bugs: we were using ->parent where we should have used node_parent(), and in the inflate loop we used ->parent, which is not assigned by inflate. Also a fix to set the thresholds to a power of 2 to fit the halve and double strategy.

max_resize is renamed to max_work, which better indicates its function. Reaching max_work is not an error, so the warning is removed. max_work only limits the amount of work done per resize (limiting CPU usage, outstanding memory, etc).

The cleanup makes it relatively easy to add fixed-size root nodes if we would like to decrease the memory pressure on routers with large routing tables and dynamic routing. If we'll need that...

It has been tested with 280k routes.

Work done together with Robert Olsson.

Signed-off-by: Jens Låås <jens.laas@its.uu.se>
Signed-off-by: Robert Olsson <robert.olsson@its.uu.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
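To make the inflate/halve arithmetic in the patch below concrete, here is a small userspace sketch of the two percentage checks that resize() keeps. The struct mock_tnode type, the helper names and the example numbers are illustrative stand-ins, not the kernel's fib_trie structures.

/*
 * Userspace sketch of the inflate/halve checks kept by this patch.
 * mock_tnode is an illustrative stand-in, not the kernel's tnode.
 */
#include <stdio.h>

struct mock_tnode {
	int bits;		/* child array holds 1 << bits slots */
	int full_children;	/* children that are full tnodes themselves */
	int empty_children;	/* NULL child slots */
};

static int child_length(const struct mock_tnode *tn)
{
	return 1 << tn->bits;
}

/*
 * Inflate check: 50 * (full + children - empty) >= threshold * children.
 * The factor 50 (instead of 100) accounts for the child array doubling
 * when the node is inflated.
 */
static int should_inflate(const struct mock_tnode *tn, int inflate_threshold)
{
	int children = child_length(tn);

	return tn->full_children > 0 &&
	       50 * (tn->full_children + children - tn->empty_children) >=
	       inflate_threshold * children;
}

/* Halve check: 100 * (children - empty) < threshold * children. */
static int should_halve(const struct mock_tnode *tn, int halve_threshold)
{
	int children = child_length(tn);

	return tn->bits > 1 &&
	       100 * (children - tn->empty_children) <
	       halve_threshold * children;
}

int main(void)
{
	/* Example node: 16 slots, 6 full children, 2 empty slots. */
	struct mock_tnode tn = { .bits = 4, .full_children = 6, .empty_children = 2 };

	printf("inflate at threshold 50: %d\n", should_inflate(&tn, 50));
	printf("halve at threshold 25:   %d\n", should_halve(&tn, 25));
	return 0;
}

With this example node the inflate check at threshold 50 passes while the halve check at threshold 25 does not, matching how the loops in the patch below would treat it.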
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/fib_trie.c  |  95
1 file changed, 23 insertions(+), 72 deletions(-)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index fe3c846..291bdf5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -48,7 +48,7 @@
* Patrick McHardy <kaber@trash.net>
*/
-#define VERSION "0.408"
+#define VERSION "0.409"
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -325,10 +325,7 @@ static inline void check_tnode(const struct tnode *tn)
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
-static const int inflate_threshold_root = 25;
-
-static int inflate_threshold_root_fix;
-#define INFLATE_FIX_MAX 10 /* a comment in resize() */
+static const int inflate_threshold_root = 30;
static void __alias_free_mem(struct rcu_head *head)
{
@@ -516,14 +513,14 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
rcu_assign_pointer(tn->child[i], n);
}
+#define MAX_WORK 10
static struct node *resize(struct trie *t, struct tnode *tn)
{
int i;
- int err = 0;
struct tnode *old_tn;
int inflate_threshold_use;
int halve_threshold_use;
- int max_resize;
+ int max_work;
if (!tn)
return NULL;
@@ -538,18 +535,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
}
/* One child */
if (tn->empty_children == tnode_child_length(tn) - 1)
- for (i = 0; i < tnode_child_length(tn); i++) {
- struct node *n;
-
- n = tn->child[i];
- if (!n)
- continue;
-
- /* compress one level */
- node_set_parent(n, NULL);
- tnode_free_safe(tn);
- return n;
- }
+ goto one_child;
/*
* Double as long as the resulting node has a number of
* nonempty nodes that are above the threshold.
@@ -618,15 +604,17 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Keep root node larger */
- if (!tn->parent)
- inflate_threshold_use = inflate_threshold_root +
- inflate_threshold_root_fix;
- else
+ if (!node_parent((struct node*) tn)) {
+ inflate_threshold_use = inflate_threshold_root;
+ halve_threshold_use = halve_threshold_root;
+ }
+ else {
inflate_threshold_use = inflate_threshold;
+ halve_threshold_use = halve_threshold;
+ }
- err = 0;
- max_resize = 10;
- while ((tn->full_children > 0 && max_resize-- &&
+ max_work = MAX_WORK;
+ while ((tn->full_children > 0 && max_work-- &&
50 * (tn->full_children + tnode_child_length(tn)
- tn->empty_children)
>= inflate_threshold_use * tnode_child_length(tn))) {
@@ -643,47 +631,19 @@ static struct node *resize(struct trie *t, struct tnode *tn)
}
}
- if (max_resize < 0) {
- if (!tn->parent) {
- /*
- * It was observed that during large updates even
- * inflate_threshold_root = 35 might be needed to avoid
- * this warning; but it should be temporary, so let's
- * try to handle this automatically.
- */
- if (inflate_threshold_root_fix < INFLATE_FIX_MAX)
- inflate_threshold_root_fix++;
- else
- pr_warning("Fix inflate_threshold_root."
- " Now=%d size=%d bits fix=%d\n",
- inflate_threshold_root, tn->bits,
- inflate_threshold_root_fix);
- } else {
- pr_warning("Fix inflate_threshold."
- " Now=%d size=%d bits\n",
- inflate_threshold, tn->bits);
- }
- } else if (max_resize > 3 && !tn->parent && inflate_threshold_root_fix)
- inflate_threshold_root_fix--;
-
check_tnode(tn);
+ /* Return if at least one inflate is run */
+ if( max_work != MAX_WORK)
+ return (struct node *) tn;
+
/*
* Halve as long as the number of empty children in this
* node is above threshold.
*/
-
- /* Keep root node larger */
-
- if (!tn->parent)
- halve_threshold_use = halve_threshold_root;
- else
- halve_threshold_use = halve_threshold;
-
- err = 0;
- max_resize = 10;
- while (tn->bits > 1 && max_resize-- &&
+ max_work = MAX_WORK;
+ while (tn->bits > 1 && max_work-- &&
100 * (tnode_child_length(tn) - tn->empty_children) <
halve_threshold_use * tnode_child_length(tn)) {
@@ -698,19 +658,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
}
}
- if (max_resize < 0) {
- if (!tn->parent)
- pr_warning("Fix halve_threshold_root."
- " Now=%d size=%d bits\n",
- halve_threshold_root, tn->bits);
- else
- pr_warning("Fix halve_threshold."
- " Now=%d size=%d bits\n",
- halve_threshold, tn->bits);
- }
/* Only one child remains */
- if (tn->empty_children == tnode_child_length(tn) - 1)
+ if (tn->empty_children == tnode_child_length(tn) - 1) {
+one_child:
for (i = 0; i < tnode_child_length(tn); i++) {
struct node *n;
@@ -724,7 +675,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
tnode_free_safe(tn);
return n;
}
-
+ }
return (struct node *) tn;
}
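As a follow-up on the design choice visible above (inflate first, halve only in a pass where no inflate ran, and never more than MAX_WORK operations per call), here is a compact, self-contained model of that control flow. The mock_node state, the grow()/shrink() helpers and the fill percentages are hypothetical stand-ins; this is a sketch of the strategy, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

#define MAX_WORK 10

struct mock_node {
	int bits;	/* log2 of the child array size */
	int fill_pct;	/* pretend fill ratio in percent */
};

static bool want_inflate(const struct mock_node *n)
{
	return n->fill_pct >= 50;
}

static bool want_halve(const struct mock_node *n)
{
	return n->bits > 1 && n->fill_pct < 25;
}

static void grow(struct mock_node *n)   { n->bits++; n->fill_pct -= 15; }	/* fake inflate */
static void shrink(struct mock_node *n) { n->bits--; n->fill_pct += 15; }	/* fake halve */

/*
 * One resize pass: inflate at most MAX_WORK times; if any inflate ran,
 * stop here and leave halving to a later pass; otherwise halve at most
 * MAX_WORK times.  Hitting the cap is not an error, it only bounds the
 * work (CPU time, outstanding memory) done per call.
 */
static void resize_pass(struct mock_node *n)
{
	int work;

	for (work = 0; work < MAX_WORK && want_inflate(n); work++)
		grow(n);

	if (work)
		return;

	for (work = 0; work < MAX_WORK && want_halve(n); work++)
		shrink(n);
}

int main(void)
{
	struct mock_node n = { .bits = 4, .fill_pct = 70 };

	resize_pass(&n);
	printf("bits=%d fill=%d%%\n", n.bits, n.fill_pct);
	return 0;
}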