Diffstat (limited to 'contrib/pf/pfctl/pfctl_optimize.c')
-rw-r--r--  contrib/pf/pfctl/pfctl_optimize.c | 156
1 file changed, 119 insertions, 37 deletions
diff --git a/contrib/pf/pfctl/pfctl_optimize.c b/contrib/pf/pfctl/pfctl_optimize.c
index 111dc32..44c0477 100644
--- a/contrib/pf/pfctl/pfctl_optimize.c
+++ b/contrib/pf/pfctl/pfctl_optimize.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pfctl_optimize.c,v 1.5 2005/01/03 15:18:10 frantzen Exp $ */
+/* $OpenBSD: pfctl_optimize.c,v 1.13 2006/10/31 14:17:45 mcbride Exp $ */
/*
* Copyright (c) 2004 Mike Frantzen <frantzen@openbsd.org>
@@ -112,6 +112,10 @@ struct pf_rule_field {
PF_RULE_FIELD(prob, BARRIER),
PF_RULE_FIELD(max_states, BARRIER),
PF_RULE_FIELD(max_src_nodes, BARRIER),
+ PF_RULE_FIELD(max_src_states, BARRIER),
+ PF_RULE_FIELD(max_src_conn, BARRIER),
+ PF_RULE_FIELD(max_src_conn_rate, BARRIER),
+ PF_RULE_FIELD(anchor, BARRIER), /* for now */
/*
* These fields must be the same between all rules in the same superblock.
@@ -123,10 +127,18 @@ struct pf_rule_field {
PF_RULE_FIELD(tagname, BREAK),
PF_RULE_FIELD(keep_state, BREAK),
PF_RULE_FIELD(qname, BREAK),
+ PF_RULE_FIELD(pqname, BREAK),
PF_RULE_FIELD(rt, BREAK),
PF_RULE_FIELD(allow_opts, BREAK),
PF_RULE_FIELD(rule_flag, BREAK),
PF_RULE_FIELD(action, BREAK),
+ PF_RULE_FIELD(log, BREAK),
+ PF_RULE_FIELD(quick, BREAK),
+ PF_RULE_FIELD(return_ttl, BREAK),
+ PF_RULE_FIELD(overload_tblname, BREAK),
+ PF_RULE_FIELD(flush, BREAK),
+ PF_RULE_FIELD(rpool, BREAK),
+ PF_RULE_FIELD(logif, BREAK),
/*
* Any fields not listed in this structure act as BREAK fields
@@ -140,7 +152,7 @@ struct pf_rule_field {
*/
PF_RULE_FIELD(af, NOMERGE),
PF_RULE_FIELD(ifnot, NOMERGE),
- PF_RULE_FIELD(ifname, NOMERGE),
+ PF_RULE_FIELD(ifname, NOMERGE), /* hack for IF groups */
PF_RULE_FIELD(match_tag_not, NOMERGE),
PF_RULE_FIELD(match_tagname, NOMERGE),
PF_RULE_FIELD(os_fingerprint, NOMERGE),
@@ -173,7 +185,6 @@ struct pf_rule_field {
PF_RULE_FIELD(packets, DC),
PF_RULE_FIELD(bytes, DC),
PF_RULE_FIELD(kif, DC),
- PF_RULE_FIELD(anchor, DC),
PF_RULE_FIELD(states, DC),
PF_RULE_FIELD(src_nodes, DC),
PF_RULE_FIELD(nr, DC),
@@ -182,6 +193,9 @@ struct pf_rule_field {
PF_RULE_FIELD(pqid, DC),
PF_RULE_FIELD(anchor_relative, DC),
PF_RULE_FIELD(anchor_wildcard, DC),
+ PF_RULE_FIELD(tag, DC),
+ PF_RULE_FIELD(match_tag, DC),
+ PF_RULE_FIELD(overload_tbl, DC),
/* These fields should never be set in a PASS/BLOCK rule */
PF_RULE_FIELD(natpass, NEVER),
@@ -201,6 +215,7 @@ void comparable_rule(struct pf_rule *, const struct pf_rule *, int);
int construct_superblocks(struct pfctl *, struct pf_opt_queue *,
struct superblocks *);
void exclude_supersets(struct pf_rule *, struct pf_rule *);
+int interface_group(const char *);
int load_feedback_profile(struct pfctl *, struct superblocks *);
int optimize_superblock(struct pfctl *, struct superblock *);
int pf_opt_create_table(struct pfctl *, struct pf_opt_tbl *);
@@ -243,25 +258,52 @@ int table_identifier;
int
-pfctl_optimize_rules(struct pfctl *pf)
+pfctl_optimize_ruleset(struct pfctl *pf, struct pf_ruleset *rs)
{
struct superblocks superblocks;
+ struct pf_opt_queue opt_queue;
struct superblock *block;
struct pf_opt_rule *por;
- int nr;
+ struct pf_rule *r;
+ struct pf_rulequeue *old_rules;
DEBUG("optimizing ruleset");
memset(&table_buffer, 0, sizeof(table_buffer));
skip_init();
+ TAILQ_INIT(&opt_queue);
- if (TAILQ_FIRST(&pf->opt_queue))
- nr = TAILQ_FIRST(&pf->opt_queue)->por_rule.nr;
+ old_rules = rs->rules[PF_RULESET_FILTER].active.ptr;
+ rs->rules[PF_RULESET_FILTER].active.ptr =
+ rs->rules[PF_RULESET_FILTER].inactive.ptr;
+ rs->rules[PF_RULESET_FILTER].inactive.ptr = old_rules;
+
+ /*
+ * XXX expanding the pf_opt_rule format throughout pfctl might allow
+ * us to avoid all this copying.
+ */
+ while ((r = TAILQ_FIRST(rs->rules[PF_RULESET_FILTER].inactive.ptr))
+ != NULL) {
+ TAILQ_REMOVE(rs->rules[PF_RULESET_FILTER].inactive.ptr, r,
+ entries);
+ if ((por = calloc(1, sizeof(*por))) == NULL)
+ err(1, "calloc");
+ memcpy(&por->por_rule, r, sizeof(*r));
+ if (TAILQ_FIRST(&r->rpool.list) != NULL) {
+ TAILQ_INIT(&por->por_rule.rpool.list);
+ pfctl_move_pool(&r->rpool, &por->por_rule.rpool);
+ } else
+ bzero(&por->por_rule.rpool,
+ sizeof(por->por_rule.rpool));
+
+
+ TAILQ_INSERT_TAIL(&opt_queue, por, por_entry);
+ }
TAILQ_INIT(&superblocks);
- if (construct_superblocks(pf, &pf->opt_queue, &superblocks))
+ if (construct_superblocks(pf, &opt_queue, &superblocks))
goto error;
- if (pf->opts & PF_OPT_OPTIMIZE_PROFILE) {
+ if (pf->optimize & PF_OPTIMIZE_PROFILE) {
if (load_feedback_profile(pf, &superblocks))
goto error;
}
@@ -271,24 +313,21 @@ pfctl_optimize_rules(struct pfctl *pf)
goto error;
}
-
- /*
- * Optimizations are done so we turn off the optimization flag and
- * put the rules right back into the regular codepath.
- */
- pf->opts &= ~PF_OPT_OPTIMIZE;
-
+ rs->anchor->refcnt = 0;
while ((block = TAILQ_FIRST(&superblocks))) {
TAILQ_REMOVE(&superblocks, block, sb_entry);
while ((por = TAILQ_FIRST(&block->sb_rules))) {
TAILQ_REMOVE(&block->sb_rules, por, por_entry);
- por->por_rule.nr = nr++;
- if (pfctl_add_rule(pf, &por->por_rule,
- por->por_anchor)) {
- free(por);
- goto error;
- }
+ por->por_rule.nr = rs->anchor->refcnt++;
+ if ((r = calloc(1, sizeof(*r))) == NULL)
+ err(1, "calloc");
+ memcpy(r, &por->por_rule, sizeof(*r));
+ TAILQ_INIT(&r->rpool.list);
+ pfctl_move_pool(&por->por_rule.rpool, &r->rpool);
+ TAILQ_INSERT_TAIL(
+ rs->rules[PF_RULESET_FILTER].active.ptr,
+ r, entries);
free(por);
}
free(block);
@@ -297,8 +336,8 @@ pfctl_optimize_rules(struct pfctl *pf)
return (0);
error:
- while ((por = TAILQ_FIRST(&pf->opt_queue))) {
- TAILQ_REMOVE(&pf->opt_queue, por, por_entry);
+ while ((por = TAILQ_FIRST(&opt_queue))) {
+ TAILQ_REMOVE(&opt_queue, por, por_entry);
if (por->por_src_tbl) {
pfr_buf_clear(por->por_src_tbl->pt_buf);
free(por->por_src_tbl->pt_buf);
@@ -367,7 +406,8 @@ optimize_superblock(struct pfctl *pf, struct superblock *block)
printf("--- Superblock ---\n");
TAILQ_FOREACH(por, &block->sb_rules, por_entry) {
printf(" ");
- print_rule(&por->por_rule, por->por_anchor, 1);
+ print_rule(&por->por_rule, por->por_rule.anchor ?
+ por->por_rule.anchor->name : "", 1);
}
#endif /* OPT_DEBUG */
@@ -376,7 +416,7 @@ optimize_superblock(struct pfctl *pf, struct superblock *block)
return (1);
if (combine_rules(pf, block))
return (1);
- if ((pf->opts & PF_OPT_OPTIMIZE_PROFILE) &&
+ if ((pf->optimize & PF_OPTIMIZE_PROFILE) &&
TAILQ_FIRST(&block->sb_rules)->por_rule.quick &&
block->sb_profiled_block) {
if (block_feedback(pf, block))
@@ -783,14 +823,16 @@ block_feedback(struct pfctl *pf, struct superblock *block)
*/
TAILQ_FOREACH(por1, &block->sb_profiled_block->sb_rules, por_entry) {
comparable_rule(&a, &por1->por_rule, DC);
- total_count += por1->por_rule.packets;
+ total_count += por1->por_rule.packets[0] +
+ por1->por_rule.packets[1];
TAILQ_FOREACH(por2, &block->sb_rules, por_entry) {
if (por2->por_profile_count)
continue;
comparable_rule(&b, &por2->por_rule, DC);
if (memcmp(&a, &b, sizeof(a)) == 0) {
por2->por_profile_count =
- por1->por_rule.packets;
+ por1->por_rule.packets[0] +
+ por1->por_rule.packets[1];
break;
}
}
@@ -858,6 +900,7 @@ load_feedback_profile(struct pfctl *pf, struct superblocks *superblocks)
DEBUG("Loading %d active rules for a feedback profile", mnr);
for (nr = 0; nr < mnr; ++nr) {
+ struct pf_ruleset *rs;
if ((por = calloc(1, sizeof(*por))) == NULL) {
warn("calloc");
return (1);
@@ -868,8 +911,8 @@ load_feedback_profile(struct pfctl *pf, struct superblocks *superblocks)
return (1);
}
memcpy(&por->por_rule, &pr.rule, sizeof(por->por_rule));
- strlcpy(por->por_anchor, pr.anchor_call,
- sizeof(por->por_anchor));
+ rs = pf_find_or_create_ruleset(pr.anchor_call);
+ por->por_rule.anchor = rs->anchor;
if (TAILQ_EMPTY(&por->por_rule.rpool.list))
memset(&por->por_rule.rpool, 0,
sizeof(por->por_rule.rpool));
@@ -1052,6 +1095,7 @@ skip_cmp_dst_addr(struct pf_rule *a, struct pf_rule *b)
return (1);
return (0);
case PF_ADDR_NOROUTE:
+ case PF_ADDR_URPFFAILED:
return (0);
case PF_ADDR_TABLE:
return (strcmp(a->dst.addr.v.tblname, b->dst.addr.v.tblname));
@@ -1123,6 +1167,7 @@ skip_cmp_src_addr(struct pf_rule *a, struct pf_rule *b)
return (1);
return (0);
case PF_ADDR_NOROUTE:
+ case PF_ADDR_URPFFAILED:
return (0);
case PF_ADDR_TABLE:
return (strcmp(a->src.addr.v.tblname, b->src.addr.v.tblname));
@@ -1274,8 +1319,8 @@ again:
tablenum++;
- if (pfctl_define_table(tbl->pt_name, PFR_TFLAG_CONST, 1, pf->anchor,
- tbl->pt_buf, pf->tticket)) {
+ if (pfctl_define_table(tbl->pt_name, PFR_TFLAG_CONST, 1,
+ pf->anchor->name, tbl->pt_buf, pf->anchor->ruleset.tticket)) {
warn("failed to create table %s", tbl->pt_name);
return (1);
}
@@ -1374,15 +1419,34 @@ superblock_inclusive(struct superblock *block, struct pf_opt_rule *por)
}
}
- /* 'anchor' heads and per-rule src-track are also hard breaks */
- if (por->por_anchor[0] != '\0' ||
- (por->por_rule.rule_flag & PFRULE_RULESRCTRACK))
+ /* per-rule src-track is also a hard break */
+ if (por->por_rule.rule_flag & PFRULE_RULESRCTRACK)
return (0);
+ /*
+ * Have to handle interface groups separately. Consider the following
+ * rules:
+ * block on EXTIFS to any port 22
+ * pass on em0 to any port 22
+ * (where EXTIFS is an arbitrary interface group)
+ * The optimizer may decide to re-order the pass rule in front of the
+ * block rule. But what if EXTIFS includes em0??? Such a reordering
+ * would change the meaning of the ruleset.
+ * We can't just lookup the EXTIFS group and check if em0 is a member
+ * because the user is allowed to add interfaces to a group during
+ * runtime.
+ * Ergo interface groups become a de facto superblock break :-(
+ */
+ if (interface_group(por->por_rule.ifname) ||
+ interface_group(TAILQ_FIRST(&block->sb_rules)->por_rule.ifname)) {
+ if (strcasecmp(por->por_rule.ifname,
+ TAILQ_FIRST(&block->sb_rules)->por_rule.ifname) != 0)
+ return (0);
+ }
+
comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)->por_rule, NOMERGE);
comparable_rule(&b, &por->por_rule, NOMERGE);
- if (strcmp(TAILQ_FIRST(&block->sb_rules)->por_anchor,
- por->por_anchor) == 0 && memcmp(&a, &b, sizeof(a)) == 0)
+ if (memcmp(&a, &b, sizeof(a)) == 0)
return (1);
#ifdef OPT_DEBUG
@@ -1426,6 +1490,24 @@ superblock_inclusive(struct superblock *block, struct pf_opt_rule *por)
/*
+ * Figure out if an interface name is an actual interface or actually a
+ * group of interfaces.
+ */
+int
+interface_group(const char *ifname)
+{
+ if (ifname == NULL || !ifname[0])
+ return (0);
+
+ /* Real interfaces must end in a number, interface groups do not */
+ if (isdigit(ifname[strlen(ifname) - 1]))
+ return (0);
+ else
+ return (1);
+}
+
+
+/*
* Make a rule that can be directly compared by memcmp()
*/
void
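
The interface-group break added in this patch rests on a single heuristic: a name that does not end in a digit is treated as an interface group rather than a real interface. The following standalone sketch is not part of the patch; the helper name is_group and the sample names are illustrative only. It restates the same check in the same form as the new interface_group() function so it can be compiled and tried in isolation:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative restatement of the heuristic used by interface_group()
 * in the patch: real interfaces end in a unit number ("em0"), while
 * interface groups do not ("EXTIFS", "egress").
 */
static int
is_group(const char *ifname)
{
	if (ifname == NULL || ifname[0] == '\0')
		return (0);

	/* Names not ending in a digit are treated as groups. */
	return (!isdigit((unsigned char)ifname[strlen(ifname) - 1]));
}

int
main(void)
{
	const char *names[] = { "em0", "fxp1", "EXTIFS", "egress" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-8s %s\n", names[i],
		    is_group(names[i]) ? "group -> superblock break" :
		    "interface -> may merge");
	return (0);
}

In the patch itself the same test feeds superblock_inclusive(): when either rule's ifname looks like a group, the rules are only allowed into the same superblock if the names match under strcasecmp(), which keeps a pass rule on em0 from being reordered ahead of a block rule on a group that might contain em0.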