author		glebius <glebius@FreeBSD.org>	2014-02-14 10:05:21 +0000
committer	glebius <glebius@FreeBSD.org>	2014-02-14 10:05:21 +0000
commit		1ea1d562a3cd9a58f90832eaff87d94ede9b5bfc (patch)
tree		d75437f1387850beb0727915096f9a956d708eb3 /sys/netpfil
parent		e7560978e3e057a99776d754c463415dceea5dd7 (diff)
Once pf was no longer covered by a single mutex, many counters in it
became race prone. Some just gather statistics, but some are later used
in different calculations. A real problem was the race-provoked underflow
of the states_cur counter on a rule. Once it goes below zero, it wraps to
UINT32_MAX. Later this value is used in pf_state_expires() and any state
created by this rule is immediately expired.

Thus, make the fields states_cur, states_tot and src_nodes of struct
pf_rule be counter(9)s.

Thanks to Dennis for providing me shell access to the problematic box and
for his help with reproducing, debugging and investigating the problem.

Thanks to:		Dennis Yusupoff <dyr smartspb.net>
Also reported by:	dumbbell, pgj, Rambler
Sponsored by:		Nginx, Inc.
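The wrap described above is plain unsigned arithmetic. The following
minimal userland sketch (illustrative only, not part of the patch; the
variable names are hypothetical) shows how a single lost update drives a
states_cur-style counter below zero, after which the adaptive-timeout
math in pf_state_expires() sees an enormous state count:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint32_t states_cur = 1;	/* one state tracked by the rule */

	/*
	 * Two CPUs each unlink a state, but one of the matching
	 * increments was lost in a racy read-modify-write, so the
	 * counter is decremented twice for a single increment.
	 */
	states_cur--;
	states_cur--;			/* 0 - 1 wraps to UINT32_MAX */

	printf("states_cur = %" PRIu32 "\n", states_cur);
	/* Prints 4294967295: every state now looks far past any limit. */
	return (0);
}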
Diffstat (limited to 'sys/netpfil')
-rw-r--r--	sys/netpfil/pf/if_pfsync.c	16
-rw-r--r--	sys/netpfil/pf/pf.c	55
-rw-r--r--	sys/netpfil/pf/pf_ioctl.c	47
3 files changed, 72 insertions, 46 deletions
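Every hunk below follows the same counter(9) lifecycle: counters are
allocated when a rule is created, bumped with counter_u64_add(), summed
with counter_u64_fetch() wherever the old integer fields were read, and
released with counter_u64_free(). A condensed sketch of that pattern,
using a hypothetical struct my_rule rather than the real struct pf_rule:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_MYRULE, "my_rule", "example rule");

struct my_rule {
	counter_u64_t	states_cur;	/* per-CPU; was a plain u_int32_t */
	u_int32_t	max_states;
};

static struct my_rule *
my_rule_alloc(void)
{
	struct my_rule *r;

	r = malloc(sizeof(*r), M_MYRULE, M_WAITOK | M_ZERO);
	r->states_cur = counter_u64_alloc(M_WAITOK);	/* cannot fail */
	return (r);
}

static int
my_rule_state_insert(struct my_rule *r)
{

	/*
	 * counter_u64_fetch() sums the per-CPU slots; it is not
	 * serialized against concurrent adds, so the limit check is
	 * approximate -- acceptable for a resource cap.  The add
	 * itself never loses updates, so no underflow can occur.
	 */
	if (r->max_states &&
	    counter_u64_fetch(r->states_cur) >= r->max_states)
		return (E2BIG);
	counter_u64_add(r->states_cur, 1);
	return (0);
}

static void
my_rule_free(struct my_rule *r)
{

	counter_u64_free(r->states_cur);
	free(r, M_MYRULE);
}

counter_u64_zero() resets a counter in place, which is why the
DIOCCLRRULECTRS path below replaces the old "rule->states_tot = 0"
assignment with counter_u64_zero(rule->states_tot).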
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index 471be3e..22ed46c 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -438,7 +438,8 @@ pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
else
r = &V_pf_default_rule;
- if ((r->max_states && r->states_cur >= r->max_states))
+ if ((r->max_states &&
+ counter_u64_fetch(r->states_cur) >= r->max_states))
goto cleanup;
/*
@@ -516,18 +517,15 @@ pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
st->pfsync_time = time_uptime;
st->sync_state = PFSYNC_S_NONE;
- /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
- r->states_cur++;
- r->states_tot++;
-
if (!(flags & PFSYNC_SI_IOCTL))
st->state_flags |= PFSTATE_NOSYNC;
- if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
- /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
- r->states_cur--;
+ if ((error = pf_state_insert(kif, skw, sks, st)) != 0)
goto cleanup_state;
- }
+
+ /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
+ counter_u64_add(r->states_cur, 1);
+ counter_u64_add(r->states_tot, 1);
if (!(flags & PFSYNC_SI_IOCTL)) {
st->state_flags &= ~PFSTATE_NOSYNC;
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index c0ae748..496ff13 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -335,27 +335,27 @@ enum { PF_ICMP_MULTI_NONE, PF_ICMP_MULTI_SOLICITED, PF_ICMP_MULTI_LINK };
#define BOUND_IFACE(r, k) \
((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
-#define STATE_INC_COUNTERS(s) \
- do { \
- s->rule.ptr->states_cur++; \
- s->rule.ptr->states_tot++; \
- if (s->anchor.ptr != NULL) { \
- s->anchor.ptr->states_cur++; \
- s->anchor.ptr->states_tot++; \
- } \
- if (s->nat_rule.ptr != NULL) { \
- s->nat_rule.ptr->states_cur++; \
- s->nat_rule.ptr->states_tot++; \
- } \
+#define STATE_INC_COUNTERS(s) \
+ do { \
+ counter_u64_add(s->rule.ptr->states_cur, 1); \
+ counter_u64_add(s->rule.ptr->states_tot, 1); \
+ if (s->anchor.ptr != NULL) { \
+ counter_u64_add(s->anchor.ptr->states_cur, 1); \
+ counter_u64_add(s->anchor.ptr->states_tot, 1); \
+ } \
+ if (s->nat_rule.ptr != NULL) { \
+ counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
+ counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
+ } \
} while (0)
-#define STATE_DEC_COUNTERS(s) \
- do { \
- if (s->nat_rule.ptr != NULL) \
- s->nat_rule.ptr->states_cur--; \
- if (s->anchor.ptr != NULL) \
- s->anchor.ptr->states_cur--; \
- s->rule.ptr->states_cur--; \
+#define STATE_DEC_COUNTERS(s) \
+ do { \
+ if (s->nat_rule.ptr != NULL) \
+ counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
+ if (s->anchor.ptr != NULL) \
+ counter_u64_add(s->anchor.ptr->states_cur, -1); \
+ counter_u64_add(s->rule.ptr->states_cur, -1); \
} while (0)
static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
@@ -647,7 +647,7 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
PF_HASHROW_ASSERT(sh);
if (!rule->max_src_nodes ||
- rule->src_nodes < rule->max_src_nodes)
+ counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
else
V_pf_status.lcounters[LCNT_SRCNODES]++;
@@ -667,7 +667,7 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
(*sn)->creation = time_uptime;
(*sn)->ruletype = rule->action;
if ((*sn)->rule.ptr != NULL)
- (*sn)->rule.ptr->src_nodes++;
+ counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
PF_HASHROW_UNLOCK(sh);
V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
V_pf_status.src_nodes++;
@@ -692,7 +692,7 @@ pf_unlink_src_node_locked(struct pf_src_node *src)
#endif
LIST_REMOVE(src, entry);
if (src->rule.ptr)
- src->rule.ptr->src_nodes--;
+ counter_u64_add(src->rule.ptr->src_nodes, -1);
V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
V_pf_status.src_nodes--;
}
@@ -1478,7 +1478,7 @@ pf_state_expires(const struct pf_state *state)
start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
if (start) {
end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
- states = state->rule.ptr->states_cur; /* XXXGL */
+ states = counter_u64_fetch(state->rule.ptr->states_cur);
} else {
start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
@@ -1587,11 +1587,7 @@ pf_unlink_state(struct pf_state *s, u_int flags)
if (pfsync_delete_state_ptr != NULL)
pfsync_delete_state_ptr(s);
- --s->rule.ptr->states_cur;
- if (s->nat_rule.ptr != NULL)
- --s->nat_rule.ptr->states_cur;
- if (s->anchor.ptr != NULL)
- --s->anchor.ptr->states_cur;
+ STATE_DEC_COUNTERS(s);
s->timeout = PFTM_UNLINKED;
@@ -3606,7 +3602,8 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
u_short reason;
/* check maximums */
- if (r->max_states && (r->states_cur >= r->max_states)) {
+ if (r->max_states &&
+ (counter_u64_fetch(r->states_cur) >= r->max_states)) {
V_pf_status.lcounters[LCNT_STATES]++;
REASON_SET(&reason, PFRES_MAXSTATES);
return (PF_DROP);
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index 7c3ca0a..c12fae6 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -229,6 +229,10 @@ pfattach(void)
V_pf_default_rule.nr = -1;
V_pf_default_rule.rtableid = -1;
+ V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
+ V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
+ V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
+
/* initialize default timeouts */
my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
@@ -398,6 +402,9 @@ pf_free_rule(struct pf_rule *rule)
pfi_kif_unref(rule->kif);
pf_anchor_remove(rule);
pf_empty_pool(&rule->rpool.list);
+ counter_u64_free(rule->states_cur);
+ counter_u64_free(rule->states_tot);
+ counter_u64_free(rule->src_nodes);
free(rule, M_PFRULE);
}
@@ -1149,6 +1156,9 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
bcopy(&pr->rule, rule, sizeof(struct pf_rule));
if (rule->ifname[0])
kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
+ rule->states_cur = counter_u64_alloc(M_WAITOK);
+ rule->states_tot = counter_u64_alloc(M_WAITOK);
+ rule->src_nodes = counter_u64_alloc(M_WAITOK);
rule->cuid = td->td_ucred->cr_ruid;
rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
TAILQ_INIT(&rule->rpool.list);
@@ -1265,6 +1275,9 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
#undef ERROUT
DIOCADDRULE_error:
PF_RULES_WUNLOCK();
+ counter_u64_free(rule->states_cur);
+ counter_u64_free(rule->states_tot);
+ counter_u64_free(rule->src_nodes);
free(rule, M_PFRULE);
if (kif)
free(kif, PFI_MTYPE);
@@ -1336,6 +1349,16 @@ DIOCADDRULE_error:
break;
}
bcopy(rule, &pr->rule, sizeof(struct pf_rule));
+ /*
+ * XXXGL: this is what happens when internal kernel
+ * structures are used as ioctl API structures.
+ */
+ pr->rule.states_cur =
+ (counter_u64_t )counter_u64_fetch(rule->states_cur);
+ pr->rule.states_tot =
+ (counter_u64_t )counter_u64_fetch(rule->states_tot);
+ pr->rule.src_nodes =
+ (counter_u64_t )counter_u64_fetch(rule->src_nodes);
if (pf_anchor_copyout(ruleset, rule, pr)) {
PF_RULES_WUNLOCK();
error = EBUSY;
@@ -1354,7 +1377,7 @@ DIOCADDRULE_error:
rule->evaluations = 0;
rule->packets[0] = rule->packets[1] = 0;
rule->bytes[0] = rule->bytes[1] = 0;
- rule->states_tot = 0;
+ counter_u64_zero(rule->states_tot);
}
PF_RULES_WUNLOCK();
break;
@@ -1394,15 +1417,14 @@ DIOCADDRULE_error:
#endif /* INET6 */
newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
+ if (newrule->ifname[0])
+ kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
+ newrule->states_cur = counter_u64_alloc(M_WAITOK);
+ newrule->states_tot = counter_u64_alloc(M_WAITOK);
+ newrule->src_nodes = counter_u64_alloc(M_WAITOK);
newrule->cuid = td->td_ucred->cr_ruid;
newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
TAILQ_INIT(&newrule->rpool.list);
- /* Initialize refcounting. */
- newrule->states_cur = 0;
- newrule->entries.tqe_prev = NULL;
-
- if (newrule->ifname[0])
- kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
}
#define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
@@ -1570,8 +1592,12 @@ DIOCADDRULE_error:
#undef ERROUT
DIOCCHANGERULE_error:
PF_RULES_WUNLOCK();
- if (newrule != NULL)
+ if (newrule != NULL) {
+ counter_u64_free(newrule->states_cur);
+ counter_u64_free(newrule->states_tot);
+ counter_u64_free(newrule->src_nodes);
free(newrule, M_PFRULE);
+ }
if (kif != NULL)
free(kif, PFI_MTYPE);
break;
@@ -3427,6 +3453,11 @@ shutdown_pf(void)
char nn = '\0';
V_pf_status.running = 0;
+
+ counter_u64_free(V_pf_default_rule.states_cur);
+ counter_u64_free(V_pf_default_rule.states_tot);
+ counter_u64_free(V_pf_default_rule.src_nodes);
+
do {
if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
!= 0) {