author    glebius <glebius@FreeBSD.org>  2014-03-11 15:43:06 +0000
committer glebius <glebius@FreeBSD.org>  2014-03-11 15:43:06 +0000
commit    71d3a4f585b759a3740834be41625b7dc0e5fb24 (patch)
tree      21738f0e36adc0d336cb80148b7c296cd41323bf /sys/netpfil
parent    cbdb898ddfc732494e2b5679eac39b0b74443173 (diff)
Merge r261882, r261898, r261937, r262760, r262799:
Once pf was no longer covered by a single mutex, many of its counters became race-prone. Some only gather statistics, but some are later used in calculations. A real problem was a race-provoked underflow of the states_cur counter on a rule: once it drops below zero, it wraps to UINT32_MAX. That value is later used in pf_state_expires(), so any state created by the rule is immediately expired. Thus, make the states_cur, states_tot and src_nodes fields of struct pf_rule be counter(9)s.
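For context on the fix: counter(9) keeps one slot per CPU, so concurrent counter_u64_add() calls never lose updates the way unlocked ++/-- on a plain uint32_t field can, and a lost decrement is exactly what let states_cur wrap to UINT32_MAX. The sketch below is not from the patch; struct example_rule and its functions are hypothetical names used only to illustrate the alloc/add/fetch/free lifecycle the diff adopts (only the counter_u64_* calls are the real KPI).

	/*
	 * Hypothetical sketch of the counter(9) lifecycle this change
	 * introduces for pf rule counters.
	 */
	#include <sys/types.h>
	#include <sys/systm.h>
	#include <sys/counter.h>
	#include <sys/malloc.h>

	struct example_rule {
		counter_u64_t	states_cur;	/* per-CPU, lock-free updates */
		uint32_t	max_states;
	};

	static void
	example_rule_init(struct example_rule *r)
	{

		/* Allocated per rule, as pfattach()/DIOCADDRULE do below. */
		r->states_cur = counter_u64_alloc(M_WAITOK);
	}

	static int
	example_state_create(struct example_rule *r)
	{

		/*
		 * counter_u64_fetch() sums the per-CPU slots; the result may
		 * be slightly stale, which is acceptable for a limit check.
		 */
		if (r->max_states != 0 &&
		    counter_u64_fetch(r->states_cur) >= r->max_states)
			return (1);		/* limit hit, drop the state */
		counter_u64_add(r->states_cur, 1);	/* never loses an update */
		return (0);
	}

	static void
	example_rule_fini(struct example_rule *r)
	{

		counter_u64_free(r->states_cur);	/* pf_free_rule() counterpart */
	}

The trade-off is that counter_u64_fetch() walks all CPUs and is only approximately current, which is why the diff replaces direct reads of states_cur, states_tot and src_nodes with explicit fetches at the comparison and copyout sites.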
Diffstat (limited to 'sys/netpfil')
-rw-r--r--	sys/netpfil/pf/if_pfsync.c	16
-rw-r--r--	sys/netpfil/pf/pf.c		55
-rw-r--r--	sys/netpfil/pf/pf_ioctl.c	40
3 files changed, 65 insertions(+), 46 deletions(-)
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index 982f856..2afb668 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -436,7 +436,8 @@ pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
else
r = &V_pf_default_rule;
- if ((r->max_states && r->states_cur >= r->max_states))
+ if ((r->max_states &&
+ counter_u64_fetch(r->states_cur) >= r->max_states))
goto cleanup;
/*
@@ -514,18 +515,15 @@ pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
st->pfsync_time = time_uptime;
st->sync_state = PFSYNC_S_NONE;
- /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
- r->states_cur++;
- r->states_tot++;
-
if (!(flags & PFSYNC_SI_IOCTL))
st->state_flags |= PFSTATE_NOSYNC;
- if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
- /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
- r->states_cur--;
+ if ((error = pf_state_insert(kif, skw, sks, st)) != 0)
goto cleanup_state;
- }
+
+ /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
+ counter_u64_add(r->states_cur, 1);
+ counter_u64_add(r->states_tot, 1);
if (!(flags & PFSYNC_SI_IOCTL)) {
st->state_flags &= ~PFSTATE_NOSYNC;
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index af49c25..f3eb98e 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -327,27 +327,27 @@ VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
#define BOUND_IFACE(r, k) \
((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
-#define STATE_INC_COUNTERS(s) \
- do { \
- s->rule.ptr->states_cur++; \
- s->rule.ptr->states_tot++; \
- if (s->anchor.ptr != NULL) { \
- s->anchor.ptr->states_cur++; \
- s->anchor.ptr->states_tot++; \
- } \
- if (s->nat_rule.ptr != NULL) { \
- s->nat_rule.ptr->states_cur++; \
- s->nat_rule.ptr->states_tot++; \
- } \
+#define STATE_INC_COUNTERS(s) \
+ do { \
+ counter_u64_add(s->rule.ptr->states_cur, 1); \
+ counter_u64_add(s->rule.ptr->states_tot, 1); \
+ if (s->anchor.ptr != NULL) { \
+ counter_u64_add(s->anchor.ptr->states_cur, 1); \
+ counter_u64_add(s->anchor.ptr->states_tot, 1); \
+ } \
+ if (s->nat_rule.ptr != NULL) { \
+ counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
+ counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
+ } \
} while (0)
-#define STATE_DEC_COUNTERS(s) \
- do { \
- if (s->nat_rule.ptr != NULL) \
- s->nat_rule.ptr->states_cur--; \
- if (s->anchor.ptr != NULL) \
- s->anchor.ptr->states_cur--; \
- s->rule.ptr->states_cur--; \
+#define STATE_DEC_COUNTERS(s) \
+ do { \
+ if (s->nat_rule.ptr != NULL) \
+ counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
+ if (s->anchor.ptr != NULL) \
+ counter_u64_add(s->anchor.ptr->states_cur, -1); \
+ counter_u64_add(s->rule.ptr->states_cur, -1); \
} while (0)
static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
@@ -639,7 +639,7 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
PF_HASHROW_ASSERT(sh);
if (!rule->max_src_nodes ||
- rule->src_nodes < rule->max_src_nodes)
+ counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
else
V_pf_status.lcounters[LCNT_SRCNODES]++;
@@ -659,7 +659,7 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
(*sn)->creation = time_uptime;
(*sn)->ruletype = rule->action;
if ((*sn)->rule.ptr != NULL)
- (*sn)->rule.ptr->src_nodes++;
+ counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
PF_HASHROW_UNLOCK(sh);
V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
V_pf_status.src_nodes++;
@@ -684,7 +684,7 @@ pf_unlink_src_node_locked(struct pf_src_node *src)
#endif
LIST_REMOVE(src, entry);
if (src->rule.ptr)
- src->rule.ptr->src_nodes--;
+ counter_u64_add(src->rule.ptr->src_nodes, -1);
V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
V_pf_status.src_nodes--;
}
@@ -1470,7 +1470,7 @@ pf_state_expires(const struct pf_state *state)
start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
if (start) {
end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
- states = state->rule.ptr->states_cur; /* XXXGL */
+ states = counter_u64_fetch(state->rule.ptr->states_cur);
} else {
start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
@@ -1579,11 +1579,7 @@ pf_unlink_state(struct pf_state *s, u_int flags)
if (pfsync_delete_state_ptr != NULL)
pfsync_delete_state_ptr(s);
- --s->rule.ptr->states_cur;
- if (s->nat_rule.ptr != NULL)
- --s->nat_rule.ptr->states_cur;
- if (s->anchor.ptr != NULL)
- --s->anchor.ptr->states_cur;
+ STATE_DEC_COUNTERS(s);
s->timeout = PFTM_UNLINKED;
@@ -3444,7 +3440,8 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
u_short reason;
/* check maximums */
- if (r->max_states && (r->states_cur >= r->max_states)) {
+ if (r->max_states &&
+ (counter_u64_fetch(r->states_cur) >= r->max_states)) {
V_pf_status.lcounters[LCNT_STATES]++;
REASON_SET(&reason, PFRES_MAXSTATES);
return (PF_DROP);
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index 8fe8121..aa42040 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -225,6 +225,10 @@ pfattach(void)
V_pf_default_rule.nr = -1;
V_pf_default_rule.rtableid = -1;
+ V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
+ V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
+ V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
+
/* initialize default timeouts */
my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
@@ -394,6 +398,9 @@ pf_free_rule(struct pf_rule *rule)
pfi_kif_unref(rule->kif);
pf_anchor_remove(rule);
pf_empty_pool(&rule->rpool.list);
+ counter_u64_free(rule->states_cur);
+ counter_u64_free(rule->states_tot);
+ counter_u64_free(rule->src_nodes);
free(rule, M_PFRULE);
}
@@ -1145,6 +1152,9 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
bcopy(&pr->rule, rule, sizeof(struct pf_rule));
if (rule->ifname[0])
kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
+ rule->states_cur = counter_u64_alloc(M_WAITOK);
+ rule->states_tot = counter_u64_alloc(M_WAITOK);
+ rule->src_nodes = counter_u64_alloc(M_WAITOK);
rule->cuid = td->td_ucred->cr_ruid;
rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
TAILQ_INIT(&rule->rpool.list);
@@ -1261,6 +1271,9 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
#undef ERROUT
DIOCADDRULE_error:
PF_RULES_WUNLOCK();
+ counter_u64_free(rule->states_cur);
+ counter_u64_free(rule->states_tot);
+ counter_u64_free(rule->src_nodes);
free(rule, M_PFRULE);
if (kif)
free(kif, PFI_MTYPE);
@@ -1332,6 +1345,9 @@ DIOCADDRULE_error:
break;
}
bcopy(rule, &pr->rule, sizeof(struct pf_rule));
+ pr->rule.u_states_cur = counter_u64_fetch(rule->states_cur);
+ pr->rule.u_states_tot = counter_u64_fetch(rule->states_tot);
+ pr->rule.u_src_nodes = counter_u64_fetch(rule->src_nodes);
if (pf_anchor_copyout(ruleset, rule, pr)) {
PF_RULES_WUNLOCK();
error = EBUSY;
@@ -1350,7 +1366,7 @@ DIOCADDRULE_error:
rule->evaluations = 0;
rule->packets[0] = rule->packets[1] = 0;
rule->bytes[0] = rule->bytes[1] = 0;
- rule->states_tot = 0;
+ counter_u64_zero(rule->states_tot);
}
PF_RULES_WUNLOCK();
break;
@@ -1390,15 +1406,14 @@ DIOCADDRULE_error:
#endif /* INET6 */
newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
+ if (newrule->ifname[0])
+ kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
+ newrule->states_cur = counter_u64_alloc(M_WAITOK);
+ newrule->states_tot = counter_u64_alloc(M_WAITOK);
+ newrule->src_nodes = counter_u64_alloc(M_WAITOK);
newrule->cuid = td->td_ucred->cr_ruid;
newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
TAILQ_INIT(&newrule->rpool.list);
- /* Initialize refcounting. */
- newrule->states_cur = 0;
- newrule->entries.tqe_prev = NULL;
-
- if (newrule->ifname[0])
- kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
}
#define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; }
@@ -1566,8 +1581,12 @@ DIOCADDRULE_error:
#undef ERROUT
DIOCCHANGERULE_error:
PF_RULES_WUNLOCK();
- if (newrule != NULL)
+ if (newrule != NULL) {
+ counter_u64_free(newrule->states_cur);
+ counter_u64_free(newrule->states_tot);
+ counter_u64_free(newrule->src_nodes);
free(newrule, M_PFRULE);
+ }
if (kif != NULL)
free(kif, PFI_MTYPE);
break;
@@ -3423,6 +3442,11 @@ shutdown_pf(void)
char nn = '\0';
V_pf_status.running = 0;
+
+ counter_u64_free(V_pf_default_rule.states_cur);
+ counter_u64_free(V_pf_default_rule.states_tot);
+ counter_u64_free(V_pf_default_rule.src_nodes);
+
do {
if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
!= 0) {