author     glebius <glebius@FreeBSD.org>  2015-01-22 01:23:16 +0000
committer  glebius <glebius@FreeBSD.org>  2015-01-22 01:23:16 +0000
commit     12e7b30255a9de99bf8f4627681e5cfd418afab8 (patch)
tree       1f0af5a28b3ab42bc8f76c7f00102cdb21be3ca1 /sys/netpfil/pf
parent     2ed64417f268a191aaa926a0ad9e55fcec29c2a7 (diff)
Back out r276841, r276756, r276747, r276746.

The change in r276747 is very questionable, since it makes vimages more dependent on each other. But the reason for the backout is that it broke shutdown of the pf purge threads, and the kernel now panics immediately on pf module unload. Although module unloading isn't an advertised feature of pf, it is very important for the development process.

I'd like not to back out r276746, since in general it is good, but since it introduced numerous build breakages that were later addressed in r276841, r276756 and r276747, I need to back it out as well. Better to replay it cleanly from scratch.
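To make the purge-thread shutdown handshake that this backout restores easier to follow (see the pf_unload()/pf_purge_thread() hunks below), here is a minimal userspace analogy in portable C with pthreads. It is an illustration only, not the kernel code: the names end_threads and purge_thread mirror the diff, while the mutex/condition-variable pair stands in for the pf rules lock and the kernel's wakeup()/rw_sleep() primitives.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static int end_threads;		/* 0 = run, 1 = stop requested, 2 = stopped */

static void *
purge_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (end_threads == 0) {
		/* Periodic purge work would run here; the kernel thread
		 * uses a timed sleep (hz / 10) instead of a pure wait. */
		pthread_cond_wait(&cv, &lock);
	}
	/* Final cleanup passes would run here, then announce the exit. */
	end_threads = 2;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, purge_thread, NULL);

	/* Unload path: request shutdown and wait for the acknowledgement. */
	pthread_mutex_lock(&lock);
	end_threads = 1;
	pthread_cond_broadcast(&cv);
	while (end_threads < 2)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(td, NULL);
	return (0);
}

In the kernel hunks below, the same handshake is expressed with wakeup_one(pf_purge_thread) and rw_sleep() on the pf rules lock, and the purge thread acknowledges by incrementing V_pf_end_threads before calling kproc_exit().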
Diffstat (limited to 'sys/netpfil/pf')
-rw-r--r--  sys/netpfil/pf/pf.c        99
-rw-r--r--  sys/netpfil/pf/pf_if.c     36
-rw-r--r--  sys/netpfil/pf/pf_ioctl.c  48
-rw-r--r--  sys/netpfil/pf/pf_norm.c    9
-rw-r--r--  sys/netpfil/pf/pf_table.c  46
5 files changed, 145 insertions(+), 93 deletions(-)
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index bf47c18..15667a6 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -151,7 +151,6 @@ static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define V_pf_sendqueue VNET(pf_sendqueue)
static struct mtx pf_sendqueue_mtx;
-MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
#define PF_SENDQ_LOCK() mtx_lock(&pf_sendqueue_mtx)
#define PF_SENDQ_UNLOCK() mtx_unlock(&pf_sendqueue_mtx)
@@ -173,15 +172,11 @@ static VNET_DEFINE(struct task, pf_overloadtask);
#define V_pf_overloadtask VNET(pf_overloadtask)
static struct mtx pf_overloadqueue_mtx;
-MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
- "pf overload/flush queue", MTX_DEF);
#define PF_OVERLOADQ_LOCK() mtx_lock(&pf_overloadqueue_mtx)
#define PF_OVERLOADQ_UNLOCK() mtx_unlock(&pf_overloadqueue_mtx)
VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;
-MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
- MTX_DEF);
static VNET_DEFINE(uma_zone_t, pf_sources_z);
#define V_pf_sources_z VNET(pf_sources_z)
@@ -295,6 +290,8 @@ static void pf_route6(struct mbuf **, struct pf_rule *, int,
int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
+VNET_DECLARE(int, pf_end_threads);
+
VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
#define PACKET_LOOPED(pd) ((pd)->pf_mtag && \
@@ -731,7 +728,7 @@ pf_mtag_initialize()
/* Per-vnet data storage structures initialization. */
void
-pf_vnet_initialize()
+pf_initialize()
{
struct pf_keyhash *kh;
struct pf_idhash *ih;
@@ -791,9 +788,13 @@ pf_vnet_initialize()
STAILQ_INIT(&V_pf_sendqueue);
SLIST_INIT(&V_pf_overloadqueue);
TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
+ mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
+ mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
+ MTX_DEF);
/* Unlinked, but may be referenced rules. */
TAILQ_INIT(&V_pf_unlinked_rules);
+ mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
}
void
@@ -836,6 +837,10 @@ pf_cleanup()
free(pfse, M_PFTEMP);
}
+ mtx_destroy(&pf_sendqueue_mtx);
+ mtx_destroy(&pf_overloadqueue_mtx);
+ mtx_destroy(&pf_unlnkdrules_mtx);
+
uma_zdestroy(V_pf_sources_z);
uma_zdestroy(V_pf_state_z);
uma_zdestroy(V_pf_state_key_z);
@@ -1381,37 +1386,71 @@ pf_intr(void *v)
}
void
-pf_purge_thread(void *v __unused)
+pf_purge_thread(void *v)
{
u_int idx = 0;
- VNET_ITERATOR_DECL(vnet_iter);
+
+ CURVNET_SET((struct vnet *)v);
for (;;) {
- tsleep(pf_purge_thread, PWAIT, "pftm", hz / 10);
- VNET_LIST_RLOCK();
- VNET_FOREACH(vnet_iter) {
- CURVNET_SET(vnet_iter);
- /* Process 1/interval fraction of the state table every run. */
- idx = pf_purge_expired_states(idx, pf_hashmask /
- (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
-
- /* Purge other expired types every PFTM_INTERVAL seconds. */
- if (idx == 0) {
- /*
- * Order is important:
- * - states and src nodes reference rules
- * - states and rules reference kifs
- */
- pf_purge_expired_fragments();
- pf_purge_expired_src_nodes();
- pf_purge_unlinked_rules();
- pfi_kif_purge();
- }
- CURVNET_RESTORE();
+ PF_RULES_RLOCK();
+ rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);
+
+ if (V_pf_end_threads) {
+ /*
+ * To cleanse up all kifs and rules we need
+ * two runs: first one clears reference flags,
+ * then pf_purge_expired_states() doesn't
+ * raise them, and then second run frees.
+ */
+ PF_RULES_RUNLOCK();
+ pf_purge_unlinked_rules();
+ pfi_kif_purge();
+
+ /*
+ * Now purge everything.
+ */
+ pf_purge_expired_states(0, pf_hashmask);
+ pf_purge_expired_fragments();
+ pf_purge_expired_src_nodes();
+
+ /*
+ * Now all kifs & rules should be unreferenced,
+ * thus should be successfully freed.
+ */
+ pf_purge_unlinked_rules();
+ pfi_kif_purge();
+
+ /*
+ * Announce success and exit.
+ */
+ PF_RULES_RLOCK();
+ V_pf_end_threads++;
+ PF_RULES_RUNLOCK();
+ wakeup(pf_purge_thread);
+ kproc_exit(0);
+ }
+ PF_RULES_RUNLOCK();
+
+ /* Process 1/interval fraction of the state table every run. */
+ idx = pf_purge_expired_states(idx, pf_hashmask /
+ (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
+
+ /* Purge other expired types every PFTM_INTERVAL seconds. */
+ if (idx == 0) {
+ /*
+ * Order is important:
+ * - states and src nodes reference rules
+ * - states and rules reference kifs
+ */
+ pf_purge_expired_fragments();
+ pf_purge_expired_src_nodes();
+ pf_purge_unlinked_rules();
+ pfi_kif_purge();
}
- VNET_LIST_RUNLOCK();
}
/* not reached */
+ CURVNET_RESTORE();
}
u_int32_t
diff --git a/sys/netpfil/pf/pf_if.c b/sys/netpfil/pf/pf_if.c
index da79ed6..41acc7d 100644
--- a/sys/netpfil/pf/pf_if.c
+++ b/sys/netpfil/pf/pf_if.c
@@ -102,13 +102,10 @@ MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf(4) interface database");
LIST_HEAD(pfi_list, pfi_kif);
static VNET_DEFINE(struct pfi_list, pfi_unlinked_kifs);
#define V_pfi_unlinked_kifs VNET(pfi_unlinked_kifs)
-
static struct mtx pfi_unlnkdkifs_mtx;
-MTX_SYSINIT(pfi_unlnkdkifs_mtx, &pfi_unlnkdkifs_mtx, "pf unlinked interfaces",
- MTX_DEF);
void
-pfi_vnet_initialize(void)
+pfi_initialize(void)
{
struct ifg_group *ifg;
struct ifnet *ifp;
@@ -117,6 +114,9 @@ pfi_vnet_initialize(void)
V_pfi_buffer_max = 64;
V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
PFI_MTYPE, M_WAITOK);
+
+ mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF);
+
kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
PF_RULES_WLOCK();
V_pfi_all = pfi_kif_attach(kif, IFG_ALL);
@@ -129,20 +129,18 @@ pfi_vnet_initialize(void)
pfi_attach_ifnet(ifp);
IFNET_RUNLOCK();
- if (IS_DEFAULT_VNET(curvnet)) {
- pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
- pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
- pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
- pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
- pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
- pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
- pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
- pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY);
- pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
- pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
- pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
- pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
- }
+ pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
+ pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
+ pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
+ pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
+ pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
+ pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
+ pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
+ pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY);
+ pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
+ pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
+ pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
+ pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
}
void
@@ -168,6 +166,8 @@ pfi_cleanup(void)
free(p, PFI_MTYPE);
}
+ mtx_destroy(&pfi_unlnkdkifs_mtx);
+
free(V_pfi_buffer, PFI_MTYPE);
}
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index ef479bc..213e49c 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -87,7 +87,7 @@ __FBSDID("$FreeBSD$");
#include <altq/altq.h>
#endif
-static int pf_vnet_init(void);
+static int pfattach(void);
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
u_int8_t, u_int8_t, u_int8_t);
@@ -189,6 +189,7 @@ static struct cdevsw pf_cdevsw = {
static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked VNET(pf_pfil_hooked)
+VNET_DEFINE(int, pf_end_threads);
struct rwlock pf_rules_lock;
struct sx pf_ioctl_lock;
@@ -204,20 +205,17 @@ pfsync_defer_t *pfsync_defer_ptr = NULL;
pflog_packet_t *pflog_packet_ptr = NULL;
static int
-pf_vnet_init(void)
+pfattach(void)
{
u_int32_t *my_timeout = V_pf_default_rule.timeout;
int error;
if (IS_DEFAULT_VNET(curvnet))
pf_mtag_initialize();
- TAILQ_INIT(&V_pf_tags);
- TAILQ_INIT(&V_pf_qids);
-
- pf_vnet_initialize();
+ pf_initialize();
pfr_initialize();
- pfi_vnet_initialize();
- pf_vnet_normalize_init();
+ pfi_initialize();
+ pf_normalize_init();
V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
@@ -278,13 +276,10 @@ pf_vnet_init(void)
for (int i = 0; i < SCNT_MAX; i++)
V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
- if (IS_DEFAULT_VNET(curvnet)) {
- if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
- "pf purge")) != 0) {
- /* XXXGL: leaked all above. */
- return (error);
- }
- }
+ if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
+ "pf purge")) != 0)
+ /* XXXGL: leaked all above. */
+ return (error);
if ((error = swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
INTR_MPSAFE, &V_pf_swi_cookie)) != 0)
/* XXXGL: leaked all above. */
@@ -3720,11 +3715,27 @@ dehook_pf(void)
static int
pf_load(void)
{
+ int error;
+
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ V_pf_pfil_hooked = 0;
+ V_pf_end_threads = 0;
+ TAILQ_INIT(&V_pf_tags);
+ TAILQ_INIT(&V_pf_qids);
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK();
rw_init(&pf_rules_lock, "pf rulesets");
sx_init(&pf_ioctl_lock, "pf ioctl");
pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
+ if ((error = pfattach()) != 0)
+ return (error);
return (0);
}
@@ -3748,6 +3759,11 @@ pf_unload(void)
}
PF_RULES_WLOCK();
shutdown_pf();
+ V_pf_end_threads = 1;
+ while (V_pf_end_threads < 2) {
+ wakeup_one(pf_purge_thread);
+ rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
+ }
pf_normalize_cleanup();
pfi_cleanup();
pfr_cleanup();
@@ -3797,5 +3813,3 @@ static moduledata_t pf_mod = {
DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
-VNET_SYSINIT(pf_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY - 255,
- pf_vnet_init, NULL);
diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c
index fa9acd6..fb30331 100644
--- a/sys/netpfil/pf/pf_norm.c
+++ b/sys/netpfil/pf/pf_norm.c
@@ -33,7 +33,6 @@ __FBSDID("$FreeBSD$");
#include "opt_pf.h"
#include <sys/param.h>
-#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
@@ -93,7 +92,6 @@ struct pf_fragment {
};
static struct mtx pf_frag_mtx;
-MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
#define PF_FRAG_LOCK() mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK() mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT() mtx_assert(&pf_frag_mtx, MA_OWNED)
@@ -148,7 +146,7 @@ static void pf_scrub_ip6(struct mbuf **, u_int8_t);
} while(0)
void
-pf_vnet_normalize_init(void)
+pf_normalize_init(void)
{
V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
@@ -163,6 +161,9 @@ pf_vnet_normalize_init(void)
V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
+
+ mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
+
TAILQ_INIT(&V_pf_fragqueue);
TAILQ_INIT(&V_pf_cachequeue);
}
@@ -174,6 +175,8 @@ pf_normalize_cleanup(void)
uma_zdestroy(V_pf_state_scrub_z);
uma_zdestroy(V_pf_frent_z);
uma_zdestroy(V_pf_frag_z);
+
+ mtx_destroy(&pf_frag_mtx);
}
static int
diff --git a/sys/netpfil/pf/pf_table.c b/sys/netpfil/pf/pf_table.c
index 7cdca40..b9f13b9 100644
--- a/sys/netpfil/pf/pf_table.c
+++ b/sys/netpfil/pf/pf_table.c
@@ -184,13 +184,9 @@ static struct pfr_kentry
static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
-VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
-#define V_pfr_ktables VNET(pfr_ktables)
-
+struct pfr_ktablehead pfr_ktables;
struct pfr_table pfr_nulltable;
-
-VNET_DEFINE(int, pfr_ktable_cnt);
-#define V_pfr_ktable_cnt VNET(pfr_ktable_cnt)
+int pfr_ktable_cnt;
void
pfr_initialize(void)
@@ -1087,7 +1083,7 @@ pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
return (ENOENT);
SLIST_INIT(&workq);
- RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
if (pfr_skip_table(filter, p, flags))
continue;
if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
@@ -1122,7 +1118,7 @@ pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
flags & PFR_FLAG_USERIOCTL))
senderr(EINVAL);
key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
- p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
if (p == NULL) {
p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
if (p == NULL)
@@ -1138,7 +1134,7 @@ pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
/* find or create root table */
bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
- r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+ r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
if (r != NULL) {
p->pfrkt_root = r;
goto _skip;
@@ -1194,7 +1190,7 @@ pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
- p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
SLIST_FOREACH(q, &workq, pfrkt_workq)
if (!pfr_ktable_compare(p, q))
@@ -1233,7 +1229,7 @@ pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
*size = n;
return (0);
}
- RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
if (pfr_skip_table(filter, p, flags))
continue;
if (n-- <= 0)
@@ -1268,7 +1264,7 @@ pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
return (0);
}
SLIST_INIT(&workq);
- RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
if (pfr_skip_table(filter, p, flags))
continue;
if (n-- <= 0)
@@ -1300,7 +1296,7 @@ pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
if (pfr_validate_table(&key.pfrkt_t, 0, 0))
return (EINVAL);
- p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
if (p != NULL) {
SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
xzero++;
@@ -1332,7 +1328,7 @@ pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
- p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
~clrflag;
@@ -1374,7 +1370,7 @@ pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
if (rs == NULL)
return (ENOMEM);
SLIST_INIT(&workq);
- RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
continue;
@@ -1419,7 +1415,7 @@ pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
return (EBUSY);
tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
SLIST_INIT(&tableq);
- kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
+ kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
if (kt == NULL) {
kt = pfr_create_ktable(tbl, 0, 1);
if (kt == NULL)
@@ -1432,7 +1428,7 @@ pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
/* find or create root table */
bzero(&key, sizeof(key));
strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
- rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
+ rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
if (rt != NULL) {
kt->pfrkt_root = rt;
goto _skip;
@@ -1509,7 +1505,7 @@ pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
if (rs == NULL || !rs->topen || ticket != rs->tticket)
return (0);
SLIST_INIT(&workq);
- RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
continue;
@@ -1545,7 +1541,7 @@ pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
return (EBUSY);
SLIST_INIT(&workq);
- RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
continue;
@@ -1691,7 +1687,7 @@ pfr_table_count(struct pfr_table *filter, int flags)
PF_RULES_ASSERT();
if (flags & PFR_FLAG_ALLRSETS)
- return (V_pfr_ktable_cnt);
+ return (pfr_ktable_cnt);
if (filter->pfrt_anchor[0]) {
rs = pf_find_ruleset(filter->pfrt_anchor);
return ((rs != NULL) ? rs->tables : -1);
@@ -1724,8 +1720,8 @@ pfr_insert_ktable(struct pfr_ktable *kt)
PF_RULES_WASSERT();
- RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
- V_pfr_ktable_cnt++;
+ RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
+ pfr_ktable_cnt++;
if (kt->pfrkt_root != NULL)
if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
pfr_setflags_ktable(kt->pfrkt_root,
@@ -1756,14 +1752,14 @@ pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
if (!(newf & PFR_TFLAG_ACTIVE))
newf &= ~PFR_TFLAG_USRMASK;
if (!(newf & PFR_TFLAG_SETMASK)) {
- RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
+ RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
if (kt->pfrkt_root != NULL)
if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
pfr_setflags_ktable(kt->pfrkt_root,
kt->pfrkt_root->pfrkt_flags &
~PFR_TFLAG_REFDANCHOR);
pfr_destroy_ktable(kt, 1);
- V_pfr_ktable_cnt--;
+ pfr_ktable_cnt--;
return;
}
if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
@@ -1884,7 +1880,7 @@ static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
/* struct pfr_ktable start like a struct pfr_table */
- return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
+ return (RB_FIND(pfr_ktablehead, &pfr_ktables,
(struct pfr_ktable *)tbl));
}