summaryrefslogtreecommitdiffstats
path: root/sys/net/pfil.c
diff options
context:
space:
mode:
authormelifaro <melifaro@FreeBSD.org>2012-10-22 14:10:17 +0000
committermelifaro <melifaro@FreeBSD.org>2012-10-22 14:10:17 +0000
commit030e8d5babad5063a71805311c5d64a891cd3f24 (patch)
treeff3e75b886091b1b012e8212d8b884514bbe1142 /sys/net/pfil.c
parentf9a05f9a0a07c02f6cb54808ae52eef16ee600df (diff)
downloadFreeBSD-src-030e8d5babad5063a71805311c5d64a891cd3f24.zip
FreeBSD-src-030e8d5babad5063a71805311c5d64a891cd3f24.tar.gz
Make PFIL use per-VNET lock instead of per-AF lock. Since most used packet
filters (ipfw and PF) use the same ruleset with the same lock for both AF_INET and AF_INET6, there is no need for more fine-grained locking. However, it is possible to request a private lock by specifying the PFIL_FLAG_PRIVATE_LOCK flag in the pfil_head structure (see pfil.9 for more details). Export the PFIL lock via an rw_lock(9)/rm_lock(9)-like API, permitting pfil consumers to use this lock instead of their own lock. This helps reduce locking on the main traffic path. pfil_assert() is currently not implemented due to the absence of rm_assert(). Waiting for some kind of r234648 to be merged in HEAD. This change is part of a bigger patch reducing routing locking. Sponsored by: Yandex LLC Reviewed by: glebius, ae OK'd by: silence on net@ MFC after: 3 weeks
Diffstat (limited to 'sys/net/pfil.c')
-rw-r--r--sys/net/pfil.c58
1 files changed, 58 insertions, 0 deletions
diff --git a/sys/net/pfil.c b/sys/net/pfil.c
index a11950f..06da0be 100644
--- a/sys/net/pfil.c
+++ b/sys/net/pfil.c
@@ -61,6 +61,8 @@ static int pfil_list_remove(pfil_list_t *,
LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define V_pfil_head_list VNET(pfil_head_list)
+VNET_DEFINE(struct rmlock, pfil_lock);
+#define V_pfil_lock VNET(pfil_lock)
/*
* pfil_run_hooks() runs the specified packet filter hooks.
@@ -91,6 +93,60 @@ pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
}
/*
+ * pfil_try_rlock() tries to acquire the rm reader lock for the specified
+ * head without blocking.  Returns non-zero if the lock was acquired
+ * immediately, zero otherwise (rm_try_rlock(9) convention).
+ */
+int
+pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
+{
+ return PFIL_TRY_RLOCK(ph, tracker);
+}
+
+/*
+ * pfil_rlock() acquires the rm reader lock for the specified head.
+ * The tracker is filled in by the rm lock and must be passed to the
+ * matching pfil_runlock() call.
+ */
+void
+pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
+{
+ PFIL_RLOCK(ph, tracker);
+}
+
+/*
+ * pfil_runlock() releases the reader lock for the specified head.
+ * The tracker must be the same one that was passed to the matching
+ * pfil_rlock() (or successful pfil_try_rlock()) call.
+ */
+void
+pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
+{
+ PFIL_RUNLOCK(ph, tracker);
+}
+
+/*
+ * pfil_wlock() acquires the writer lock for the specified head,
+ * blocking until it is available (rm_wlock(9) semantics).
+ */
+void
+pfil_wlock(struct pfil_head *ph)
+{
+ PFIL_WLOCK(ph);
+}
+
+/*
+ * pfil_wunlock() releases the writer lock for the specified head.
+ * The caller must hold the writer lock via a prior pfil_wlock().
+ */
+void
+pfil_wunlock(struct pfil_head *ph)
+{
+ PFIL_WUNLOCK(ph);
+}
+
+/*
+ * pfil_wowned() returns non-zero if the writer lock for the specified
+ * head is owned by the current thread.  (The original comment claimed
+ * it "releases" the lock — a copy-paste error; it only tests ownership.)
+ */
+int
+pfil_wowned(struct pfil_head *ph)
+{
+ return PFIL_WOWNED(ph);
+}
+/*
* pfil_head_register() registers a pfil_head with the packet filter hook
* mechanism.
*/
@@ -295,6 +351,7 @@ vnet_pfil_init(const void *unused)
{
LIST_INIT(&V_pfil_head_list);
+ PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
return (0);
}
@@ -306,6 +363,7 @@ vnet_pfil_uninit(const void *unused)
{
/* XXX should panic if list is not empty */
+ PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
return (0);
}
OpenPOWER on IntegriCloud