path: root/sys/netpfil
author		truckman <truckman@FreeBSD.org>		2016-07-05 00:53:01 +0000
committer	truckman <truckman@FreeBSD.org>		2016-07-05 00:53:01 +0000
commit		9e6e43be959bda5472c468f080cf53a6b89df4b1 (patch)
tree		7ec59f62a930f6429ad3c14599c07e18bc3efdf0 /sys/netpfil
parent		88737253e2b67e4109f85f15f071809a99bb081c (diff)
Fix a race condition between the main thread in aqm_pie_cleanup() and the
callout thread that can cause a kernel panic. Always do the final cleanup
in the callout thread by passing a separate callout function for that task
to callout_reset_sbt().

Protect the ref_count decrement in the callout with DN_BH_WLOCK(). All
other ref_count manipulation is protected with this lock.

There is still a tiny window between ref_count reaching zero and the end
of the callout function where it is unsafe to unload the module. Fixing
this would require the use of callout_drain(), but this can't be done
because dummynet holds a mutex and callout_drain() might sleep.

Remove the callout_pending(), callout_active(), and callout_deactivate()
calls from calculate_drop_prob(). They are not needed because this callout
uses callout_init_mtx().

Submitted by:	Rasool Al-Saadi <ralsaadi@swin.edu.au>
Approved by:	re (gjb)
MFC after:	3 days
Differential Revision:	https://reviews.freebsd.org/D6928
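
For readers unfamiliar with the pattern, the following is a minimal, hypothetical
sketch of the deferred-cleanup approach this commit adopts, not the committed code.
It shows a periodic callout tied to a per-object mutex via callout_init_mtx(), and
a teardown path that, instead of stopping the callout and freeing state in the
calling thread, rearms the callout with callout_reset_sbt() so a dedicated cleanup
handler runs in the callout thread. The names obj, obj_tick(), obj_cleanup_cb(),
obj_create(), and obj_teardown() are illustrative only.

/*
 * Minimal sketch of the deferred-cleanup pattern, not the committed code.
 * The callout is initialized with callout_init_mtx() and
 * CALLOUT_RETURNUNLOCKED, so each handler is entered with the object mutex
 * held and must release it before returning.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/callout.h>

static MALLOC_DEFINE(M_OBJ, "example_obj", "deferred-cleanup example");

struct obj {
	struct mtx	lock;
	struct callout	tick;
	/* ... per-object state ... */
};

/* Periodic work; entered with obj->lock held, must unlock before return. */
static void
obj_tick(void *arg)
{
	struct obj *o = arg;

	/* ... periodic work on 'o' ... */
	callout_reset_sbt(&o->tick, 10 * SBT_1MS, 0, obj_tick, o, 0);
	mtx_unlock(&o->lock);
}

/* Final cleanup always runs in the callout thread, never the main thread. */
static void
obj_cleanup_cb(void *arg)
{
	struct obj *o = arg;

	mtx_unlock(&o->lock);		/* entered with the mutex held */
	mtx_destroy(&o->lock);
	free(o, M_OBJ);
}

static struct obj *
obj_create(void)
{
	struct obj *o;

	o = malloc(sizeof(*o), M_OBJ, M_WAITOK | M_ZERO);
	mtx_init(&o->lock, "obj lock", NULL, MTX_DEF);
	callout_init_mtx(&o->tick, &o->lock, CALLOUT_RETURNUNLOCKED);

	mtx_lock(&o->lock);
	callout_reset_sbt(&o->tick, 10 * SBT_1MS, 0, obj_tick, o, 0);
	mtx_unlock(&o->lock);
	return (o);
}

/*
 * Teardown: instead of callout_stop() followed by free() here (which races
 * with a handler that is already running), rearm the callout so that
 * obj_cleanup_cb() performs the final cleanup in the callout thread.
 */
static void
obj_teardown(struct obj *o)
{
	mtx_lock(&o->lock);
	callout_reset_sbt(&o->tick, SBT_1US, 0, obj_cleanup_cb, o, 0);
	mtx_unlock(&o->lock);
}

The rearm-to-cleanup step in obj_teardown() mirrors what aqm_pie_cleanup() now
does with pie_callout_cleanup() in the diff below.
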
Diffstat (limited to 'sys/netpfil')
-rw-r--r--	sys/netpfil/ipfw/dn_aqm_pie.c	61
1 file changed, 28 insertions(+), 33 deletions(-)
diff --git a/sys/netpfil/ipfw/dn_aqm_pie.c b/sys/netpfil/ipfw/dn_aqm_pie.c
index c4b9401..c2e4d43 100644
--- a/sys/netpfil/ipfw/dn_aqm_pie.c
+++ b/sys/netpfil/ipfw/dn_aqm_pie.c
@@ -207,24 +207,6 @@ calculate_drop_prob(void *x)
struct dn_aqm_pie_parms *pprms;
struct pie_status *pst = (struct pie_status *) x;
- /* dealing with race condition */
- if (callout_pending(&pst->aqm_pie_callout)) {
- /* callout was reset */
- mtx_unlock(&pst->lock_mtx);
- return;
- }
-
- if (!callout_active(&pst->aqm_pie_callout)) {
- /* callout was stopped */
- mtx_unlock(&pst->lock_mtx);
- mtx_destroy(&pst->lock_mtx);
- free(x, M_DUMMYNET);
- //pst->pq->aqm_status = NULL;
- pie_desc.ref_count--;
- return;
- }
- callout_deactivate(&pst->aqm_pie_callout);
-
pprms = pst->parms;
prob = pst->drop_prob;
@@ -576,7 +558,7 @@ aqm_pie_init(struct dn_queue *q)
do { /* exit with break when error occurs*/
if (!pprms){
- D("AQM_PIE is not configured");
+ DX(2, "AQM_PIE is not configured");
err = EINVAL;
break;
}
@@ -615,6 +597,22 @@ aqm_pie_init(struct dn_queue *q)
}
/*
+ * Callout function to destroy pie mtx and free PIE status memory
+ */
+static void
+pie_callout_cleanup(void *x)
+{
+ struct pie_status *pst = (struct pie_status *) x;
+
+ mtx_unlock(&pst->lock_mtx);
+ mtx_destroy(&pst->lock_mtx);
+ free(x, M_DUMMYNET);
+ DN_BH_WLOCK();
+ pie_desc.ref_count--;
+ DN_BH_WUNLOCK();
+}
+
+/*
* Clean up PIE status for queue 'q'
* Destroy memory allocated for PIE status.
*/
@@ -640,22 +638,19 @@ aqm_pie_cleanup(struct dn_queue *q)
return 1;
}
+ /*
+ * Free the PIE status memory from the pie_callout_cleanup() callout
+ * function to avoid any potential race.
+ * We reset aqm_pie_callout to fire pie_callout_cleanup() in 1us; this
+ * cancels the scheduled calculate_drop_prob() callout and runs
+ * pie_callout_cleanup(), which frees the memory.
+ */
mtx_lock(&pst->lock_mtx);
+ callout_reset_sbt(&pst->aqm_pie_callout,
+ SBT_1US, 0, pie_callout_cleanup, pst, 0);
+ q->aqm_status = NULL;
+ mtx_unlock(&pst->lock_mtx);
- /* stop callout timer */
- if (callout_stop(&pst->aqm_pie_callout) || !(pst->sflags & PIE_ACTIVE)) {
- mtx_unlock(&pst->lock_mtx);
- mtx_destroy(&pst->lock_mtx);
- free(q->aqm_status, M_DUMMYNET);
- q->aqm_status = NULL;
- pie_desc.ref_count--;
- return 0;
- } else {
- q->aqm_status = NULL;
- mtx_unlock(&pst->lock_mtx);
- DX(2, "PIE callout has not been stoped from cleanup!");
- return EBUSY;
- }
return 0;
}
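
On the ref_count side, the sketch below is hypothetical and not part of this
commit; pie_unload_check() is an invented name. It illustrates how an unload
path would read pie_desc.ref_count under the same DN_BH_WLOCK() that the new
pie_callout_cleanup() takes for the decrement. As the log message notes, a
zero count still leaves a tiny window before the callout function returns,
and callout_drain() cannot close it here because dummynet holds a mutex and
callout_drain() might sleep.

/*
 * Hypothetical unload-side check, not in this commit.  Assumes the dummynet
 * private headers (ip_dn_private.h, dn_aqm.h) for DN_BH_WLOCK() and pie_desc.
 */
static int
pie_unload_check(void)
{
	int busy;

	DN_BH_WLOCK();
	busy = (pie_desc.ref_count > 0);
	DN_BH_WUNLOCK();

	return (busy ? EBUSY : 0);
}
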