summaryrefslogtreecommitdiffstats
path: root/lib/libthr
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2016-03-01 15:21:01 +0000
committerkib <kib@FreeBSD.org>2016-03-01 15:21:01 +0000
commitac7a1d6753fdf33b970359a74fffe286a60968fb (patch)
tree965a9bccbd1118f4ca50883462c15144fb9e1543 /lib/libthr
parent49f9d6850f93cac08f98798386d9c8da0f751470 (diff)
downloadFreeBSD-src-ac7a1d6753fdf33b970359a74fffe286a60968fb.zip
FreeBSD-src-ac7a1d6753fdf33b970359a74fffe286a60968fb.tar.gz
Add two comments explaining the fine points of the hash
implementation.

Reviewed by:	emaste
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D5490
Diffstat (limited to 'lib/libthr')
-rw-r--r--lib/libthr/thread/thr_pshared.c31
1 file changed, 31 insertions, 0 deletions
diff --git a/lib/libthr/thread/thr_pshared.c b/lib/libthr/thread/thr_pshared.c
index d40346d..e8ccf1c 100644
--- a/lib/libthr/thread/thr_pshared.c
+++ b/lib/libthr/thread/thr_pshared.c
@@ -86,6 +86,16 @@ pshared_unlock(struct pthread *curthread)
_thr_ast(curthread);
}
+/*
+ * Among all processes sharing a lock only one executes
+ * pthread_lock_destroy(). Other processes still have the hash and
+ * mapped off-page.
+ *
+ * Mitigate the problem by checking the liveness of all hashed keys
+ * periodically. Right now this is executed on each
+ * pthread_lock_destroy(), but may be done less often if found to be
+ * too time-consuming.
+ */
static void
pshared_gc(struct pthread *curthread)
{
@@ -131,6 +141,27 @@ pshared_insert(void *key, void **val)
hd = &pshared_hash[PSHARED_KEY_HASH(key)];
LIST_FOREACH(h, hd, link) {
+ /*
+ * When the key already exists in the hash, we should
+ * return either the new (just mapped) or old (hashed)
+ * val, and the other val should be unmapped to avoid
+ * address space leak.
+ *
+ * If two threads perform lock of the same object
+ * which is not yet stored in the pshared_hash, then
+ * the val already inserted by the first thread should
+ * be returned, and the second val freed (order is by
+ * the pshared_lock()). Otherwise, if we unmap the
+ * value obtained from the hash, the first thread
+ * might operate on an unmapped off-page object.
+ *
+ * There is still an issue: if hashed key was unmapped
+ * and then other page is mapped at the same key
+ * address, the hash would return the old val. I
+ * decided to handle the race of simultaneous hash
+ * insertion, leaving the unlikely remap problem
+ * unaddressed.
+ */
if (h->key == key) {
if (h->val != *val) {
munmap(*val, PAGE_SIZE);
OpenPOWER on IntegriCloud