From 913a73686a6ddb969bcf6028a5577e494ace046f Mon Sep 17 00:00:00 2001 From: bmilekic Date: Thu, 8 Mar 2001 19:21:45 +0000 Subject: Fix a race condition similar to the one that existed in the mbuf code. When we go into an interruptible sleep and we increment a sleep count, we make sure that we are the thread that will decrement the count when we wake up. Otherwise, what happens is that if we get interrupted (signal) and we have to wake up, but before we get our mutex, some thread that wants to wake us up detects that the count is non-zero and so enters wakeup_one(), but there's nothing on the sleep queue and so we don't get woken up. The thread will still decrement the sleep count, which is bad because we will also decrement it again later (as we got interrupted) and are already off the sleep queue. --- sys/kern/uipc_syscalls.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'sys/kern/uipc_syscalls.c') diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c index d849b21..2526c6b 100644 --- a/sys/kern/uipc_syscalls.c +++ b/sys/kern/uipc_syscalls.c @@ -1461,10 +1461,13 @@ sf_buf_alloc() sf_buf_alloc_want++; error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH, "sfbufa", 0); - if (error != 0) { - sf_buf_alloc_want--; + sf_buf_alloc_want--; + + /* + * If we got a signal, don't risk going back to sleep. + */ + if (error) break; - } } SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list); mtx_unlock(&sf_freelist.sf_lock); @@ -1499,10 +1502,8 @@ sf_buf_free(caddr_t addr, void *args) sf->m = NULL; mtx_lock(&sf_freelist.sf_lock); SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list); - if (sf_buf_alloc_want) { - sf_buf_alloc_want--; + if (sf_buf_alloc_want > 0) wakeup_one(&sf_freelist); - } mtx_unlock(&sf_freelist.sf_lock); } -- cgit v1.1