author     mtm <mtm@FreeBSD.org>  2004-01-19 14:51:45 +0000
committer  mtm <mtm@FreeBSD.org>  2004-01-19 14:51:45 +0000
commit     2d3f49ebd38d6441b693d51adcf071088817ddf5 (patch)
tree       8ea09bd49e1effae3ff537f35c5766a1cdc3bf69 /lib
parent     fc6eb88ff293065b8973e0a33349d6ee1547181c (diff)
Implement reference counting of read-write locks. This uses a list in the
thread structure to keep track of the locks and how many times they have
been locked. This list is checked on every lock and unlock. The traversal
through the list is O(n). Most applications don't hold so many locks at
once that this will become a problem. However, if it does become a problem,
it might be a good idea to review this once libthr is off probation and in
the optimization cycle.

This fixes:
o deadlock when a thread tries to recursively acquire a read lock
  when a writer is waiting on the lock.
o a thread could previously successfully unlock a lock it did not own.
o deadlock when a thread tries to acquire a write lock on a lock it
  already owns for reading or writing [ this is admittedly not required
  by POSIX, but is nice to have ].
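
As a quick illustration of the fixed behaviors, here is a minimal sketch
(not part of the commit) assuming a program linked against the patched
libthr; the EPERM and EDEADLK returns follow the diff below:

/*
 * Minimal sketch (not part of this commit): exercises the new error
 * returns, assuming the patched libthr (compile with -lthr).
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
    pthread_rwlock_t rw;
    int ret;

    pthread_rwlock_init(&rw, NULL);

    /* Unlocking a lock this thread does not hold is now rejected. */
    ret = pthread_rwlock_unlock(&rw);
    printf("unlock of unowned lock: %d (EPERM is %d)\n", ret, EPERM);

    /*
     * Requesting a write lock while already holding the lock for
     * reading is a self-deadlock; it now fails instead of hanging.
     */
    pthread_rwlock_rdlock(&rw);
    ret = pthread_rwlock_wrlock(&rw);
    printf("wrlock while read-held: %d (EDEADLK is %d)\n", ret, EDEADLK);

    pthread_rwlock_unlock(&rw);
    pthread_rwlock_destroy(&rw);
    return (0);
}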
Diffstat (limited to 'lib')
-rw-r--r--  lib/libthr/thread/thr_exit.c     |   6
-rw-r--r--  lib/libthr/thread/thr_private.h  |  16
-rw-r--r--  lib/libthr/thread/thr_rwlock.c   | 148
3 files changed, 162 insertions(+), 8 deletions(-)
diff --git a/lib/libthr/thread/thr_exit.c b/lib/libthr/thread/thr_exit.c
index facd707..41b1f86 100644
--- a/lib/libthr/thread/thr_exit.c
+++ b/lib/libthr/thread/thr_exit.c
@@ -122,6 +122,12 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}
+ /*
+ * Free the read-write lock list. It is allocated as needed.
+ * Therefore, it must be checked for validity before freeing.
+ */
+ if (curthread->rwlockList != NULL)
+ free(curthread->rwlockList);
retry:
/*
* Proper lock order, to minimize deadlocks, between joining
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index 7a7412b..c2bea3f 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -423,6 +423,15 @@ struct pthread_specific_elem {
int seqno;
};
+struct rwlock_held {
+ LIST_ENTRY(rwlock_held) rh_link;
+ struct pthread_rwlock *rh_rwlock;
+ int rh_rdcount;
+ int rh_wrcount;
+};
+
+LIST_HEAD(rwlock_listhead, rwlock_held);
+
/*
* Thread structure.
*/
@@ -562,6 +571,13 @@ struct pthread {
*/
TAILQ_HEAD(, pthread_mutex) mutexq;
+ /*
+ * List of read-write locks owned for reading _OR_ writing.
+ * This is accessed only by the current thread, so there's
+ * no need for mutual exclusion.
+ */
+ struct rwlock_listhead *rwlockList;
+
void *ret;
struct pthread_specific_elem *specific;
int specific_data_count;
diff --git a/lib/libthr/thread/thr_rwlock.c b/lib/libthr/thread/thr_rwlock.c
index afd2fa5..8644e1e 100644
--- a/lib/libthr/thread/thr_rwlock.c
+++ b/lib/libthr/thread/thr_rwlock.c
@@ -37,6 +37,11 @@
/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS (INT_MAX - 1)
+/*
+ * For distinguishing operations on read and write locks.
+ */
+enum rwlock_type {RWT_READ, RWT_WRITE};
+
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
@@ -47,6 +52,7 @@ __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
+static int insert_rwlock(struct pthread_rwlock *, enum rwlock_type);
static int rwlock_rdlock_common(pthread_rwlock_t *, int,
const struct timespec *);
static int rwlock_wrlock_common(pthread_rwlock_t *, int,
@@ -84,12 +90,14 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
/* allocate rwlock object */
prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
- if (prwlock == NULL)
- return(ENOMEM);
+ if (prwlock == NULL) {
+ ret = ENOMEM;
+ goto out;
+ }
/* initialize the lock */
if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
- goto out_mutex;
+ goto out;
/* initialize the read condition signal */
if ((ret = pthread_cond_init(&prwlock->read_signal, NULL)) != 0)
@@ -110,8 +118,9 @@ out_writecond:
pthread_cond_destroy(&prwlock->read_signal);
out_readcond:
pthread_mutex_destroy(&prwlock->lock);
-out_mutex:
- free(prwlock);
+out:
+ if (prwlock != NULL)
+ free(prwlock);
return(ret);
}
@@ -123,9 +132,11 @@ static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
const struct timespec *timeout)
{
+ struct rwlock_held *rh;
pthread_rwlock_t prwlock;
int ret;
+ rh = NULL;
if (rwlock == NULL || *rwlock == NULL)
return(EINVAL);
@@ -155,6 +166,19 @@ rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
return (EBUSY);
}
+ /*
+ * If this lock is already held for writing, we have
+ * a deadlock situation.
+ */
+ if (curthread->rwlockList != NULL && prwlock->state < 0) {
+ LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
+ if (rh->rh_rwlock == prwlock &&
+ rh->rh_wrcount > 0) {
+ pthread_mutex_unlock(&prwlock->lock);
+ return (EDEADLK);
+ }
+ }
+ }
if (timeout == NULL)
ret = pthread_cond_wait(&prwlock->read_signal,
&prwlock->lock);
@@ -170,6 +194,14 @@ rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
}
++prwlock->state; /* indicate we are locked for reading */
+ ret = insert_rwlock(prwlock, RWT_READ);
+ if (ret != 0) {
+ /* Tracking failed; undo the read count we just took. */
+ if (--prwlock->state == 0 && prwlock->blocked_writers)
+ pthread_cond_signal(&prwlock->write_signal);
+ pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
+ }
/*
* Something is really wrong if this call fails. Returning
@@ -204,9 +233,11 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
+ struct rwlock_held *rh;
pthread_rwlock_t prwlock;
int ret;
+ rh = NULL;
if (rwlock == NULL || *rwlock == NULL)
return(EINVAL);
@@ -216,20 +247,45 @@ _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
return(ret);
- /* XXX - Make sure we hold a lock on this rwlock */
-
+ if (curthread->rwlockList != NULL) {
+ LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
+ if (rh->rh_rwlock == prwlock)
+ break;
+ }
+ }
+ if (rh == NULL) {
+ ret = EPERM;
+ goto out;
+ }
if (prwlock->state > 0) {
+ rh->rh_rdcount--;
+ if (rh->rh_rdcount == 0) {
+ LIST_REMOVE(rh, rh_link);
+ free(rh);
+ }
if (--prwlock->state == 0 && prwlock->blocked_writers)
ret = pthread_cond_signal(&prwlock->write_signal);
} else if (prwlock->state < 0) {
+ rh->rh_wrcount--;
+ if (rh->rh_wrcount == 0) {
+ LIST_REMOVE(rh, rh_link);
+ free(rh);
+ }
prwlock->state = 0;
-
if (prwlock->blocked_writers)
ret = pthread_cond_signal(&prwlock->write_signal);
else
ret = pthread_cond_broadcast(&prwlock->read_signal);
+ } else {
+ /*
+ * No thread holds this lock. We should never get here.
+ */
+ PTHREAD_ASSERT(0, "state=0 on read-write lock held by thread");
+ ret = EPERM;
+ goto out;
}
+out:
/* see the comment on this in rwlock_rdlock_common */
pthread_mutex_unlock(&prwlock->lock);
@@ -263,9 +319,11 @@ static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
const struct timespec *timeout)
{
+ struct rwlock_held *rh;
pthread_rwlock_t prwlock;
int ret;
+ rh = NULL;
if (rwlock == NULL || *rwlock == NULL)
return(EINVAL);
@@ -288,6 +346,20 @@ rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
return (EBUSY);
}
+ /*
+ * If this thread already holds the lock for reading
+ * or writing, we have a deadlock situation.
+ */
+ if (curthread->rwlockList != NULL) {
+ LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
+ if (rh->rh_rwlock == prwlock &&
+ (rh->rh_rdcount > 0 || rh->rh_wrcount > 0)) {
+ pthread_mutex_unlock(&prwlock->lock);
+ return (EDEADLK);
+ }
+ }
+ }
+
++prwlock->blocked_writers;
if (timeout == NULL)
@@ -308,6 +381,16 @@ rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
/* indicate we are locked for writing */
prwlock->state = -1;
+ ret = insert_rwlock(prwlock, RWT_WRITE);
+ if (ret != 0) {
+ prwlock->state = 0; /* tracking failed; undo the write lock */
+ if (prwlock->blocked_writers)
+ pthread_cond_signal(&prwlock->write_signal);
+ else
+ pthread_cond_broadcast(&prwlock->read_signal);
+ pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
+ }
/* see the comment on this in pthread_rwlock_rdlock */
pthread_mutex_unlock(&prwlock->lock);
@@ -315,3 +393,50 @@ rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
return(0);
}
+static int
+insert_rwlock(struct pthread_rwlock *prwlock, enum rwlock_type rwt)
+{
+ struct rwlock_held *rh;
+
+ /*
+ * Initialize the rwlock list in the thread. Although this function
+ * may be called for many read-write locks, the initialization
+ * of the list head happens only once during the lifetime of
+ * the thread.
+ */
+ if (curthread->rwlockList == NULL) {
+ curthread->rwlockList =
+ (struct rwlock_listhead *)malloc(sizeof(struct rwlock_listhead));
+ if (curthread->rwlockList == NULL) {
+ return (ENOMEM);
+ }
+ LIST_INIT(curthread->rwlockList);
+ }
+
+ LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
+ if (rh->rh_rwlock == prwlock) {
+ if (rwt == RWT_READ)
+ rh->rh_rdcount++;
+ else if (rwt == RWT_WRITE)
+ rh->rh_wrcount++;
+ return (0);
+ }
+ }
+
+ /*
+ * This is the first time we're holding this lock;
+ * create a new entry.
+ */
+ rh = (struct rwlock_held *)malloc(sizeof(struct rwlock_held));
+ if (rh == NULL)
+ return (ENOMEM);
+ rh->rh_rwlock = prwlock;
+ rh->rh_rdcount = 0;
+ rh->rh_wrcount = 0;
+ if (rwt == RWT_READ)
+ rh->rh_rdcount = 1;
+ else if (rwt == RWT_WRITE)
+ rh->rh_wrcount = 1;
+ LIST_INSERT_HEAD(curthread->rwlockList, rh, rh_link);
+ return (0);
+}
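
The bookkeeping above is built on the <sys/queue.h> LIST macros. For
readers unfamiliar with them, here is a self-contained sketch of the same
declare/insert/traverse/remove pattern that insert_rwlock() uses; the
node/key names are hypothetical, not taken from the commit:

/*
 * Self-contained sketch of the <sys/queue.h> LIST pattern used by
 * insert_rwlock(); struct and field names here are hypothetical.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    LIST_ENTRY(node) link;  /* embedded next/prev linkage */
    int key;
};

LIST_HEAD(listhead, node);  /* declares the list head type */

int
main(void)
{
    struct listhead head;
    struct node *np;
    int i;

    LIST_INIT(&head);

    /* Insert entries at the head, as insert_rwlock() does. */
    for (i = 0; i < 3; i++) {
        if ((np = malloc(sizeof(*np))) == NULL)
            return (1);
        np->key = i;
        LIST_INSERT_HEAD(&head, np, link);
    }

    /* O(n) traversal, the same linear cost noted in the commit message. */
    LIST_FOREACH(np, &head, link)
        printf("key=%d\n", np->key);

    /* Unlink and free an entry, as the unlock path does. */
    np = LIST_FIRST(&head);
    LIST_REMOVE(np, link);
    free(np);

    /* The remaining entries are not freed in this tiny sketch. */
    return (0);
}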