author		kib <kib@FreeBSD.org>	2009-05-24 12:39:38 +0000
committer	kib <kib@FreeBSD.org>	2009-05-24 12:39:38 +0000
commit		862b0fc4e34c231b61d1ab0c8f9abdc98f402ef1 (patch)
tree		c07ceecfdfbb97e8a579fcf5c97efe22316af3c0 /sys/kern/kern_lockf.c
parent		c54d127bf0b5509d9233583ca4d8a8dc0441c38f (diff)
The advisory lock may be activated, or activated and then removed, during
the sleep that waits for the conditions under which the lock may be
granted. To prevent lf_setlock() from accessing possibly freed memory,
add reference counting to struct lockf_entry. Bump the refcount around
the sleep. Make lf_free_lock() return non-zero when the structure was
freed, and use this after the sleep to return EINTR to the caller. The
error code might need clarification, but we cannot return success to
usermode, since the lock is no longer owned.

Reviewed by:	dfr
Tested by:	pho
MFC after:	1 month
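To make the pattern concrete, here is a minimal userland sketch of the same
idea, with pthread primitives standing in for the kernel's sx lock and
sx_sleep(). struct entry, entry_release(), request_lock() and waker() are
invented names for this illustration, not the kernel's API: the waiter takes
its own reference before sleeping, and the release function reports whether
it dropped the last reference, which tells the waiter that the request
vanished while it slept.

/*
 * refcnt_sleep.c -- userland sketch of the lockf_entry refcount pattern.
 * Hypothetical illustration, not FreeBSD kernel code.
 */
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  state_cv = PTHREAD_COND_INITIALIZER;

struct entry {
	int refs;	/* reference count, protected by state_lock */
	int granted;	/* the request was granted */
	int removed;	/* the request was removed while we slept */
};

static struct entry *pending;	/* the blocked request, if any */

/*
 * Analogue of lf_free_lock(): drop one reference and report, with a
 * non-zero return, that it was the last one and the entry is gone.
 */
static int
entry_release(struct entry *e)
{

	if (--e->refs > 0)
		return (0);
	free(e);
	return (1);
}

/*
 * Analogue of the lf_setlock() sleep path.  The extra reference taken
 * before sleeping keeps the entry's memory valid even if the waker
 * removes the request, so the flags can be re-checked safely.
 */
static int
request_lock(void)
{
	struct entry *e;
	int error = 0;

	pthread_mutex_lock(&state_lock);
	e = calloc(1, sizeof(*e));
	e->refs = 1;		/* the blocked list's reference */
	pending = e;		/* publish the blocked request */
	e->refs++;		/* our reference, held across the sleep */
	while (!e->granted && !e->removed)
		pthread_cond_wait(&state_cv, &state_lock);
	if (entry_release(e)) {
		/*
		 * Ours was the last reference: the waker already removed
		 * the request, so we do not own the lock and must not
		 * report success.
		 */
		error = EINTR;
	}
	pthread_mutex_unlock(&state_lock);
	return (error);
}

/* A waker that removes the pending request instead of granting it. */
static void *
waker(void *arg)
{
	struct entry *e;

	(void)arg;
	pthread_mutex_lock(&state_lock);
	while ((e = pending) == NULL) {	/* wait for a request to appear */
		pthread_mutex_unlock(&state_lock);
		sched_yield();
		pthread_mutex_lock(&state_lock);
	}
	pending = NULL;
	e->removed = 1;
	entry_release(e);	/* drop the list's reference */
	pthread_cond_broadcast(&state_cv);
	pthread_mutex_unlock(&state_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	printf("request_lock() -> %s\n",
	    request_lock() == EINTR ? "EINTR" : "granted");
	pthread_join(&t, NULL);
	return (0);
}

In the grant path the list's reference would stay in place and be dropped
later, when the lock is eventually released; that bookkeeping is omitted
from the sketch.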
Diffstat (limited to 'sys/kern/kern_lockf.c')
-rw-r--r--	sys/kern/kern_lockf.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index 3f8e9f6..8862f1e 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -106,7 +106,7 @@ static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
int);
static struct lockf_entry *
lf_alloc_lock(struct lock_owner *);
-static void lf_free_lock(struct lockf_entry *);
+static int lf_free_lock(struct lockf_entry *);
static int lf_clearlock(struct lockf *, struct lockf_entry *);
static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
@@ -347,9 +347,13 @@ lf_alloc_lock(struct lock_owner *lo)
return (lf);
}
-static void
+static int
lf_free_lock(struct lockf_entry *lock)
{
+
+ KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
+ if (--lock->lf_refs > 0)
+ return (0);
/*
* Adjust the lock_owner reference count and
* reclaim the entry if this is the last lock
@@ -394,6 +398,7 @@ lf_free_lock(struct lockf_entry *lock)
printf("Freed lock %p\n", lock);
#endif
free(lock, M_LOCKF);
+ return (1);
}
/*
@@ -540,6 +545,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
* the lf_lock_owners_lock tax twice.
*/
lock = lf_alloc_lock(NULL);
+ lock->lf_refs = 1;
lock->lf_start = start;
lock->lf_end = end;
lock->lf_owner = lo;
@@ -1447,7 +1453,13 @@ lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
goto out;
}
+ lock->lf_refs++;
error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
+ if (lf_free_lock(lock)) {
+ error = EINTR;
+ goto out;
+ }
+
/*
* We may have been awakened by a signal and/or by a
* debugger continuing us (in which cases we must
@@ -1809,6 +1821,7 @@ lf_split(struct lockf *state, struct lockf_entry *lock1,
*/
splitlock = lf_alloc_lock(lock1->lf_owner);
memcpy(splitlock, lock1, sizeof *splitlock);
+ splitlock->lf_refs = 1;
if (splitlock->lf_flags & F_REMOTE)
vref(splitlock->lf_vnode);