author		deischen <deischen@FreeBSD.org>	2003-09-03 17:56:26 +0000
committer	deischen <deischen@FreeBSD.org>	2003-09-03 17:56:26 +0000
commit		919bc52171f32bfe264b987934e1055b6901ac6d (patch)
tree		4224af62a2f45f6a320c58acdd7f40a2692ca686 /lib/libkse/sys
parent		43632098e791da21a8b261c5b05b55cba97ae911 (diff)
Don't assume sizeof(long) = sizeof(int) on x86; use int instead of
long types for low-level locks.

Add prototypes for some internal libc functions that are wrapped by
the library as cancellation points.

Add memory barriers to alpha atomic swap functions (submitted by
davidxu).

Requested by:	bde
Diffstat (limited to 'lib/libkse/sys')
-rw-r--r--	lib/libkse/sys/lock.c	22
-rw-r--r--	lib/libkse/sys/lock.h	2
2 files changed, 12 insertions, 12 deletions
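[Editorial note] The hazard behind the type change is mixed access widths on the same lock word: code that treats lr_locked as an int only agrees with a long declaration where the two sizes happen to coincide (ILP32), not on LP64 platforms such as alpha. A minimal stand-alone sketch of the failure mode follows; the struct and field names are illustrative, not from libkse, and memcpy stands in for an 8-byte atomic swap:

#include <stdio.h>
#include <string.h>

struct req {
	int	locked;		/* lock word, 4 bytes */
	int	active;		/* adjacent member */
};

int
main(void)
{
	struct req r = { 1, 1 };
	long zero = 0;

	/* An LP64 "swap the lock word as a long" writes
	 * sizeof(long) == 8 bytes, silently zeroing r.active
	 * along with r.locked. */
	memcpy(&r.locked, &zero, sizeof(long));
	printf("locked=%d active=%d\n", r.locked, r.active);
	return (0);
}

On ILP32 x86 this prints "locked=0 active=1"; on an LP64 machine it prints "locked=0 active=0", which is exactly the kind of silent corruption that declaring and accessing the lock word consistently as int avoids.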
diff --git a/lib/libkse/sys/lock.c b/lib/libkse/sys/lock.c
index c67da33..9cae036 100644
--- a/lib/libkse/sys/lock.c
+++ b/lib/libkse/sys/lock.c
@@ -107,7 +107,7 @@ void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
int i;
- long lval;
+ int lval;
/**
* XXX - We probably want to remove these checks to optimize
@@ -167,7 +167,7 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
if (lu->lu_watchreq->lr_active == 0)
break;
}
- atomic_swap_long((long *)&lu->lu_watchreq->lr_locked,
+ atomic_swap_int((int *)&lu->lu_watchreq->lr_locked,
2, &lval);
if (lval == 0)
lu->lu_watchreq->lr_locked = 0;
@@ -188,7 +188,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
struct lockuser *lu_tmp, *lu_h;
struct lockreq *myreq;
int prio_h;
- long lval;
+ int lval;
/**
* XXX - We probably want to remove these checks to optimize
@@ -240,8 +240,8 @@ _lock_release(struct lock *lck, struct lockuser *lu)
if (lu_h != NULL) {
/* Give the lock to the highest priority user. */
if (lck->l_wakeup != NULL) {
- atomic_swap_long(
- (long *)&lu_h->lu_watchreq->lr_locked,
+ atomic_swap_int(
+ (int *)&lu_h->lu_watchreq->lr_locked,
0, &lval);
if (lval == 2)
/* Notify the sleeper */
@@ -249,11 +249,11 @@ _lock_release(struct lock *lck, struct lockuser *lu)
lu_h->lu_myreq->lr_watcher);
}
else
- atomic_store_rel_long(
+ atomic_store_rel_int(
&lu_h->lu_watchreq->lr_locked, 0);
} else {
if (lck->l_wakeup != NULL) {
- atomic_swap_long((long *)&myreq->lr_locked,
+ atomic_swap_int((int *)&myreq->lr_locked,
0, &lval);
if (lval == 2)
/* Notify the sleeper */
@@ -261,7 +261,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
}
else
/* Give the lock to the previous request. */
- atomic_store_rel_long(&myreq->lr_locked, 0);
+ atomic_store_rel_int(&myreq->lr_locked, 0);
}
} else {
/*
@@ -274,14 +274,14 @@ _lock_release(struct lock *lck, struct lockuser *lu)
lu->lu_watchreq = NULL;
lu->lu_myreq->lr_locked = 1;
if (lck->l_wakeup) {
- atomic_swap_long((long *)&myreq->lr_locked, 0, &lval);
+ atomic_swap_int((int *)&myreq->lr_locked, 0, &lval);
if (lval == 2)
/* Notify the sleeper */
lck->l_wakeup(lck, myreq->lr_watcher);
}
else
/* Give the lock to the previous request. */
- atomic_store_rel_long(&myreq->lr_locked, 0);
+ atomic_store_rel_int(&myreq->lr_locked, 0);
}
lu->lu_myreq->lr_active = 0;
}
@@ -289,7 +289,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
- atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 3);
+ atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}
void
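[Editorial note] All of the lock.c hunks above follow one release pattern: atomically swap 0 ("granted") into the watched request and inspect the previous value to decide whether a sleeping waiter must be woken. Below is a portable sketch of that handoff using C11 atomics in place of FreeBSD's atomic_swap_int(); the state value 2 ("waiter went to sleep") is taken from the code above, while the function and parameter names are illustrative:

#include <stdatomic.h>

void
release_to_waiter(atomic_int *lr_locked,
    void (*wakeup)(void *), void *arg)
{
	int lval;

	/* Hand the lock over and capture the previous state. */
	lval = atomic_exchange_explicit(lr_locked, 0,
	    memory_order_release);
	if (lval == 2)
		wakeup(arg);	/* the waiter blocked; notify it */
	/* Any other value: the waiter is still spinning and will
	 * simply observe the 0 on its next iteration. */
}

When no wakeup callback is registered, this degenerates to a plain releasing store, which is why the diff pairs each atomic_swap_int() with an atomic_store_rel_int() in the else branch.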
diff --git a/lib/libkse/sys/lock.h b/lib/libkse/sys/lock.h
index 9d4b994..917ca63 100644
--- a/lib/libkse/sys/lock.h
+++ b/lib/libkse/sys/lock.h
@@ -51,9 +51,9 @@ struct lock {
/* Try to make this >= CACHELINESIZE */
struct lockreq {
- volatile long lr_locked; /* lock granted = 0, busy otherwise */
struct lockuser *lr_watcher; /* only used for priority locks */
struct lockuser *lr_owner; /* only used for priority locks */
+ volatile int lr_locked; /* lock granted = 0, busy otherwise */
volatile int lr_active; /* non-zero if the lock is last lock for thread */
};
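[Editorial note] The lock.h hunk does more than narrow the type: had lr_locked simply been changed to int in place, a lone int ahead of the two pointers would have left a 4-byte padding hole on an LP64 ABI. Moving it next to lr_active lets the two int fields share one 8-byte slot. A quick layout check, assuming LP64 and reusing the field order from the hunk for illustration only:

#include <stdio.h>
#include <stddef.h>

struct lockuser;			/* opaque, as in lock.h */

struct lockreq {			/* field order from the hunk above */
	struct lockuser	*lr_watcher;
	struct lockuser	*lr_owner;
	volatile int	lr_locked;
	volatile int	lr_active;	/* shares a slot with lr_locked */
};

int
main(void)
{
	printf("sizeof(struct lockreq) = %zu\n",
	    sizeof(struct lockreq));	/* 24 on LP64, no holes */
	printf("lr_locked at offset %zu\n",
	    offsetof(struct lockreq, lr_locked));
	return (0);
}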