author	delphij <delphij@FreeBSD.org>	2005-07-25 10:21:49 +0000
committer	delphij <delphij@FreeBSD.org>	2005-07-25 10:21:49 +0000
commit	049e7675aa7008ec1651d0bffdb40aecc51a3cff (patch)
tree	d211e49a49fec3d9892228a79635acbb2034fd0d
parent	da0fa3b3e071fca3f1287439ec8fb56f430166fd (diff)
download	FreeBSD-src-049e7675aa7008ec1651d0bffdb40aecc51a3cff.zip
	FreeBSD-src-049e7675aa7008ec1651d0bffdb40aecc51a3cff.tar.gz
Cast to uintptr_t where the compiler complains. This fixes the ULE
scheduler breakage introduced by the recent atomic_ptr() change.
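The casts are needed because the kernel's atomic_cmpset_ptr(9) now compares
and swaps uintptr_t values rather than typed pointers, so pointer arguments
must be cast explicitly. As a rough illustration of the pattern being fixed,
below is a minimal userspace sketch (an assumption for illustration only, not
FreeBSD kernel code): C11 atomics stand in for atomic_cmpset_ptr(), and the
names list_head, push(), take_all(), and struct node are invented for the
example. It mirrors the lock-free "prepend to a list" and "take the whole
list" loops seen in kseq_notify() and kseq_assign().

/*
 * Sketch of a lock-free singly linked list whose head is stored as a
 * uintptr_t, so every CAS must cast pointers to/from uintptr_t, just as
 * the diff below does for ksq_assigned.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int value;
};

/* List head; the kernel field ksq_assigned plays the same role. */
static _Atomic(uintptr_t) list_head;

/* Prepend one node, retrying until the compare-and-swap succeeds. */
static void
push(struct node *n)
{
	uintptr_t old;

	do {
		old = atomic_load(&list_head);
		n->next = (struct node *)old;
	} while (!atomic_compare_exchange_weak(&list_head, &old,
	    (uintptr_t)n));
}

/* Atomically take the whole list, leaving NULL behind. */
static struct node *
take_all(void)
{
	uintptr_t old;

	do {
		old = atomic_load(&list_head);
	} while (!atomic_compare_exchange_weak(&list_head, &old,
	    (uintptr_t)NULL));
	return ((struct node *)old);
}

int
main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->value = i;
		push(n);
	}
	for (struct node *n = take_all(); n != NULL;) {
		struct node *next = n->next;
		printf("%d\n", n->value);
		free(n);
		n = next;
	}
	return (0);
}

The structure of the retry loops is the same as in the patch: read the
current head, build the new value, and CAS it in, repeating on failure.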
-rw-r--r--	sys/kern/sched_ule.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 04c8f48..e414697 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -651,7 +651,8 @@ kseq_assign(struct kseq *kseq)
 	do {
 		*(volatile struct kse **)&ke = kseq->ksq_assigned;
-	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
+	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
+	    (uintptr_t)ke, (uintptr_t)NULL));
 	for (; ke != NULL; ke = nke) {
 		nke = ke->ke_assign;
 		kseq->ksq_group->ksg_load--;
@@ -688,7 +689,8 @@ kseq_notify(struct kse *ke, int cpu)
 	 */
 	do {
 		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
-	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
+	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
+	    (uintptr_t)ke->ke_assign, (uintptr_t)ke));
 	/*
 	 * Without sched_lock we could lose a race where we set NEEDRESCHED
 	 * on a thread that is switched out before the IPI is delivered. This