author		jeff <jeff@FreeBSD.org>	2007-06-06 03:40:47 +0000
committer	jeff <jeff@FreeBSD.org>	2007-06-06 03:40:47 +0000
commit		7a9c95c1006958928813d10d99735b67778a03e7 (patch)
tree		6e1a416746fad7d57f2b0f3b1f0430655bd995a9 /sys
parent		aed2a0bc2931b2e8d1b39a45093c62e28fb3ff3d (diff)
- Placing the 'volatile' on the right side of the * in the td_lock
  declaration removes the need for __DEVOLATILE().

Pointed out by:	tegge
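For readers unfamiliar with the distinction: in C, the qualifier's position relative to the * decides what is volatile. 'volatile struct mtx *td_lock' makes the pointed-to mutex volatile, so reading td_lock yields a volatile-qualified pointer that must have the qualifier cast away (via __DEVOLATILE()) before it can be handed to code expecting a plain 'struct mtx *'. 'struct mtx *volatile td_lock' makes only the pointer object itself volatile, which is what the scheduler needs here: the pointer can be changed out from under a thread, but the mutex it points to is not accessed through a volatile lvalue. A minimal, self-contained sketch of the difference (not FreeBSD code; the struct names are invented for illustration):

#include <stdio.h>

struct mtx { int state; };

struct thread_old { volatile struct mtx *td_lock; };	/* pointee is volatile */
struct thread_new { struct mtx *volatile td_lock; };	/* pointer is volatile */

int
main(void)
{
	struct mtx m = { 0 };
	struct thread_old o = { &m };
	struct thread_new n = { &m };
	struct mtx *p;

	/*
	 * p = o.td_lock;  would discard the pointee's 'volatile'
	 * qualifier; compilers warn or reject, hence the old
	 * __DEVOLATILE() casts.
	 */
	p = n.td_lock;	/* fine: only the pointer variable is volatile */
	printf("state = %d\n", p->state);
	return (0);
}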
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_mutex.c	6
-rw-r--r--	sys/kern/sched_4bsd.c	2
-rw-r--r--	sys/kern/sched_ule.c	2
-rw-r--r--	sys/sys/mutex.h		2
-rw-r--r--	sys/sys/proc.h		4
5 files changed, 8 insertions, 8 deletions
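For reference, __DEVOLATILE() is defined in sys/sys/cdefs.h and strips the volatile qualifier by laundering the pointer through an integer cast. The sketch below is compilable userland C that uses uintptr_t in place of the kernel's __uintptr_t (an assumption for portability, not the verbatim kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Approximation of the sys/sys/cdefs.h macro. */
#define	__DEVOLATILE(type, var)	((type)(uintptr_t)(volatile void *)(var))

int
main(void)
{
	volatile int v = 42;
	int *p = __DEVOLATILE(int *, &v);	/* qualifier cast away */

	printf("%d\n", *p);
	return (0);
}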
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index a144062..c251c75 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -504,7 +504,7 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
 	for (;;) {
 retry:
 		spinlock_enter();
-		m = __DEVOLATILE(struct mtx *, td->td_lock);
+		m = td->td_lock;
 		WITNESS_CHECKORDER(&m->lock_object,
 		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line);
 		while (!_obtain_lock(m, tid)) {
@@ -542,7 +542,7 @@ thread_lock_block(struct thread *td)
 
 	spinlock_enter();
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	lock = __DEVOLATILE(struct mtx *, td->td_lock);
+	lock = td->td_lock;
 	td->td_lock = &blocked_lock;
 	mtx_unlock_spin(lock);
 
@@ -565,7 +565,7 @@ thread_lock_set(struct thread *td, struct mtx *new)
 
 	mtx_assert(new, MA_OWNED);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	lock = __DEVOLATILE(struct mtx *, td->td_lock);
+	lock = td->td_lock;
 	td->td_lock = new;
 	mtx_unlock_spin(lock);
 }
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 86e8dad..a4b1e08 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -903,7 +903,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 #endif
 
 	/* I feel sleepy */
-	cpu_switch(td, newtd, __DEVOLATILE(struct mtx *, td->td_lock));
+	cpu_switch(td, newtd, td->td_lock);
 	/*
 	 * Where am I? What year is it?
 	 * We are in the same thread that went to sleep above,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index e374b50..83d6833 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1487,7 +1487,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
 #endif
 
-	cpu_switch(td, newtd, __DEVOLATILE(struct mtx *, td->td_lock));
+	cpu_switch(td, newtd, td->td_lock);
 #ifdef HWPMC_HOOKS
 	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index d18061a..9db89ad 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -132,7 +132,7 @@ void _thread_lock_flags(struct thread *, int, const char *, int);
 #define	thread_lock_flags(tdp, opt)					\
 	_thread_lock_flags((tdp), (opt), __FILE__, __LINE__)
 #define	thread_unlock(tdp)						\
-	mtx_unlock_spin(__DEVOLATILE(struct mtx *, (tdp)->td_lock))
+	mtx_unlock_spin((tdp)->td_lock)
 
 /*
  * We define our machine-independent (unoptimized) mutex micro-operations
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index acde39d..dc8ab6b 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -202,7 +202,7 @@ struct mqueue_notifier;
  * Thread context. Processes may have multiple threads.
  */
 struct thread {
-	volatile struct mtx *td_lock;	/* replaces sched lock */
+	struct mtx *volatile td_lock;	/* replaces sched lock */
 	struct proc	*td_proc;	/* (*) Associated process. */
 	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
@@ -306,7 +306,7 @@ void thread_lock_unblock(struct thread *, struct mtx *);
 void	thread_lock_set(struct thread *, struct mtx *);
 #define	THREAD_LOCK_ASSERT(td, type)					\
 do {									\
-	struct mtx *__m = __DEVOLATILE(struct mtx *, (td)->td_lock);	\
+	struct mtx *__m = (td)->td_lock;				\
 	if (__m != &blocked_lock)					\
 		mtx_assert(__m, (type));				\
 } while (0)