Diffstat (limited to 'lib/libpthread/arch')
-rw-r--r--  lib/libpthread/arch/alpha/include/atomic_ops.h  | 17
-rw-r--r--  lib/libpthread/arch/amd64/include/atomic_ops.h  | 14
-rw-r--r--  lib/libpthread/arch/i386/include/atomic_ops.h   | 12
3 files changed, 34 insertions(+), 9 deletions(-)
diff --git a/lib/libpthread/arch/alpha/include/atomic_ops.h b/lib/libpthread/arch/alpha/include/atomic_ops.h
index 90ffd8d..caa291b 100644
--- a/lib/libpthread/arch/alpha/include/atomic_ops.h
+++ b/lib/libpthread/arch/alpha/include/atomic_ops.h
@@ -45,6 +45,23 @@ atomic_swap_long(long *dst, long val, long *res)
" stq_c $2, %1\n" /* attempt the store; $2 clobbered */
" beq $2, 1b\n" /* it didn't work, loop */
" stq $1, %0\n" /* save value of *dst in *res */
+ " mb \n"
+ : "+m"(*res)
+ : "m"(*dst), "r"(val)
+ : "memory", "$1", "$2"); /* clobber t0 and t1 */
+}
+
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+ /* $1 and $2 are t0 and t1 respectively. */
+ __asm ( " ldl $1, %1\n" /* get cache line before lock */
+ "1: ldl_l $1, %1\n" /* load *dst asserting lock */
+ " mov %2, $2\n" /* save value to be swapped */
+ " stl_c $2, %1\n" /* attempt the store; $2 clobbered */
+ " beq $2, 1b\n" /* it didn't work, loop */
+ " stl $1, %0\n" /* save value of *dst in *res */
+ " mb \n"
: "+m"(*res)
: "m"(*dst), "r"(val)
: "memory", "$1", "$2"); /* clobber t0 and t1 */
diff --git a/lib/libpthread/arch/amd64/include/atomic_ops.h b/lib/libpthread/arch/amd64/include/atomic_ops.h
index 5edb533..980eb8e 100644
--- a/lib/libpthread/arch/amd64/include/atomic_ops.h
+++ b/lib/libpthread/arch/amd64/include/atomic_ops.h
@@ -33,17 +33,25 @@
* Atomic swap:
* Atomic (tmp = *dst, *dst = val), then *res = tmp
*
- * void atomic_swap_long(long *dst, long val, long *res);
+ * void atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res);
*/
static inline void
-atomic_swap_long(long *dst, long val, long *res)
+atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res)
{
__asm __volatile(
"xchgq %2, %1; movq %2, %0"
: "=m" (*res) : "m" (*dst), "r" (val) : "memory");
}
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+ __asm __volatile(
+ "xchgl %2, %1; movl %2, %0"
+ : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
+}
+
#define atomic_swap_ptr(d, v, r) \
- atomic_swap_long((long *)(d), (long)(v), (long *)(r))
+ atomic_swap64((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
#endif
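
On x86, an xchg instruction with a memory operand is implicitly locked, so
unlike the Alpha loop above, neither a lock prefix nor a trailing fence is
needed. For reference, this is the non-atomic C equivalent of what both
routines implement; it is illustration only, since the point of the inline
asm is that the three steps happen as one indivisible xchg:

#include <stdint.h>

static void
swap_equivalent(intptr_t *dst, intptr_t val, intptr_t *res)
{
	intptr_t tmp;

	tmp = *dst;		/* tmp = *dst */
	*dst = val;		/* *dst = val */
	*res = tmp;		/* then *res = tmp */
}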
diff --git a/lib/libpthread/arch/i386/include/atomic_ops.h b/lib/libpthread/arch/i386/include/atomic_ops.h
index 1825b8c..7bc3d1b 100644
--- a/lib/libpthread/arch/i386/include/atomic_ops.h
+++ b/lib/libpthread/arch/i386/include/atomic_ops.h
@@ -33,19 +33,19 @@
* Atomic swap:
* Atomic (tmp = *dst, *dst = val), then *res = tmp
*
- * void atomic_swap_long(long *dst, long val, long *res);
+ * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
*/
static inline void
-atomic_swap_long(long *dst, long val, long *res)
+atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
{
__asm __volatile(
"xchgl %2, %1; movl %2, %0"
: "=m" (*res) : "m" (*dst), "r" (val) : "memory");
}
-#define atomic_swap_int(d, v, r) \
- atomic_swap_long((long *)(d), (long)(v), (long *)(r))
-
-#define atomic_swap_ptr atomic_swap_int
+#define atomic_swap_ptr(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+#define atomic_swap_int(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
#endif
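
Because int, long, and data pointers are all 32 bits on i386, the single
xchgl-based atomic_swap32 covers every case and the wrapper macros only
cast. A hypothetical caller that atomically detaches a shared pointer
(queue_head and take_head are illustrative names, not part of this patch):

#include <stddef.h>

void *queue_head;		/* shared pointer, illustrative */

static void *
take_head(void)
{
	void *old;

	/* expands to atomic_swap32((intptr_t *)&queue_head, ...) */
	atomic_swap_ptr(&queue_head, NULL, &old);
	return (old);
}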