author     jhb <jhb@FreeBSD.org>  2005-09-27 17:39:11 +0000
committer  jhb <jhb@FreeBSD.org>  2005-09-27 17:39:11 +0000
commit     89caa56972d968272278a67fe10affb38d9e88eb (patch)
tree       368df1c62ea6aa397797927efbf9c7ccc4b3b92e /sys
parent     aa790ae8700e7fed7ef83f2549c6bc53d6ae7f8f (diff)
Add a new atomic_fetchadd() primitive that atomically adds a value to a
variable and returns the previous value of the variable.

Tested on:      i386, alpha, sparc64, arm (cognet)
Reviewed by:    arch@
Submitted by:   cognet (arm)
MFC after:      1 week
Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/include/atomic.h     22
-rw-r--r--  sys/amd64/include/atomic.h     21
-rw-r--r--  sys/arm/include/atomic.h       38
-rw-r--r--  sys/i386/include/atomic.h      21
-rw-r--r--  sys/ia64/include/atomic.h      19
-rw-r--r--  sys/powerpc/include/atomic.h   13
-rw-r--r--  sys/sparc64/include/atomic.h    3

7 files changed, 137 insertions(+), 0 deletions(-)
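
The new primitive returns the pre-add value in a single atomic step, which is what a caller wants when handing out unique slots or IDs from a shared counter. A minimal, hypothetical kernel-side usage sketch follows; the next_id counter and alloc_id() helper are made up for illustration, and only atomic_fetchadd_int() itself comes from this change:

#include <sys/types.h>
#include <machine/atomic.h>

static u_int next_id = 0;		/* shared counter, illustrative only */

/*
 * Hand out unique, monotonically increasing IDs.  atomic_fetchadd_int()
 * adds 1 to next_id and returns the value it held before the add, so two
 * racing callers can never observe the same ID.
 */
static u_int
alloc_id(void)
{

	return (atomic_fetchadd_int(&next_id, 1));
}
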
diff --git a/sys/alpha/include/atomic.h b/sys/alpha/include/atomic.h
index 7277600..adae6ec 100644
--- a/sys/alpha/include/atomic.h
+++ b/sys/alpha/include/atomic.h
@@ -365,6 +365,27 @@ atomic_cmpset_rel_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
return (atomic_cmpset_64(p, cmpval, newval));
}
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline u_int
+atomic_fetchadd_32(volatile u_int32_t *p, u_int32_t v)
+{
+ u_int32_t value, temp;
+
+#ifdef __GNUCLIKE_ASM
+ __asm __volatile (
+ "1:\tldl_l %0, %1\n\t" /* load old value */
+ "addl %0, %3, %2\n\t" /* calculate new value */
+ "stl_c %2, %1\n\t" /* attempt to store */
+ "beq %2, 1b\n" /* spin if failed */
+ : "=&r" (value), "=m" (*p), "=r" (temp)
+ : "r" (v), "m" (*p));
+#endif
+ return (value);
+}
+
/* Operations on chars. */
#define atomic_set_char atomic_set_8
#define atomic_set_acq_char atomic_set_acq_8
@@ -412,6 +433,7 @@ atomic_cmpset_rel_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
#define atomic_load_acq_int atomic_load_acq_32
#define atomic_store_rel_int atomic_store_rel_32
#define atomic_readandclear_int atomic_readandclear_32
+#define atomic_fetchadd_int atomic_fetchadd_32
/* Operations on longs. */
#define atomic_set_long atomic_set_64
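
The Alpha implementation above is a standard load-locked/store-conditional retry loop: ldl_l loads *p and arms the lock flag, addl computes the new value into a scratch register, stl_c stores it only if the location was not disturbed since the ldl_l, and beq branches back to retry when the conditional store fails. For readers more used to C11, a semantically equivalent stand-alone model of the contract (illustrative only, not how the kernel header is written) is:

#include <stdatomic.h>
#include <stdint.h>

/*
 * C11 model of the atomic_fetchadd_32() contract: add v to *p atomically
 * and return the value *p held before the addition.  Relaxed ordering is
 * used because the Alpha sequence above contains no memory barrier.
 */
static inline uint32_t
fetchadd_32_model(_Atomic uint32_t *p, uint32_t v)
{

	return (atomic_fetch_add_explicit(p, v, memory_order_relaxed));
}
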
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index 2d59167..fbdbd8f 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -73,6 +73,7 @@ void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
+u_int atomic_fetchadd_int(volatile u_int *p, u_int v);
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
@@ -154,6 +155,25 @@ atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
return (res);
}
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline u_int
+atomic_fetchadd_int(volatile u_int *p, u_int v)
+{
+
+ __asm __volatile (
+ " " __XSTRING(MPLOCKED) " "
+ " xaddl %0, %1 ; "
+ "# atomic_fetchadd_int"
+ : "+r" (v), /* 0 (result) */
+ "=m" (*p) /* 1 */
+ : "m" (*p)); /* 2 */
+
+ return (v);
+}
+
#if defined(_KERNEL) && !defined(SMP)
/*
@@ -375,6 +395,7 @@ u_long atomic_readandclear_long(volatile u_long *);
#define atomic_cmpset_acq_32 atomic_cmpset_acq_int
#define atomic_cmpset_rel_32 atomic_cmpset_rel_int
#define atomic_readandclear_32 atomic_readandclear_int
+#define atomic_fetchadd_32 atomic_fetchadd_int
/* Operations on 64-bit quad words. */
#define atomic_set_64 atomic_set_long
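
On amd64 the whole operation is a single xaddl prefixed by MPLOCKED (which expands to the lock prefix on SMP configurations); the i386 version further down is identical. xadd exchanges the source register with the memory operand and writes their sum back to memory, so once the instruction retires the register bound to v holds the old value of *p, which is exactly what the function returns. A plain-C model of the instruction's effect, for illustration only (the hardware performs these steps as one indivisible read-modify-write):

#include <stdint.h>

/*
 * Non-atomic model of "lock xaddl reg, mem".  The real instruction does
 * both assignments atomically.
 */
static inline uint32_t
xaddl_model(volatile uint32_t *mem, uint32_t reg)
{
	uint32_t old;

	old = *mem;		/* the old memory value ends up in the register */
	*mem = old + reg;	/* the sum ends up in memory */
	return (old);
}
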
diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h
index 1170847..40288cf 100644
--- a/sys/arm/include/atomic.h
+++ b/sys/arm/include/atomic.h
@@ -127,6 +127,19 @@ atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
__with_interrupts_disabled(*p -= val);
}
+static __inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
+{
+ uint32_t value;
+
+ __with_interrupts_disabled(
+ {
+ value = *p;
+ *p += v;
+ });
+ return (value);
+}
+
#else /* !_KERNEL */
static __inline u_int32_t
@@ -240,6 +253,30 @@ atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
: "=r" (ras_start), "=r" (start), "+r" (address), "+r" (clearmask));
}
+
+static __inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
+{
+ uint32_t ras_start, start;
+
+ __asm __volatile("1:\n"
+ "mov %0, #0xe0000008\n"
+ "adr %1, 2f\n"
+ "str %1, [%0]\n"
+ "adr %1, 1b\n"
+ "mov %0, #0xe0000004\n"
+ "str %1, [%0]\n"
+ "ldr %1, %2\n"
+ "add %3, %1, %3\n"
+ "str %3, %2\n"
+ "2:\n"
+ "mov %3, #0\n"
+ "str %3, [%0]\n"
+ : "=r" (ras_start), "=r" (start), "=m" (*p), "+r" (v));
+ return (start);
+}
+
+
#endif /* _KERNEL */
static __inline int
@@ -291,5 +328,6 @@ atomic_readandclear_32(volatile u_int32_t *p)
#define atomic_store_ptr atomic_store_32
#define atomic_cmpset_ptr atomic_cmpset_32
#define atomic_set_ptr atomic_set_32
+#define atomic_fetchadd_int atomic_fetchadd_32
#endif /* _MACHINE_ATOMIC_H_ */
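
The second ARM version (the userland path, where interrupts cannot be disabled and the CPUs targeted at the time lack load/store-exclusive) appears to rely on the kernel's restartable-atomic-sequence support: the two magic stores publish the start and end addresses of the load/add/store window at fixed locations (0xe0000004 and 0xe0000008), and a thread preempted inside that window is restarted at the beginning of the sequence, so the plain three-instruction body behaves atomically on a uniprocessor. As a point of comparison only, on ARMv6 and later the same fetch-add could be written with load/store-exclusive; the sketch below is not part of the patch and assumes an ARMv6-or-later target with GCC-style inline asm:

#include <stdint.h>

/*
 * Hypothetical ldrex/strex retry loop, not from this commit: loop until
 * the exclusive store succeeds, then return the value *p held before the
 * addition.
 */
static inline uint32_t
fetchadd_32_ldrex(volatile uint32_t *p, uint32_t v)
{
	uint32_t old, sum, fail;

	do {
		__asm __volatile(
		    "ldrex	%0, [%3]\n\t"	/* old = *p, mark exclusive */
		    "add	%1, %0, %4\n\t"	/* sum = old + v */
		    "strex	%2, %1, [%3]"	/* *p = sum if still exclusive */
		    : "=&r" (old), "=&r" (sum), "=&r" (fail)
		    : "r" (p), "r" (v)
		    : "memory");
	} while (fail != 0);
	return (old);
}
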
diff --git a/sys/i386/include/atomic.h b/sys/i386/include/atomic.h
index e574ee7..70cec84 100644
--- a/sys/i386/include/atomic.h
+++ b/sys/i386/include/atomic.h
@@ -72,6 +72,7 @@
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
+u_int atomic_fetchadd_int(volatile u_int *p, u_int v);
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
@@ -163,6 +164,25 @@ atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
#endif /* defined(CPU_DISABLE_CMPXCHG) */
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline u_int
+atomic_fetchadd_int(volatile u_int *p, u_int v)
+{
+
+ __asm __volatile (
+ " " __XSTRING(MPLOCKED) " "
+ " xaddl %0, %1 ; "
+ "# atomic_fetchadd_int"
+ : "+r" (v), /* 0 (result) */
+ "=m" (*p) /* 1 */
+ : "m" (*p)); /* 2 */
+
+ return (v);
+}
+
#if defined(_KERNEL) && !defined(SMP)
/*
@@ -392,6 +412,7 @@ u_long atomic_readandclear_long(volatile u_long *);
#define atomic_cmpset_acq_32 atomic_cmpset_acq_int
#define atomic_cmpset_rel_32 atomic_cmpset_rel_int
#define atomic_readandclear_32 atomic_readandclear_int
+#define atomic_fetchadd_32 atomic_fetchadd_int
/* Operations on pointers. */
#define atomic_set_ptr atomic_set_int
diff --git a/sys/ia64/include/atomic.h b/sys/ia64/include/atomic.h
index 1119079..f660180 100644
--- a/sys/ia64/include/atomic.h
+++ b/sys/ia64/include/atomic.h
@@ -342,4 +342,23 @@ atomic_readandclear_64(volatile uint64_t* p)
#define atomic_readandclear_int atomic_readandclear_32
#define atomic_readandclear_long atomic_readandclear_64
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ *
+ * XXX: Should we use the fetchadd instruction here?
+ */
+static __inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
+{
+ uint32_t value;
+
+ do {
+ value = *p;
+ } while (!atomic_cmpset_32(p, value, value + v));
+ return (value);
+}
+
+#define atomic_fetchadd_int atomic_fetchadd_32
+
#endif /* ! _MACHINE_ATOMIC_H_ */
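
The ia64 version (and the powerpc version that follows) is the portable fallback: read the current value, try to install value + v with atomic_cmpset_32(), and retry if another CPU changed *p in the meantime. As for the XXX above, the Itanium fetchadd4/fetchadd8 instructions only accept a small fixed set of immediate increments (plus or minus 1, 4, 8 and 16), so they cannot implement this interface for an arbitrary run-time v, which is presumably why the compare-and-set loop is used. The same retry pattern in stand-alone C11, for illustration only:

#include <stdatomic.h>
#include <stdint.h>

/*
 * Portable rendering of the compare-and-set retry loop used above.  On
 * failure, atomic_compare_exchange_weak_explicit() reloads old from *p,
 * so the loop simply tries again with the fresh value.
 */
static inline uint32_t
fetchadd_32_casloop(_Atomic uint32_t *p, uint32_t v)
{
	uint32_t old;

	old = atomic_load_explicit(p, memory_order_relaxed);
	while (!atomic_compare_exchange_weak_explicit(p, &old, old + v,
	    memory_order_relaxed, memory_order_relaxed))
		;
	return (old);
}
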
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 0bc8f21..9faa5bc 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -444,4 +444,17 @@ atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
#define atomic_cmpset_acq_ptr atomic_cmpset_acq_32
#define atomic_cmpset_rel_ptr atomic_cmpset_rel_32
+static __inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
+{
+ uint32_t value;
+
+ do {
+ value = *p;
+ } while (!atomic_cmpset_32(p, value, value + v));
+ return (value);
+}
+
+#define atomic_fetchadd_int atomic_fetchadd_32
+
#endif /* ! _MACHINE_ATOMIC_H_ */
diff --git a/sys/sparc64/include/atomic.h b/sys/sparc64/include/atomic.h
index ed16955..77d455b 100644
--- a/sys/sparc64/include/atomic.h
+++ b/sys/sparc64/include/atomic.h
@@ -277,6 +277,9 @@ ATOMIC_GEN(64, uint64_t *, uint64_t, uint64_t, 64);
ATOMIC_GEN(ptr, uintptr_t *, uintptr_t, uintptr_t, 64);
+#define atomic_fetchadd_int atomic_add_int
+#define atomic_fetchadd_32 atomic_add_32
+
#undef ATOMIC_GEN
#undef atomic_cas
#undef atomic_cas_acq