-rw-r--r--  lib/libkse/arch/amd64/include/atomic_ops.h      | 14
-rw-r--r--  lib/libkse/arch/i386/include/atomic_ops.h       | 12
-rw-r--r--  lib/libkse/sys/lock.c                           | 22
-rw-r--r--  lib/libkse/sys/lock.h                           |  2
-rw-r--r--  lib/libkse/thread/thr_creat.c                   |  2
-rw-r--r--  lib/libkse/thread/thr_pause.c                   |  2
-rw-r--r--  lib/libkse/thread/thr_sleep.c                   |  2
-rw-r--r--  lib/libkse/thread/thr_system.c                  |  2
-rw-r--r--  lib/libkse/thread/thr_tcdrain.c                 |  2
-rw-r--r--  lib/libkse/thread/thr_wait.c                    |  2
-rw-r--r--  lib/libkse/thread/thr_wait4.c                   |  3
-rw-r--r--  lib/libkse/thread/thr_waitpid.c                 |  2
-rw-r--r--  lib/libpthread/arch/alpha/include/atomic_ops.h  | 17
-rw-r--r--  lib/libpthread/arch/amd64/include/atomic_ops.h  | 14
-rw-r--r--  lib/libpthread/arch/i386/include/atomic_ops.h   | 12
-rw-r--r--  lib/libpthread/sys/lock.c                       | 22
-rw-r--r--  lib/libpthread/sys/lock.h                       |  2
-rw-r--r--  lib/libpthread/thread/thr_creat.c               |  2
-rw-r--r--  lib/libpthread/thread/thr_pause.c               |  2
-rw-r--r--  lib/libpthread/thread/thr_sleep.c               |  2
-rw-r--r--  lib/libpthread/thread/thr_system.c              |  2
-rw-r--r--  lib/libpthread/thread/thr_tcdrain.c             |  2
-rw-r--r--  lib/libpthread/thread/thr_wait.c                |  2
-rw-r--r--  lib/libpthread/thread/thr_wait4.c               |  3
-rw-r--r--  lib/libpthread/thread/thr_waitpid.c             |  2
25 files changed, 109 insertions(+), 42 deletions(-)
diff --git a/lib/libkse/arch/amd64/include/atomic_ops.h b/lib/libkse/arch/amd64/include/atomic_ops.h
index 5edb533..980eb8e 100644
--- a/lib/libkse/arch/amd64/include/atomic_ops.h
+++ b/lib/libkse/arch/amd64/include/atomic_ops.h
@@ -33,17 +33,25 @@
* Atomic swap:
* Atomic (tmp = *dst, *dst = val), then *res = tmp
*
- * void atomic_swap_long(long *dst, long val, long *res);
+ * void atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res);
*/
static inline void
-atomic_swap_long(long *dst, long val, long *res)
+atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res)
{
__asm __volatile(
"xchgq %2, %1; movq %2, %0"
: "=m" (*res) : "m" (*dst), "r" (val) : "memory");
}
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+ __asm __volatile(
+ "xchgl %2, %1; movl %2, %0"
+ : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
+}
+
#define atomic_swap_ptr(d, v, r) \
- atomic_swap_long((long *)(d), (long)(v), (long *)(r))
+ atomic_swap64((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
#endif
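For reference, a standalone sketch (assuming an x86-64 host and a GCC/Clang toolchain) of the xchg-based swap this header now provides at 32-bit width; it mirrors the new atomic_swap_int() above, and the main() harness is illustrative only, not part of the patch.

/* Standalone demo of the xchg-based swap; mirrors atomic_swap_int() above. */
#include <stdio.h>

static inline void
demo_swap_int(int *dst, int val, int *res)
{
	__asm __volatile(
	    "xchgl %2, %1; movl %2, %0"
	    : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
}

int
main(void)
{
	int word = 0, old = -1;

	demo_swap_int(&word, 2, &old);
	printf("old=%d word=%d\n", old, word);	/* prints "old=0 word=2" */
	return (0);
}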
diff --git a/lib/libkse/arch/i386/include/atomic_ops.h b/lib/libkse/arch/i386/include/atomic_ops.h
index 1825b8c..7bc3d1b 100644
--- a/lib/libkse/arch/i386/include/atomic_ops.h
+++ b/lib/libkse/arch/i386/include/atomic_ops.h
@@ -33,19 +33,19 @@
* Atomic swap:
* Atomic (tmp = *dst, *dst = val), then *res = tmp
*
- * void atomic_swap_long(long *dst, long val, long *res);
+ * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
*/
static inline void
-atomic_swap_long(long *dst, long val, long *res)
+atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
{
__asm __volatile(
"xchgl %2, %1; movl %2, %0"
: "=m" (*res) : "m" (*dst), "r" (val) : "memory");
}
-#define atomic_swap_int(d, v, r) \
- atomic_swap_long((long *)(d), (long)(v), (long *)(r))
-
-#define atomic_swap_ptr atomic_swap_int
+#define atomic_swap_ptr(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+#define atomic_swap_int(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
#endif
diff --git a/lib/libkse/sys/lock.c b/lib/libkse/sys/lock.c
index c67da33..9cae036 100644
--- a/lib/libkse/sys/lock.c
+++ b/lib/libkse/sys/lock.c
@@ -107,7 +107,7 @@ void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
int i;
- long lval;
+ int lval;
/**
* XXX - We probably want to remove these checks to optimize
@@ -167,7 +167,7 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
if (lu->lu_watchreq->lr_active == 0)
break;
}
- atomic_swap_long((long *)&lu->lu_watchreq->lr_locked,
+ atomic_swap_int((int *)&lu->lu_watchreq->lr_locked,
2, &lval);
if (lval == 0)
lu->lu_watchreq->lr_locked = 0;
@@ -188,7 +188,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
struct lockuser *lu_tmp, *lu_h;
struct lockreq *myreq;
int prio_h;
- long lval;
+ int lval;
/**
* XXX - We probably want to remove these checks to optimize
@@ -240,8 +240,8 @@ _lock_release(struct lock *lck, struct lockuser *lu)
if (lu_h != NULL) {
/* Give the lock to the highest priority user. */
if (lck->l_wakeup != NULL) {
- atomic_swap_long(
- (long *)&lu_h->lu_watchreq->lr_locked,
+ atomic_swap_int(
+ (int *)&lu_h->lu_watchreq->lr_locked,
0, &lval);
if (lval == 2)
/* Notify the sleeper */
@@ -249,11 +249,11 @@ _lock_release(struct lock *lck, struct lockuser *lu)
lu_h->lu_myreq->lr_watcher);
}
else
- atomic_store_rel_long(
+ atomic_store_rel_int(
&lu_h->lu_watchreq->lr_locked, 0);
} else {
if (lck->l_wakeup != NULL) {
- atomic_swap_long((long *)&myreq->lr_locked,
+ atomic_swap_int((int *)&myreq->lr_locked,
0, &lval);
if (lval == 2)
/* Notify the sleeper */
@@ -261,7 +261,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
}
else
/* Give the lock to the previous request. */
- atomic_store_rel_long(&myreq->lr_locked, 0);
+ atomic_store_rel_int(&myreq->lr_locked, 0);
}
} else {
/*
@@ -274,14 +274,14 @@ _lock_release(struct lock *lck, struct lockuser *lu)
lu->lu_watchreq = NULL;
lu->lu_myreq->lr_locked = 1;
if (lck->l_wakeup) {
- atomic_swap_long((long *)&myreq->lr_locked, 0, &lval);
+ atomic_swap_int((int *)&myreq->lr_locked, 0, &lval);
if (lval == 2)
/* Notify the sleeper */
lck->l_wakeup(lck, myreq->lr_watcher);
}
else
/* Give the lock to the previous request. */
- atomic_store_rel_long(&myreq->lr_locked, 0);
+ atomic_store_rel_int(&myreq->lr_locked, 0);
}
lu->lu_myreq->lr_active = 0;
}
@@ -289,7 +289,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
- atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 3);
+ atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}
void
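The swap's return value is what drives the sleep/wakeup handshake above: an acquirer that gives up spinning swaps in 2, and a releaser that swaps in 0 and reads back 2 knows it must call l_wakeup. A reduced sketch of that handoff, written against C11 <stdatomic.h> rather than the library's atomic_swap_int(); the function names below are illustrative stand-ins, not libkse/libpthread code.

#include <stdatomic.h>
#include <stdio.h>

/* lr_locked states used above: 0 = granted, 1 = busy, 2 = waiter asleep. */
static _Atomic int lr_locked = 1;		/* request starts out busy */

static void
waiter_gives_up_spinning(void)
{
	/* Advertise "about to sleep"; if the lock was granted meanwhile
	 * (old value 0), undo the marker and proceed instead of sleeping. */
	int old = atomic_exchange(&lr_locked, 2);
	if (old == 0)
		atomic_store(&lr_locked, 0);
	else
		printf("waiter: parked, waiting for wakeup\n");
}

static void
releaser_hands_off(void)
{
	/* Grant the lock; an old value of 2 means a waiter is asleep and
	 * must be woken explicitly (lck->l_wakeup in the real code). */
	int old = atomic_exchange(&lr_locked, 0);
	if (old == 2)
		printf("releaser: waking sleeper\n");
}

int
main(void)
{
	waiter_gives_up_spinning();
	releaser_hands_off();
	return (0);
}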
diff --git a/lib/libkse/sys/lock.h b/lib/libkse/sys/lock.h
index 9d4b994..917ca63 100644
--- a/lib/libkse/sys/lock.h
+++ b/lib/libkse/sys/lock.h
@@ -51,9 +51,9 @@ struct lock {
/* Try to make this >= CACHELINESIZE */
struct lockreq {
- volatile long lr_locked; /* lock granted = 0, busy otherwise */
struct lockuser *lr_watcher; /* only used for priority locks */
struct lockuser *lr_owner; /* only used for priority locks */
+ volatile int lr_locked; /* lock granted = 0, busy otherwise */
volatile int lr_active; /* non-zero if the lock is last lock for thread */
};
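Narrowing lr_locked from long to int matches the new atomic_swap_int()/atomic_store_rel_int() calls in lock.c, and the reorder keeps the struct naturally packed on LP64: pointer, pointer, int, int leaves no interior padding. A compile-time sketch of that layout claim; the struct and assert below are illustrative, not part of the header.

/* Layout sketch only: struct lockuser stays incomplete, the assert checks
 * the "pointer, pointer, int, int" packing on common ABIs. */
#include <stddef.h>

struct lockuser;

struct lockreq_sketch {
	struct lockuser	*lr_watcher;
	struct lockuser	*lr_owner;
	volatile int	 lr_locked;
	volatile int	 lr_active;
};

_Static_assert(sizeof(struct lockreq_sketch) ==
    2 * sizeof(struct lockuser *) + 2 * sizeof(int),
    "no interior or trailing padding in this layout");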
diff --git a/lib/libkse/thread/thr_creat.c b/lib/libkse/thread/thr_creat.c
index bba8ec3..478e037 100644
--- a/lib/libkse/thread/thr_creat.c
+++ b/lib/libkse/thread/thr_creat.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __creat(const char *, mode_t);
+
__weak_reference(___creat, creat);
int
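The extern declarations added here (and in the sibling thr_*.c files below) exist because the double-underscore entry points have no prototype in any public header, so each wrapper declares the one it calls. A hypothetical sketch of the wrapper shape, reusing the ___creat/__creat names from the diff; the body is illustrative and is not the actual library source.

#include <sys/types.h>
#include <pthread.h>

extern int __creat(const char *, mode_t);	/* no public prototype */

int
___creat(const char *path, mode_t mode)
{
	int ret;

	/* The real wrapper also brackets the call as a cancellation point;
	 * that bookkeeping is elided in this sketch. */
	ret = __creat(path, mode);
	return (ret);
}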
diff --git a/lib/libkse/thread/thr_pause.c b/lib/libkse/thread/thr_pause.c
index b6bcc51..aa97c77 100644
--- a/lib/libkse/thread/thr_pause.c
+++ b/lib/libkse/thread/thr_pause.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __pause(void);
+
__weak_reference(_pause, pause);
int
diff --git a/lib/libkse/thread/thr_sleep.c b/lib/libkse/thread/thr_sleep.c
index 0f02db7..b494e5b 100644
--- a/lib/libkse/thread/thr_sleep.c
+++ b/lib/libkse/thread/thr_sleep.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern unsigned int __sleep(unsigned int);
+
__weak_reference(_sleep, sleep);
unsigned int
diff --git a/lib/libkse/thread/thr_system.c b/lib/libkse/thread/thr_system.c
index 28976d3..ae26c9c 100644
--- a/lib/libkse/thread/thr_system.c
+++ b/lib/libkse/thread/thr_system.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __system(const char *);
+
__weak_reference(_system, system);
int
diff --git a/lib/libkse/thread/thr_tcdrain.c b/lib/libkse/thread/thr_tcdrain.c
index 6a2002b..86af9c2 100644
--- a/lib/libkse/thread/thr_tcdrain.c
+++ b/lib/libkse/thread/thr_tcdrain.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __tcdrain(int);
+
__weak_reference(_tcdrain, tcdrain);
int
diff --git a/lib/libkse/thread/thr_wait.c b/lib/libkse/thread/thr_wait.c
index 98f2c8d..b0a3af4 100644
--- a/lib/libkse/thread/thr_wait.c
+++ b/lib/libkse/thread/thr_wait.c
@@ -32,6 +32,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __wait(int *);
+
__weak_reference(_wait, wait);
pid_t
diff --git a/lib/libkse/thread/thr_wait4.c b/lib/libkse/thread/thr_wait4.c
index 9f23584..07ff79d 100644
--- a/lib/libkse/thread/thr_wait4.c
+++ b/lib/libkse/thread/thr_wait4.c
@@ -33,9 +33,12 @@
*/
#include <sys/types.h>
+#include "namespace.h"
#include <errno.h>
#include <sys/wait.h>
#include <pthread.h>
+#include "un-namespace.h"
+
#include "thr_private.h"
__weak_reference(__wait4, wait4);
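The namespace.h / un-namespace.h pair follows the usual FreeBSD libc convention: while namespace.h is in effect, public identifiers are remapped to their reserved underscore-prefixed forms, so the system headers included in between declare the hidden symbols as well. A rough illustration of the mechanism; the single macro below stands in for the real header's much longer list.

/* Illustrative only: what "namespace.h" conceptually does for this file. */
#define	wait4	_wait4			/* namespace.h: remap the public name */

#include <sys/types.h>
#include <sys/wait.h>			/* now also declares _wait4() */

#undef	wait4				/* un-namespace.h: restore the public name */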
diff --git a/lib/libkse/thread/thr_waitpid.c b/lib/libkse/thread/thr_waitpid.c
index 8ee3ce1..e09bbd5 100644
--- a/lib/libkse/thread/thr_waitpid.c
+++ b/lib/libkse/thread/thr_waitpid.c
@@ -34,6 +34,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __waitpid(pid_t, int *, int);
+
__weak_reference(_waitpid, waitpid);
pid_t
diff --git a/lib/libpthread/arch/alpha/include/atomic_ops.h b/lib/libpthread/arch/alpha/include/atomic_ops.h
index 90ffd8d..caa291b 100644
--- a/lib/libpthread/arch/alpha/include/atomic_ops.h
+++ b/lib/libpthread/arch/alpha/include/atomic_ops.h
@@ -45,6 +45,23 @@ atomic_swap_long(long *dst, long val, long *res)
" stq_c $2, %1\n" /* attempt the store; $2 clobbered */
" beq $2, 1b\n" /* it didn't work, loop */
" stq $1, %0\n" /* save value of *dst in *res */
+ " mb \n"
+ : "+m"(*res)
+ : "m"(*dst), "r"(val)
+ : "memory", "$1", "$2"); /* clobber t0 and t1 */
+}
+
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+ /* $1 and $2 are t0 and t1 respectively. */
+ __asm ( " ldl $1, %1\n" /* get cache line before lock */
+ "1: ldl_l $1, %1\n" /* load *dst asserting lock */
+ " mov %2, $2\n" /* save value to be swapped */
+ " stl_c $2, %1\n" /* attempt the store; $2 clobbered */
+ " beq $2, 1b\n" /* it didn't work, loop */
+ " stl $1, %0\n" /* save value of *dst in *res */
+ " mb \n"
: "+m"(*res)
: "m"(*dst), "r"(val)
: "memory", "$1", "$2"); /* clobber t0 and t1 */
diff --git a/lib/libpthread/arch/amd64/include/atomic_ops.h b/lib/libpthread/arch/amd64/include/atomic_ops.h
index 5edb533..980eb8e 100644
--- a/lib/libpthread/arch/amd64/include/atomic_ops.h
+++ b/lib/libpthread/arch/amd64/include/atomic_ops.h
@@ -33,17 +33,25 @@
* Atomic swap:
* Atomic (tmp = *dst, *dst = val), then *res = tmp
*
- * void atomic_swap_long(long *dst, long val, long *res);
+ * void atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res);
*/
static inline void
-atomic_swap_long(long *dst, long val, long *res)
+atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res)
{
__asm __volatile(
"xchgq %2, %1; movq %2, %0"
: "=m" (*res) : "m" (*dst), "r" (val) : "memory");
}
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+ __asm __volatile(
+ "xchgl %2, %1; movl %2, %0"
+ : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
+}
+
#define atomic_swap_ptr(d, v, r) \
- atomic_swap_long((long *)(d), (long)(v), (long *)(r))
+ atomic_swap64((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
#endif
diff --git a/lib/libpthread/arch/i386/include/atomic_ops.h b/lib/libpthread/arch/i386/include/atomic_ops.h
index 1825b8c..7bc3d1b 100644
--- a/lib/libpthread/arch/i386/include/atomic_ops.h
+++ b/lib/libpthread/arch/i386/include/atomic_ops.h
@@ -33,19 +33,19 @@
* Atomic swap:
* Atomic (tmp = *dst, *dst = val), then *res = tmp
*
- * void atomic_swap_long(long *dst, long val, long *res);
+ * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
*/
static inline void
-atomic_swap_long(long *dst, long val, long *res)
+atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
{
__asm __volatile(
"xchgl %2, %1; movl %2, %0"
: "=m" (*res) : "m" (*dst), "r" (val) : "memory");
}
-#define atomic_swap_int(d, v, r) \
- atomic_swap_long((long *)(d), (long)(v), (long *)(r))
-
-#define atomic_swap_ptr atomic_swap_int
+#define atomic_swap_ptr(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+#define atomic_swap_int(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
#endif
diff --git a/lib/libpthread/sys/lock.c b/lib/libpthread/sys/lock.c
index c67da33..9cae036 100644
--- a/lib/libpthread/sys/lock.c
+++ b/lib/libpthread/sys/lock.c
@@ -107,7 +107,7 @@ void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
int i;
- long lval;
+ int lval;
/**
* XXX - We probably want to remove these checks to optimize
@@ -167,7 +167,7 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
if (lu->lu_watchreq->lr_active == 0)
break;
}
- atomic_swap_long((long *)&lu->lu_watchreq->lr_locked,
+ atomic_swap_int((int *)&lu->lu_watchreq->lr_locked,
2, &lval);
if (lval == 0)
lu->lu_watchreq->lr_locked = 0;
@@ -188,7 +188,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
struct lockuser *lu_tmp, *lu_h;
struct lockreq *myreq;
int prio_h;
- long lval;
+ int lval;
/**
* XXX - We probably want to remove these checks to optimize
@@ -240,8 +240,8 @@ _lock_release(struct lock *lck, struct lockuser *lu)
if (lu_h != NULL) {
/* Give the lock to the highest priority user. */
if (lck->l_wakeup != NULL) {
- atomic_swap_long(
- (long *)&lu_h->lu_watchreq->lr_locked,
+ atomic_swap_int(
+ (int *)&lu_h->lu_watchreq->lr_locked,
0, &lval);
if (lval == 2)
/* Notify the sleeper */
@@ -249,11 +249,11 @@ _lock_release(struct lock *lck, struct lockuser *lu)
lu_h->lu_myreq->lr_watcher);
}
else
- atomic_store_rel_long(
+ atomic_store_rel_int(
&lu_h->lu_watchreq->lr_locked, 0);
} else {
if (lck->l_wakeup != NULL) {
- atomic_swap_long((long *)&myreq->lr_locked,
+ atomic_swap_int((int *)&myreq->lr_locked,
0, &lval);
if (lval == 2)
/* Notify the sleeper */
@@ -261,7 +261,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
}
else
/* Give the lock to the previous request. */
- atomic_store_rel_long(&myreq->lr_locked, 0);
+ atomic_store_rel_int(&myreq->lr_locked, 0);
}
} else {
/*
@@ -274,14 +274,14 @@ _lock_release(struct lock *lck, struct lockuser *lu)
lu->lu_watchreq = NULL;
lu->lu_myreq->lr_locked = 1;
if (lck->l_wakeup) {
- atomic_swap_long((long *)&myreq->lr_locked, 0, &lval);
+ atomic_swap_int((int *)&myreq->lr_locked, 0, &lval);
if (lval == 2)
/* Notify the sleeper */
lck->l_wakeup(lck, myreq->lr_watcher);
}
else
/* Give the lock to the previous request. */
- atomic_store_rel_long(&myreq->lr_locked, 0);
+ atomic_store_rel_int(&myreq->lr_locked, 0);
}
lu->lu_myreq->lr_active = 0;
}
@@ -289,7 +289,7 @@ _lock_release(struct lock *lck, struct lockuser *lu)
void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
- atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 3);
+ atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}
void
diff --git a/lib/libpthread/sys/lock.h b/lib/libpthread/sys/lock.h
index 9d4b994..917ca63 100644
--- a/lib/libpthread/sys/lock.h
+++ b/lib/libpthread/sys/lock.h
@@ -51,9 +51,9 @@ struct lock {
/* Try to make this >= CACHELINESIZE */
struct lockreq {
- volatile long lr_locked; /* lock granted = 0, busy otherwise */
struct lockuser *lr_watcher; /* only used for priority locks */
struct lockuser *lr_owner; /* only used for priority locks */
+ volatile int lr_locked; /* lock granted = 0, busy otherwise */
volatile int lr_active; /* non-zero if the lock is last lock for thread */
};
diff --git a/lib/libpthread/thread/thr_creat.c b/lib/libpthread/thread/thr_creat.c
index bba8ec3..478e037 100644
--- a/lib/libpthread/thread/thr_creat.c
+++ b/lib/libpthread/thread/thr_creat.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __creat(const char *, mode_t);
+
__weak_reference(___creat, creat);
int
diff --git a/lib/libpthread/thread/thr_pause.c b/lib/libpthread/thread/thr_pause.c
index b6bcc51..aa97c77 100644
--- a/lib/libpthread/thread/thr_pause.c
+++ b/lib/libpthread/thread/thr_pause.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __pause(void);
+
__weak_reference(_pause, pause);
int
diff --git a/lib/libpthread/thread/thr_sleep.c b/lib/libpthread/thread/thr_sleep.c
index 0f02db7..b494e5b 100644
--- a/lib/libpthread/thread/thr_sleep.c
+++ b/lib/libpthread/thread/thr_sleep.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern unsigned int __sleep(unsigned int);
+
__weak_reference(_sleep, sleep);
unsigned int
diff --git a/lib/libpthread/thread/thr_system.c b/lib/libpthread/thread/thr_system.c
index 28976d3..ae26c9c 100644
--- a/lib/libpthread/thread/thr_system.c
+++ b/lib/libpthread/thread/thr_system.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __system(const char *);
+
__weak_reference(_system, system);
int
diff --git a/lib/libpthread/thread/thr_tcdrain.c b/lib/libpthread/thread/thr_tcdrain.c
index 6a2002b..86af9c2 100644
--- a/lib/libpthread/thread/thr_tcdrain.c
+++ b/lib/libpthread/thread/thr_tcdrain.c
@@ -33,6 +33,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __tcdrain(int);
+
__weak_reference(_tcdrain, tcdrain);
int
diff --git a/lib/libpthread/thread/thr_wait.c b/lib/libpthread/thread/thr_wait.c
index 98f2c8d..b0a3af4 100644
--- a/lib/libpthread/thread/thr_wait.c
+++ b/lib/libpthread/thread/thr_wait.c
@@ -32,6 +32,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __wait(int *);
+
__weak_reference(_wait, wait);
pid_t
diff --git a/lib/libpthread/thread/thr_wait4.c b/lib/libpthread/thread/thr_wait4.c
index 9f23584..07ff79d 100644
--- a/lib/libpthread/thread/thr_wait4.c
+++ b/lib/libpthread/thread/thr_wait4.c
@@ -33,9 +33,12 @@
*/
#include <sys/types.h>
+#include "namespace.h"
#include <errno.h>
#include <sys/wait.h>
#include <pthread.h>
+#include "un-namespace.h"
+
#include "thr_private.h"
__weak_reference(__wait4, wait4);
diff --git a/lib/libpthread/thread/thr_waitpid.c b/lib/libpthread/thread/thr_waitpid.c
index 8ee3ce1..e09bbd5 100644
--- a/lib/libpthread/thread/thr_waitpid.c
+++ b/lib/libpthread/thread/thr_waitpid.c
@@ -34,6 +34,8 @@
#include <pthread.h>
#include "thr_private.h"
+extern int __waitpid(pid_t, int *, int);
+
__weak_reference(_waitpid, waitpid);
pid_t