author     Chris Zankel <czankel@tensilica.com>     2005-09-22 21:44:23 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-22 22:17:37 -0700
commit     288a60cf4d7cc35f84f46cd8ffd0b34f9d8e7346 (patch)
tree       24691394b7e1aaa0b8e9a64f0e1723df3f3974bb /include/asm-xtensa
parent     fac97ae0b1a206e2952baf1f9eb46305d673adc6 (diff)
[PATCH] xtensa: remove io_remap_page_range and minor clean-ups
Remove io_remap_page_range() from all of Linux 2.6.x (as requested and
suggested by Randy Dunlap) and minor clean-ups.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
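The interface that replaces the removed call is io_remap_pfn_range(). Below is a minimal sketch of a driver mmap handler using it; the device name, MMIO base/size and error handling are hypothetical and not taken from this patch, and it assumes the architecture provides pgprot_noncached().

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

#define FOO_MMIO_PHYS 0xf0000000UL      /* hypothetical register base */
#define FOO_MMIO_SIZE 0x1000UL          /* hypothetical register window size */

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > FOO_MMIO_SIZE)
                return -EINVAL;

        /* map the device registers uncached into user space */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return io_remap_pfn_range(vma, vma->vm_start,
                                  FOO_MMIO_PHYS >> PAGE_SHIFT,
                                  size, vma->vm_page_prot);
}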
Diffstat (limited to 'include/asm-xtensa')
-rw-r--r--   include/asm-xtensa/atomic.h       2
-rw-r--r--   include/asm-xtensa/bitops.h       2
-rw-r--r--   include/asm-xtensa/hardirq.h      1
-rw-r--r--   include/asm-xtensa/semaphore.h   49
-rw-r--r--   include/asm-xtensa/system.h      16
5 files changed, 14 insertions, 56 deletions
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 24f86f0..12b5732 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -22,7 +22,7 @@ typedef struct { volatile int counter; } atomic_t;
#include <asm/processor.h>
#include <asm/system.h>
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+#define ATOMIC_INIT(i) { (i) }
/*
* This Xtensa implementation assumes that the right mechanism
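The ATOMIC_INIT() change drops the compound-literal cast so the macro can also be used inside the initializer of an enclosing static object, which the rewritten __SEMAPHORE_INITIALIZER later in this same patch relies on. A minimal sketch with a hypothetical structure:

#include <asm/atomic.h>

struct foo_stats {                      /* hypothetical example structure */
        atomic_t opens;
        int flags;
};

/* with the brace-only form, ATOMIC_INIT() nests in a static initializer;
 * the old "(atomic_t) { (i) }" compound-literal form can be rejected as
 * a non-constant initializer in this position by compilers of the day */
static struct foo_stats stats = {
        .opens = ATOMIC_INIT(0),
        .flags = 0,
};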
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index d395ef2..e76ee88 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -174,7 +174,7 @@ static __inline__ int test_bit(int nr, const volatile void *addr)
return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
}
-#if XCHAL_HAVE_NSAU
+#if XCHAL_HAVE_NSA
static __inline__ int __cntlz (unsigned long x)
{
diff --git a/include/asm-xtensa/hardirq.h b/include/asm-xtensa/hardirq.h
index e07c76c..aa9c1ad 100644
--- a/include/asm-xtensa/hardirq.h
+++ b/include/asm-xtensa/hardirq.h
@@ -23,6 +23,7 @@ typedef struct {
unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
+void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#endif /* _XTENSA_HARDIRQ_H */
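The hardirq.h hunk only adds the prototype for ack_bad_irq(); the definition lives in the architecture's irq code. For illustration, a conventional body looks roughly like the sketch below (the message text is illustrative, not quoted from the tree):

#include <linux/kernel.h>

/* sketch: report an interrupt that no handler claimed */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_CRIT "unexpected IRQ %u\n", irq);
}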
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index db740b8..09e89ab 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -20,28 +20,19 @@ struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
-#if WAITQUEUE_DEBUG
- long __magic;
-#endif
};
-#if WAITQUEUE_DEBUG
-# define __SEM_DEBUG_INIT(name) \
- , (int)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
- { ATOMIC_INIT(count), \
- 0, \
- __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
- __SEM_DEBUG_INIT(name) }
+#define __SEMAPHORE_INITIALIZER(name,n) \
+{ \
+ .count = ATOMIC_INIT(n), \
+ .sleepers = 0, \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+}
-#define __MUTEX_INITIALIZER(name) \
+#define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name, 1)
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
@@ -49,17 +40,8 @@ struct semaphore {
static inline void sema_init (struct semaphore *sem, int val)
{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
- */
atomic_set(&sem->count, val);
init_waitqueue_head(&sem->wait);
-#if WAITQUEUE_DEBUG
- sem->__magic = (int)&sem->__magic;
-#endif
}
static inline void init_MUTEX (struct semaphore *sem)
@@ -81,9 +63,7 @@ extern spinlock_t semaphore_wake_lock;
static inline void down(struct semaphore * sem)
{
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
+ might_sleep();
if (atomic_sub_return(1, &sem->count) < 0)
__down(sem);
@@ -92,9 +72,8 @@ static inline void down(struct semaphore * sem)
static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
+
+ might_sleep();
if (atomic_sub_return(1, &sem->count) < 0)
ret = __down_interruptible(sem);
@@ -104,9 +83,6 @@ static inline int down_interruptible(struct semaphore * sem)
static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
if (atomic_sub_return(1, &sem->count) < 0)
ret = __down_trylock(sem);
@@ -119,9 +95,6 @@ static inline int down_trylock(struct semaphore * sem)
*/
static inline void up(struct semaphore * sem)
{
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
if (atomic_add_return(1, &sem->count) <= 0)
__up(sem);
}
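The semaphore rework keeps the public interface unchanged: the initializer now uses designated initializers, and the old WAITQUEUE_DEBUG magic-number checks are replaced by might_sleep(), which (with the matching debug option enabled) warns when down() or down_interruptible() is reached from atomic context. A usage sketch with a hypothetical lock and function:

#include <asm/semaphore.h>
#include <linux/errno.h>

static DECLARE_MUTEX(foo_cfg_sem);      /* hypothetical; semaphore with count 1 */

static int foo_update_cfg(void)
{
        /* may sleep: must not be called under a spinlock or with IRQs off */
        if (down_interruptible(&foo_cfg_sem))
                return -ERESTARTSYS;
        /* ... modify configuration ... */
        up(&foo_cfg_sem);
        return 0;
}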
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index f093932..9284867 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -189,20 +189,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#define tas(ptr) (xchg((ptr),1))
-#if ( __XCC__ == 1 )
-
-/* xt-xcc processes __inline__ differently than xt-gcc and decides to
- * insert an out-of-line copy of function __xchg. This presents the
- * unresolved symbol at link time of __xchg_called_with_bad_pointer,
- * even though such a function would never be called at run-time.
- * xt-gcc always inlines __xchg, and optimizes away the undefined
- * bad_pointer function.
- */
-
-#define xchg(ptr,x) xchg_u32(ptr,x)
-
-#else /* assume xt-gcc */
-
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/*
@@ -224,8 +210,6 @@ __xchg(unsigned long x, volatile void * ptr, int size)
return x;
}
-#endif
-
extern void set_except_vector(int n, void *addr);
static inline void spill_registers(void)
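With the __XCC__ special case removed from system.h, every Xtensa toolchain goes through the generic xchg() macro, which depends on __xchg() being inlined so that the deliberately undefined __xchg_called_with_bad_pointer() call for unsupported operand sizes is optimized away or caught at link time. A brief usage sketch with a hypothetical helper:

#include <asm/system.h>

static int foo_test_and_mark(volatile int *flag)
{
        /* 4-byte operand: __xchg() resolves to xchg_u32() and returns the old value */
        return xchg(flag, 1);
}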