Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/atomic.h        7
-rw-r--r--  arch/ia64/include/asm/barrier.h       3
-rw-r--r--  arch/ia64/include/asm/bitops.h        9
-rw-r--r--  arch/ia64/include/asm/pci.h           6
-rw-r--r--  arch/ia64/include/uapi/asm/cmpxchg.h  9
5 files changed, 15 insertions(+), 19 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 6e6fe18..0f8bf48 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <asm/intrinsics.h>
+#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#endif /* _ASM_IA64_ATOMIC_H */
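
For callers, the conversion is mechanical: the four operation-specific barriers removed above collapse into the generic pair that barrier.h gains next. A minimal sketch of an affected call site (the put_obj() helper and its refs argument are invented for illustration, not taken from this patch):

        /* illustrative caller showing the old -> new macro mapping */
        static void put_obj(atomic_t *refs)
        {
                /* was: smp_mb__before_atomic_dec(); */
                smp_mb__before_atomic();
                atomic_dec(refs);
        }

The same substitution applies to the _inc variants and, per the bitops.h change below, to smp_mb__before_clear_bit()/smp_mb__after_clear_bit().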
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index d0a69aa..a48957c 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -55,6 +55,9 @@
#endif
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
/*
* IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq;
* no need for asm trickery!
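
A hedged illustration of what that comment claims: on ia64, gcc compiles volatile accesses with release/acquire semantics, so a plain volatile store/load pair already carries the ordering other architectures need explicit fence instructions for. The flag variable and function names below are invented for this sketch:

        static volatile int flag;

        static void publish(void)
        {
                flag = 1;       /* volatile store: gcc emits st.rel */
        }

        static int consume(void)
        {
                return flag;    /* volatile load: gcc emits ld.acq */
        }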
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index c27eccd..71e8145 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
+#include <asm/barrier.h>
/**
* set_bit - Atomically set a bit in memory
@@ -65,12 +66,6 @@ __set_bit (int nr, volatile void *addr)
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
-/*
- * clear_bit() has "acquire" semantics.
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() do { /* skip */; } while (0)
-
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
@@ -78,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static __inline__ void
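
The updated comment describes the usual release pattern: clear_bit() alone orders nothing, so a caller dropping a bit-based lock must fence first. A short sketch of that pattern, with an invented lock word and bit number:

        static unsigned long lock_word;         /* illustrative */
        #define MY_LOCK_BIT     0               /* illustrative */

        static void my_bit_unlock(void)
        {
                /* order the critical section's stores before the release */
                smp_mb__before_atomic();
                clear_bit(MY_LOCK_BIT, &lock_word);
        }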
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 7d41cc0..52af5ed 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -50,12 +50,6 @@ struct pci_dev;
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
-static inline void
-pcibios_penalize_isa_irq (int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
#include <asm-generic/pci-dma-compat.h>
#ifdef CONFIG_PCI
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 4f37dbb..f35109b 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -118,6 +118,15 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
#define cmpxchg_rel(ptr, o, n) \
ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+/*
+ * Worse still, early processor implementations actually just ignored
+ * the acquire/release and did a full fence all the time.  Unfortunately,
+ * a lot of badly written code that used .acq when it really wanted .rel
+ * became legacy out in the wild, so when we built a CPU that strictly
+ * honored .acq or .rel, all that code started breaking. We had to
+ * back-pedal and keep the "legacy" behavior of a full fence :-(
+ */
+
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
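
Even with the full-fence legacy behavior described in the new comment, the acq/rel pairing remains the intended usage. A minimal test-and-set lock sketched on top of these macros; the lock variable and its 0/1 encoding are assumptions for illustration, not part of this header:

        static unsigned int my_lock;            /* 0 = free, 1 = held */

        static void my_lock_acquire(void)
        {
                /* spin until the 0 -> 1 transition succeeds */
                while (cmpxchg_acq(&my_lock, 0, 1) != 0)
                        cpu_relax();
        }

        static void my_lock_release(void)
        {
                /* 1 -> 0 with release semantics */
                (void) cmpxchg_rel(&my_lock, 1, 0);
        }

A plain st.rel store would also suffice for the release path; cmpxchg_rel is used here only to stay within this header's API.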