Diffstat (limited to 'arch/ia64/include/asm')
-rw-r--r--  arch/ia64/include/asm/acpi.h                    6
-rw-r--r--  arch/ia64/include/asm/asm-offsets.h             1
-rw-r--r--  arch/ia64/include/asm/bitops.h                  2
-rw-r--r--  arch/ia64/include/asm/cacheflush.h              1
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h             2
-rw-r--r--  arch/ia64/include/asm/elf.h                     1
-rw-r--r--  arch/ia64/include/asm/hw_irq.h                  6
-rw-r--r--  arch/ia64/include/asm/io.h                      2
-rw-r--r--  arch/ia64/include/asm/irq.h                     2
-rw-r--r--  arch/ia64/include/asm/kvm.h                     1
-rw-r--r--  arch/ia64/include/asm/kvm_host.h                1
-rw-r--r--  arch/ia64/include/asm/mca.h                     5
-rw-r--r--  arch/ia64/include/asm/numa.h                    2
-rw-r--r--  arch/ia64/include/asm/perfmon_default_smpl.h    2
-rw-r--r--  arch/ia64/include/asm/rwsem.h                   2
-rw-r--r--  arch/ia64/include/asm/sn/shubio.h               2
-rw-r--r--  arch/ia64/include/asm/socket.h                  2
-rw-r--r--  arch/ia64/include/asm/spinlock.h              136
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h         10
-rw-r--r--  arch/ia64/include/asm/swiotlb.h                 2
-rw-r--r--  arch/ia64/include/asm/unistd.h                  3
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h         28
22 files changed, 116 insertions, 103 deletions
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 91df968..7ae5889 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -132,6 +132,12 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
#define acpi_unlazy_tlb(x)
#ifdef CONFIG_ACPI_NUMA
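
The two new hooks plug into the generic ACPI processor code's _PDC (Processor Driver Capabilities) setup. A hedged sketch of the consumer side — the caller name and the exact buffer constants follow the generic-code convention and are not part of this patch:

	/* Sketch: generic code building the 3-dword _PDC buffer and
	 * letting the architecture OR in its capability bits. */
	static void set_pdc_bits(u32 *buf)		/* name illustrative */
	{
		buf[0] = ACPI_PDC_REVISION_ID;	/* _PDC revision        */
		buf[1] = 1;			/* one capability dword */
		buf[2] = 0;			/* capabilities to fill */

		if (arch_has_acpi_pdc())	/* always true on ia64  */
			arch_acpi_set_pdc_bits(buf);	/* ORs in ACPI_PDC_EST_CAPABILITY_SMP */
	}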
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787..6ebc229 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
* @addr: Address to start counting from
*
* Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
*/
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
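
For readers unfamiliar with the memory-ordering argument in that comment: a single release store suffices because it orders every access made while the bit was held before the cleared bit becomes visible. A portable C11 sketch of the idea — like __clear_bit_unlock it is deliberately not an atomic read-modify-write, so it assumes no other CPU touches the other bits of the word concurrently:

	#include <stdatomic.h>

	/* Sketch: clearing a "lock" bit with release semantics. */
	static void bit_unlock_release(_Atomic unsigned long *word, int nr)
	{
		unsigned long v = atomic_load_explicit(word, memory_order_relaxed);

		/* the release store orders all prior accesses before the
		 * cleared bit becomes visible to other CPUs */
		atomic_store_explicit(word, v & ~(1UL << nr),
				      memory_order_release);
	}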
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index c8ce271..429eefc 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -25,6 +25,7 @@
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &(page)->flags); \
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79c..7d09a09 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
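
The "- 1" is a boundary fix: the last byte a transfer touches is addr + size - 1, and the old form both rejected ranges ending exactly at the mask limit and could wrap if addr + size overflows the address type. A worked example with illustrative values:

	/* 32-bit DMA mask, buffer ending exactly at the 4 GiB boundary */
	dma_addr_t addr = 0xfffff000;
	size_t     size = 0x1000;	/* last byte: 0xffffffff */
	u64        mask = 0xffffffff;	/* DMA_BIT_MASK(32)      */

	/* old: addr + size     = 0x100000000 >  mask -> wrongly rejected */
	/* new: addr + size - 1 = 0x0ffffffff <= mask -> accepted         */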
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee..e14108b 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b3..bf2e374 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
extern int ia64_first_device_vector;
extern int ia64_last_device_vector;
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve a vector with lower priority than the device vectors for the "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31
+#else
#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
+#endif
#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
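
For the SMP generic/DIG configurations this costs exactly one device vector; a quick worked check of the ranges:

	/*
	 * Before: devices at 0x30..0xe7       ->  0xe7 - 0x30 + 1 = 184 vectors
	 * After:  IPI at 0x30, devices 0x31..0xe7 -> 0xe7 - 0x31 + 1 = 183 vectors
	 *
	 * i.e. one device vector is traded for a fixed, known vector for
	 * the "move IRQ" interrupt.
	 */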
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e..cc8335e 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
/*
* String version of IO memory access ops:
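
early_ioremap()/early_iounmap() give boot-time code a way to map firmware or MMIO areas before the regular ioremap() machinery is initialized; the unmap must pass the same size as the map. A hedged usage sketch (the function name and the idea of reading a table signature are illustrative):

	static void __init peek_table(unsigned long phys, unsigned long len)
	{
		void __iomem *va = early_ioremap(phys, len);

		if (va) {
			u32 sig = readl(va);	/* e.g. a table signature */
			/* ... validate sig, copy what is needed ... */
			early_iounmap(va, len);	/* size must match the map */
		}
	}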
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 5282546..91b920fd 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <linux/cpumask.h>
-#include <asm-ia64/nr-irqs.h>
+#include <generated/nr-irqs.h>
static __inline__ int
irq_canonicalize (int irq)
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 18a7e49..bc90c75 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -60,6 +60,7 @@ struct kvm_ioapic_state {
#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE 1
#define KVM_IRQCHIP_IOAPIC 2
+#define KVM_NR_IRQCHIPS 3
#define KVM_CONTEXT_SIZE 8*1024
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index d9b6325..a362e67 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -475,7 +475,6 @@ struct kvm_arch {
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
int iommu_flags;
- struct hlist_head irq_ack_notifier_list;
unsigned long irq_sources_bitmap;
unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf..43f96ab 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
unsigned long os_status; /* OS status to SAL, enum below */
unsigned long context; /* 0 if return to same context
1 if return to new context */
+
+ /* I-resources */
+ unsigned long iip;
+ unsigned long ipsr;
+ unsigned long ifs;
};
enum {
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff5..6a8a27c 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
#include <asm/mmzone.h>
-#define NUMA_NO_NODE -1
-
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
index 48822c0..74724b2 100644
--- a/arch/ia64/include/asm/perfmon_default_smpl.h
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -67,7 +67,7 @@ typedef struct {
unsigned long ip; /* where the overflow interrupt happened */
unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
- unsigned short cpu; /* cpu on which the overfow occured */
+ unsigned short cpu; /* cpu on which the overflow occurred */
unsigned short set; /* event set active when overflow occurred */
int tgid; /* thread group id (for NPTL, this is getpid()) */
} pfm_default_smpl_entry_t;
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b..e876268 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
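
The initializer change passes the lock's name through to __SPIN_LOCK_UNLOCKED(), which lockdep uses to identify the lock class. In practice the initializer is reached via DECLARE_RWSEM; a minimal usage sketch (the semaphore name is illustrative, API from linux/rwsem.h):

	static DECLARE_RWSEM(my_rwsem);

	static void reader(void)
	{
		down_read(&my_rwsem);	/* shared (reader) side */
		/* ... read shared state ... */
		up_read(&my_rwsem);
	}

	static void writer(void)
	{
		down_write(&my_rwsem);	/* exclusive (writer) side */
		/* ... modify shared state ... */
		up_write(&my_rwsem);
	}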
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index 22a6f18..6052422 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t;
#define IIO_IIDSR_LVL_SHIFT 0
#define IIO_IIDSR_LVL_MASK 0x000000ff
-/* Xtalk timeout threshhold register (IIO_IXTT) */
+/* Xtalk timeout threshold register (IIO_IXTT) */
#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
#define IXTT_RRSP_PS_SHFT 32 /* read response TO prescaler */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 0b0d5ff..51427ea 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -69,4 +69,6 @@
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
+#define SO_RXQ_OVFL 40
+
#endif /* _ASM_IA64_SOCKET_H */
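
SO_RXQ_OVFL asks the kernel to attach a control message to each received datagram carrying the count of packets dropped due to receive-queue overflow. A hedged userspace sketch (fd is assumed to be an open datagram socket; needs <sys/socket.h>, <string.h>, <stdint.h>):

	static void read_with_drop_count(int fd)
	{
		char pkt[2048], ctrl[CMSG_SPACE(sizeof(uint32_t))];
		struct iovec iov = { .iov_base = pkt, .iov_len = sizeof(pkt) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
		};
		struct cmsghdr *c;
		int on = 1;

		setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &on, sizeof(on));

		if (recvmsg(fd, &msg, 0) < 0)
			return;

		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
			if (c->cmsg_level == SOL_SOCKET &&
			    c->cmsg_type == SO_RXQ_OVFL) {
				uint32_t dropped;
				memcpy(&dropped, CMSG_DATA(c), sizeof(dropped));
				/* dropped = packets lost to rcvbuf overflow */
			}
	}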
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 30bb930..1a91c91 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
#include <asm/intrinsics.h>
#include <asm/system.h>
-#define __raw_spin_lock_init(x) ((x)->lock = 0)
+#define arch_spin_lock_init(x) ((x)->lock = 0)
/*
* Ticket locks are conceptually two parts, one indicating the current head of
@@ -25,108 +25,128 @@
* by atomically noting the tail and incrementing it by one (thus adding
* ourselves to the queue and noting our position), then waiting until the head
* becomes equal to the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
*
- * 63                              32  31                 0
+ * 31                  17  16    15  14                   0
* +----------------------------------------------------+
- * |       next_ticket_number       |    now_serving    |
+ * |     now_serving      | padding |    next_ticket    |
* +----------------------------------------------------+
*/
-#define TICKET_SHIFT 32
+#define TICKET_SHIFT 17
+#define TICKET_BITS 15
+#define TICKET_MASK ((1 << TICKET_BITS) - 1)
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
- int *p = (int *)&lock->lock, turn, now_serving;
+ int *p = (int *)&lock->lock, ticket, serve;
- now_serving = *p;
- turn = ia64_fetchadd(1, p+1, acq);
+ ticket = ia64_fetchadd(1, p, acq);
- if (turn == now_serving)
+ if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
return;
- do {
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+ if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+ return;
cpu_relax();
- } while (ACCESS_ONCE(*p) != turn);
+ }
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock), try;
-
- if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
- try = tmp + (1L << TICKET_SHIFT);
+ int tmp = ACCESS_ONCE(lock->lock);
- return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
- }
+ if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+ return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
return 0;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
- int *p = (int *)&lock->lock;
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
- (void)ia64_fetchadd(1, p, rel);
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+ ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ int *p = (int *)&lock->lock, ticket;
+
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+ if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+ return;
+ cpu_relax();
+ }
+}
+
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
- return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
- return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+ return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
- cpu_relax();
+ __ticket_spin_unlock_wait(lock);
}
-#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -149,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "p6", "p7", "r2", "memory");
}
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -168,16 +188,16 @@ do { \
#endif /* !ASM_SUPPORTED */
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -201,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
register long result; \
\
@@ -215,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
(result == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -224,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#else /* !ASM_SUPPORTED */
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -237,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
} while (ia64_val); \
})
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
__u64 ia64_val; \
__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -245,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -253,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
{
union {
- raw_rwlock_t lock;
+ arch_rwlock_t lock;
__u32 word;
} old, new;
old.lock = new.lock = *x;
@@ -265,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_IA64_SPINLOCK_H */
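
Stripped of the ia64 intrinsics (the acquiring fetchadd, the ld4.c.nc/ALAT-based spin that avoids hammering the cache line, the half-word release store in unlock), the algorithm described in the comment reduces to the portable C11 sketch below. It is a conceptual model only: it keeps the two counters in separate words for clarity, whereas the ia64 code packs both 15-bit fields into one 32-bit word so that a single fetchadd both takes a ticket and samples now_serving.

	#include <stdatomic.h>

	/* Conceptual ticket lock — not the kernel's packed-word layout. */
	typedef struct {
		_Atomic unsigned int next_ticket;	/* tail: next number to hand out */
		_Atomic unsigned int now_serving;	/* head: number currently served */
	} ticket_lock_t;

	static void ticket_lock(ticket_lock_t *l)
	{
		/* "take a number": atomically grab and bump the tail */
		unsigned int me = atomic_fetch_add_explicit(&l->next_ticket, 1,
							    memory_order_relaxed);

		/* wait until the head reaches our number; acquire ordering
		 * makes us observe the previous holder's writes */
		while (atomic_load_explicit(&l->now_serving,
					    memory_order_acquire) != me)
			;	/* the kernel calls cpu_relax() here */
	}

	static void ticket_unlock(ticket_lock_t *l)
	{
		/* release store publishes our writes, then serves the next ticket */
		atomic_fetch_add_explicit(&l->now_serving, 1,
					  memory_order_release);
	}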
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index b61d136..e2b42a5 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,16 +6,16 @@
#endif
typedef struct {
- volatile unsigned long lock;
-} raw_spinlock_t;
+ volatile unsigned int lock;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
#endif
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7..f0acde6 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
-extern int swiotlb_force;
-
#ifdef CONFIG_SWIOTLB
extern int swiotlb;
extern void pci_swiotlb_init(void);
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 5a5347f..10a8f21 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -311,11 +311,12 @@
#define __NR_preadv 1319
#define __NR_pwritev 1320
#define __NR_rt_tgsigqueueinfo 1321
+#define __NR_recvmmsg 1322
#ifdef __KERNEL__
-#define NR_syscalls 298 /* length of syscall table */
+#define NR_syscalls 299 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
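
The two numbers move together: ia64 system calls are numbered from 1024, so the table length follows directly from the highest assigned syscall number:

	/*
	 *	NR_syscalls = __NR_recvmmsg - 1024 + 1
	 *	            = 1322 - 1024 + 1
	 *	            = 299
	 *
	 * matching the 298 -> 299 bump above.
	 */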
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54..67455c2 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
#include <xen/interface/xen.h>
#include <xen/interface/version.h> /* to compile feature.c */
#include <xen/features.h> /* to compile xen-netfront.c */
+#include <xen/xen.h>
#include <asm/xen/hypercall.h>
-/* xen_domain_type is set before executing any C code by early_xen_setup */
-enum xen_domain_type {
- XEN_NATIVE, /* running on bare hardware */
- XEN_PV_DOMAIN, /* running in a PV domain */
- XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type XEN_NATIVE
-#endif
-
-#define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain() && \
- xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain() (xen_domain() && \
- xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#define xen_initial_domain() (xen_pv_domain() && \
- (xen_start_info->flags & SIF_INITDOMAIN))
-#else
-#define xen_initial_domain() (0)
-#endif
-
-
#ifdef CONFIG_XEN
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;