Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/io.h        |  4
-rw-r--r--  include/asm-generic/pgtable.h   |  8
-rw-r--r--  include/asm-generic/qspinlock.h | 27
-rw-r--r--  include/asm-generic/seccomp.h   | 14
-rw-r--r--  include/asm-generic/siginfo.h   | 15
5 files changed, 50 insertions(+), 18 deletions(-)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index eed3bbe..002b81f 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readl_relaxed readl
#endif
-#ifndef readq_relaxed
+#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif
@@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define writel_relaxed writel
#endif
-#ifndef writeq_relaxed
+#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif
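
Note on the io.h change: readq_relaxed/writeq_relaxed now fall back to readq/writeq only when those 64-bit accessors actually exist, so 32-bit architectures no longer inherit a definition that cannot work. A driver that wants a relaxed 64-bit read everywhere can detect the missing accessor and split the access itself; similar fallbacks already exist in the tree (io-64-nonatomic-lo-hi.h). A minimal sketch of that fallback, assuming <linux/io.h> and a device whose low word sits at the lower address (the helper name is illustrative, not part of this patch):

static inline u64 my_readq_relaxed(const volatile void __iomem *addr)
{
#ifdef readq_relaxed
	return readq_relaxed(addr);
#else
	/* no native 64-bit MMIO read: combine two 32-bit reads */
	u64 lo = readl_relaxed(addr);
	u64 hi = readl_relaxed(addr + 4);

	return (hi << 32) | lo;
#endif
}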
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9401f48..d4458b6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -806,4 +806,12 @@ static inline int pmd_clear_huge(pmd_t *pmd)
#define io_remap_pfn_range remap_pfn_range
#endif
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
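
Note on the pgtable.h change: architectures that do not provide their own has_transparent_hugepage() now get a compile-time constant derived from CONFIG_TRANSPARENT_HUGEPAGE, so generic mm code can call it unconditionally. A hedged sketch of a caller (the function below is illustrative, not part of this patch):

static bool thp_usable(void)
{
	/*
	 * Constant 0/1 with the generic fallback above; architectures
	 * that override the macro may do a runtime CPU-feature check.
	 */
	if (!has_transparent_hugepage())
		return false;

	return true;
}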
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 35a52a8..6bd0570 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -28,7 +28,30 @@
*/
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
- return atomic_read(&lock->val);
+ /*
+ * queued_spin_lock_slowpath() can ACQUIRE the lock before
+ * issuing the unordered store that sets _Q_LOCKED_VAL.
+ *
+ * See both smp_cond_acquire() sites for more detail.
+ *
+ * This however means that in code like:
+ *
+ * spin_lock(A) spin_lock(B)
+ * spin_unlock_wait(B) spin_is_locked(A)
+ * do_something() do_something()
+ *
+ * Both CPUs can end up running do_something() because the store
+ * setting _Q_LOCKED_VAL will pass through the loads in
+ * spin_unlock_wait() and/or spin_is_locked().
+ *
+ * Avoid this by issuing a full memory barrier between the spin_lock()
+ * and the loads in spin_unlock_wait() and spin_is_locked().
+ *
+ * Note that regular mutual exclusion doesn't care about this
+ * delayed store.
+ */
+ smp_mb();
+ return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}
/**
@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
*/
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
+ /* See queued_spin_is_locked() */
+ smp_mb();
while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
cpu_relax();
}
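
Note on the qspinlock.h change: the scenario in the comment, expanded into two complete code paths (lock names and do_something() are illustrative, as in the comment; this is not part of the patch). Without the smp_mb() in spin_is_locked()/spin_unlock_wait(), the store that sets _Q_LOCKED_VAL can be delayed past the other CPU's load, and both paths may run do_something() concurrently:

static DEFINE_SPINLOCK(A);
static DEFINE_SPINLOCK(B);

void cpu0_path(void)
{
	spin_lock(&A);
	spin_unlock_wait(&B);		/* must observe cpu1_path()'s lock of B */
	do_something();
	spin_unlock(&A);
}

void cpu1_path(void)
{
	spin_lock(&B);
	if (!spin_is_locked(&A))	/* must observe cpu0_path()'s lock of A */
		do_something();
	spin_unlock(&B);
}

With the full barrier ordering each lock acquisition before the subsequent load, at least one of the two CPUs observes the other's lock as held.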
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index c9ccafa..e74072d 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -29,4 +29,18 @@
#define __NR_seccomp_sigreturn __NR_rt_sigreturn
#endif
+#ifdef CONFIG_COMPAT
+#ifndef get_compat_mode1_syscalls
+static inline const int *get_compat_mode1_syscalls(void)
+{
+ static const int mode1_syscalls_32[] = {
+ __NR_seccomp_read_32, __NR_seccomp_write_32,
+ __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+ 0, /* null terminated */
+ };
+ return mode1_syscalls_32;
+}
+#endif
+#endif /* CONFIG_COMPAT */
+
#endif /* _ASM_GENERIC_SECCOMP_H */
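
Note on the seccomp.h change: get_compat_mode1_syscalls() gives the strict-mode (mode 1) check a compat syscall table with the same shape as the native one, terminated by a 0 entry. A hedged sketch of how a caller might walk it (all names other than get_compat_mode1_syscalls() are illustrative, not the actual kernel/seccomp.c code):

static bool mode1_syscall_allowed(int nr, const int *table)
{
	/* the table is terminated by a 0 entry, see above */
	for (; *table; table++)
		if (*table == nr)
			return true;

	return false;
}

	/* usage, picking the table for compat vs. native tasks */
	allowed = mode1_syscall_allowed(this_syscall,
			is_compat ? get_compat_mode1_syscalls()
				  : native_mode1_syscalls);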
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 3d1a3af..a2508a8 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -17,21 +17,6 @@
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-}
-
-#endif
-
extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif