Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bitops/ext2-non-atomic.h |  2
-rw-r--r--  include/asm-generic/bitops/le.h               |  4
-rw-r--r--  include/asm-generic/bug.h                     | 17
-rw-r--r--  include/asm-generic/percpu.h                  | 97
-rw-r--r--  include/asm-generic/resource.h                |  7
-rw-r--r--  include/asm-generic/tlb.h                     |  1
-rw-r--r--  include/asm-generic/vmlinux.lds.h             | 90
7 files changed, 171 insertions(+), 47 deletions(-)
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h
index 1697404..63cf822 100644
--- a/include/asm-generic/bitops/ext2-non-atomic.h
+++ b/include/asm-generic/bitops/ext2-non-atomic.h
@@ -14,5 +14,7 @@
generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
#define ext2_find_next_zero_bit(addr, size, off) \
generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
+#define ext2_find_next_bit(addr, size, off) \
+ generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index b9c7e5d..80e3bf1 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -20,6 +20,8 @@
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
+#define generic_find_next_le_bit(addr, size, offset) \
+ find_next_bit(addr, size, offset)
#elif defined(__BIG_ENDIAN)
@@ -42,6 +44,8 @@
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
+extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
#else
#error "Please fix <asm/byteorder.h>"
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index d56fedb..2632328 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -31,14 +31,19 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
-#ifndef HAVE_ARCH_WARN_ON
+#ifndef __WARN
+#ifndef __ASSEMBLY__
+extern void warn_on_slowpath(const char *file, const int line);
+#define WANT_WARN_ON_SLOWPATH
+#endif
+#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
+#endif
+
+#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) { \
- printk("WARNING: at %s:%d %s()\n", __FILE__, \
- __LINE__, __FUNCTION__); \
- dump_stack(); \
- } \
+ if (unlikely(__ret_warn_on)) \
+ __WARN(); \
unlikely(__ret_warn_on); \
})
#endif
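Editor's note: a hedged illustration (not part of the patch) of the new split. WARN_ON() still evaluates to the condition, so callers can branch on it, while the slow path is now a single out-of-line warn_on_slowpath() call. struct mydev and enable_device() are hypothetical.

static int enable_device(struct mydev *dev)
{
	/* On a NULL dev this prints the file/line warning via
	 * warn_on_slowpath() and the caller bails out. */
	if (WARN_ON(dev == NULL))
		return -EINVAL;

	dev->enabled = 1;
	return 0;
}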
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d85172e..4b8d31c 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -3,54 +3,79 @@
#include <linux/compiler.h>
#include <linux/threads.h>
-#define __GENERIC_PER_CPU
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
#ifdef CONFIG_SMP
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- __attribute__((__section__(".data.percpu.shared_aligned"))) \
- __typeof__(type) per_cpu__##name \
- ____cacheline_aligned_in_smp
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({ \
- extern int simple_identifier_##var(void); \
- RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
-#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
-#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
-
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size) \
-do { \
- unsigned int __i; \
- for_each_possible_cpu(__i) \
- memcpy((pcpudst)+__per_cpu_offset[__i], \
- (src), (size)); \
-} while (0)
-#else /* ! SMP */
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more effective
+ * means of obtaining the offset to the per cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * Add an offset to a pointer but keep the pointer as is.
+ *
+ * Only S390 provides its own means of moving the pointer.
+ */
+#ifndef SHIFT_PERCPU_PTR
+#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+#endif
-#define DEFINE_PER_CPU(type, name) \
- __typeof__(type) per_cpu__##name
+/*
+ * A percpu variable may point to a discarded region. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+#define __get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+#define __raw_get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var) per_cpu__##var
-#define __raw_get_cpu_var(var) per_cpu__##var
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
+
+#else /* ! SMP */
+
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
+#define __get_cpu_var(var) per_cpu_var(var)
+#define __raw_get_cpu_var(var) per_cpu_var(var)
#endif /* SMP */
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
+ __typeof__(type) per_cpu_var(name)
#endif /* _ASM_GENERIC_PERCPU_H_ */
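Editor's note: a minimal usage sketch (not part of the patch), assuming the usual DEFINE_PER_CPU pairing from linux/percpu.h; the variable name my_hits and both helpers are hypothetical.

DEFINE_PER_CPU(unsigned long, my_hits);	/* real symbol: per_cpu__my_hits */

static void count_hit(void)
{
	/* __get_cpu_var() shifts &per_cpu_var(my_hits) by my_cpu_offset;
	 * assumes preemption is already disabled by the caller. */
	__get_cpu_var(my_hits)++;
}

static unsigned long total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu() shifts by per_cpu_offset(cpu) on SMP and simply
	 * evaluates (cpu) on UP builds. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(my_hits, cpu);
	return sum;
}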
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index cfe3692..587566f 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -12,7 +12,7 @@
* then it defines them prior including asm-generic/resource.h. )
*/
-#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_CPU 0 /* CPU time in sec */
#define RLIMIT_FSIZE 1 /* Maximum filesize */
#define RLIMIT_DATA 2 /* max data size */
#define RLIMIT_STACK 3 /* max stack size */
@@ -44,8 +44,8 @@
#define RLIMIT_NICE 13 /* max nice prio allowed to raise to
0-39 for nice level 19 .. -20 */
#define RLIMIT_RTPRIO 14 /* maximum realtime priority */
-
-#define RLIM_NLIMITS 15
+#define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */
+#define RLIM_NLIMITS 16
/*
* SuS says limits have to be unsigned.
@@ -86,6 +86,7 @@
[RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
[RLIMIT_NICE] = { 0, 0 }, \
[RLIMIT_RTPRIO] = { 0, 0 }, \
+ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
#endif /* __KERNEL__ */
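Editor's note: from userspace the new limit is set like any other rlimit; a sketch assuming a libc that already exposes RLIMIT_RTTIME (value 15 per this patch).

#include <sys/resource.h>

/* Cap each uninterrupted real-time CPU burst at 50 ms; the limit is
 * expressed in microseconds, per the comment above. */
static int cap_rt_runtime(void)
{
	struct rlimit rl = { .rlim_cur = 50000, .rlim_max = 50000 };

	return setrlimit(RLIMIT_RTTIME, &rl);
}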
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f490e43..75f2bfa 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -14,6 +14,7 @@
#define _ASM_GENERIC__TLB_H
#include <linux/swap.h>
+#include <linux/quicklist.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9f584cc..f784d2f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -9,10 +9,46 @@
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
+/* The actual configuration determines whether the init/exit sections
+ * are handled as text/data or whether they can be discarded (which
+ * often happens at runtime)
+ */
+#ifdef CONFIG_HOTPLUG
+#define DEV_KEEP(sec) *(.dev##sec)
+#define DEV_DISCARD(sec)
+#else
+#define DEV_KEEP(sec)
+#define DEV_DISCARD(sec) *(.dev##sec)
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec) *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec) *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+
/* .data section */
#define DATA_DATA \
*(.data) \
*(.data.init.refok) \
+ *(.ref.data) \
+ DEV_KEEP(init.data) \
+ DEV_KEEP(exit.data) \
+ CPU_KEEP(init.data) \
+ CPU_KEEP(exit.data) \
+ MEM_KEEP(init.data) \
+ MEM_KEEP(exit.data) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___markers) = .; \
*(__markers) \
@@ -132,14 +168,25 @@
*(__ksymtab_strings) \
} \
\
+ /* __*init sections */ \
+ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+ *(.ref.rodata) \
+ DEV_KEEP(init.rodata) \
+ DEV_KEEP(exit.rodata) \
+ CPU_KEEP(init.rodata) \
+ CPU_KEEP(exit.rodata) \
+ MEM_KEEP(init.rodata) \
+ MEM_KEEP(exit.rodata) \
+ } \
+ \
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
*(__param) \
VMLINUX_SYMBOL(__stop___param) = .; \
+ . = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
} \
- \
. = ALIGN((align));
/* RODATA provided for backward compatibility.
@@ -158,8 +205,16 @@
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text) \
+ *(.ref.text) \
*(.text.init.refok) \
- *(.exit.text.refok)
+ *(.exit.text.refok) \
+ DEV_KEEP(init.text) \
+ DEV_KEEP(exit.text) \
+ CPU_KEEP(init.text) \
+ CPU_KEEP(exit.text) \
+ MEM_KEEP(init.text) \
+ MEM_KEEP(exit.text)
+
/* sched.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map */
@@ -183,6 +238,37 @@
*(.kprobes.text) \
VMLINUX_SYMBOL(__kprobes_text_end) = .;
+/* init and exit section handling */
+#define INIT_DATA \
+ *(.init.data) \
+ DEV_DISCARD(init.data) \
+ DEV_DISCARD(init.rodata) \
+ CPU_DISCARD(init.data) \
+ CPU_DISCARD(init.rodata) \
+ MEM_DISCARD(init.data) \
+ MEM_DISCARD(init.rodata)
+
+#define INIT_TEXT \
+ *(.init.text) \
+ DEV_DISCARD(init.text) \
+ CPU_DISCARD(init.text) \
+ MEM_DISCARD(init.text)
+
+#define EXIT_DATA \
+ *(.exit.data) \
+ DEV_DISCARD(exit.data) \
+ DEV_DISCARD(exit.rodata) \
+ CPU_DISCARD(exit.data) \
+ CPU_DISCARD(exit.rodata) \
+ MEM_DISCARD(exit.data) \
+ MEM_DISCARD(exit.rodata)
+
+#define EXIT_TEXT \
+ *(.exit.text) \
+ DEV_DISCARD(exit.text) \
+ CPU_DISCARD(exit.text) \
+ MEM_DISCARD(exit.text)
+
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to
the beginning of the section so we begin them at 0. */
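Editor's note: to illustrate the producer side of the DEV_KEEP/DEV_DISCARD pairs above, a sketch assuming the companion init.h changes that map the __devinit annotation onto the .devinit.* sections; mydrv_setup is hypothetical.

#include <linux/init.h>

static int __devinit mydrv_setup(void)
{
	/* Emitted into .devinit.text: with CONFIG_HOTPLUG=y the code is
	 * kept in regular text via DEV_KEEP(init.text); otherwise
	 * DEV_DISCARD(init.text) routes it into init memory so it can be
	 * freed after boot. */
	return 0;
}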