Diffstat (limited to 'init')

 init/Kconfig | 22
 init/main.c  | 18
 2 files changed, 31 insertions(+), 9 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 54c655c..a23da9f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1079,6 +1079,28 @@ config SLOB
endchoice
+config MMAP_ALLOW_UNINITIALIZED
+ bool "Allow mmapped anonymous memory to be uninitialized"
+ depends on EMBEDDED && !MMU
+ default n
+ help
+ Normally, and according to the Linux spec, anonymous memory obtained
+ from mmap() has its contents cleared before it is passed to
+ userspace. Enabling this config option allows you to request that
+ mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
+ providing a huge performance boost. If this option is not enabled,
+ then the flag will be ignored.
+
+ This is taken advantage of by uClibc's malloc(), and also by
+ ELF-FDPIC binfmt's brk and stack allocator.
+
+ Because of the obvious security issues, this option should only be
+ enabled on embedded devices where you control what is run in
+ userspace. Since that isn't generally a problem on no-MMU systems,
+ it is normally safe to say Y here.
+
+ See Documentation/nommu-mmap.txt for more information.
+
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
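
For illustration, a minimal userspace sketch of how an allocator on a no-MMU
kernel built with CONFIG_MMAP_ALLOW_UNINITIALIZED=y might request uninitialized
anonymous memory, as described in the help text above. The helper name
fast_anon_alloc is hypothetical; the MAP_UNINITIALIZED value matches the one
exported by the kernel's uapi mman headers.

#include <stddef.h>
#include <sys/mman.h>

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0x4000000	/* value from the uapi mman headers */
#endif

/* Hypothetical helper: grab anonymous memory without zero-filling.
 * If CONFIG_MMAP_ALLOW_UNINITIALIZED is not set, the kernel ignores
 * the flag and the mapping is cleared as usual, so the same call is
 * valid either way.
 */
static void *fast_anon_alloc(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED,
		       -1, 0);

	return (p == MAP_FAILED) ? NULL : p;
}

int main(void)
{
	void *buf = fast_anon_alloc(64 * 1024);

	if (!buf)
		return 1;
	/* buf may contain stale data from other processes -- only
	 * acceptable where all of userspace is trusted.
	 */
	munmap(buf, 64 * 1024);
	return 0;
}
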
diff --git a/init/main.c b/init/main.c
index 4051d75..c3db4a9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -691,10 +691,10 @@ asmlinkage void __init start_kernel(void)
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
- ctor_fn_t *call = (ctor_fn_t *) __ctors_start;
+ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
- for (; call < (ctor_fn_t *) __ctors_end; call++)
- (*call)();
+ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+ (*fn)();
#endif
}
@@ -755,10 +755,10 @@ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
static void __init do_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __early_initcall_end; call < __initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __early_initcall_end; fn < __initcall_end; fn++)
+ do_one_initcall(*fn);
/* Make sure there is no pending stuff from the initcall sequence */
flush_scheduled_work();
@@ -785,10 +785,10 @@ static void __init do_basic_setup(void)
static void __init do_pre_smp_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __initcall_start; call < __early_initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __initcall_start; fn < __early_initcall_end; fn++)
+ do_one_initcall(*fn);
}
static void run_init_process(char *init_filename)
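
Side note: the three loops touched above all share the same shape -- walk a
table of function pointers delimited by linker-provided symbols and call each
entry in turn. A minimal standalone sketch of that pattern, using a hand-built
array in place of __ctors_start/__ctors_end (which only the kernel's linker
script provides):

#include <stdio.h>

typedef void (*ctor_fn_t)(void);

static void init_a(void) { puts("init_a"); }
static void init_b(void) { puts("init_b"); }

/* Stand-in for the section bounded by __ctors_start/__ctors_end. */
static ctor_fn_t ctors_table[] = { init_a, init_b };
#define ctors_end (ctors_table + sizeof(ctors_table) / sizeof(ctors_table[0]))

static void do_ctors_demo(void)
{
	ctor_fn_t *fn = ctors_table;

	/* Same iteration the patch renames from 'call' to 'fn'. */
	for (; fn < ctors_end; fn++)
		(*fn)();
}

int main(void)
{
	do_ctors_demo();
	return 0;
}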