Diffstat (limited to 'init')
-rw-r--r--   init/Kconfig   41
-rw-r--r--   init/main.c    53
2 files changed, 76 insertions, 18 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 4389cee..fed6dc3 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -302,13 +302,14 @@ config AUDITSYSCALL
 
 config AUDIT_TREE
 	def_bool y
-	depends on AUDITSYSCALL && INOTIFY
+	depends on AUDITSYSCALL
+	select INOTIFY
 
 menu "RCU Subsystem"
 
 choice
 	prompt "RCU Implementation"
-	default CLASSIC_RCU
+	default TREE_RCU
 
 config CLASSIC_RCU
 	bool "Classic RCU"
@@ -925,6 +926,42 @@ config AIO
 	  by some high performance threaded applications. Disabling
 	  this option saves about 7k.
 
+config HAVE_PERF_COUNTERS
+	bool
+	help
+	  See tools/perf/design.txt for details.
+
+menu "Performance Counters"
+
+config PERF_COUNTERS
+	bool "Kernel Performance Counters"
+	depends on HAVE_PERF_COUNTERS
+	select ANON_INODES
+	help
+	  Enable kernel support for performance counter hardware.
+
+	  Performance counters are special hardware registers available
+	  on most modern CPUs. These registers count the number of certain
+	  types of hw events: such as instructions executed, cachemisses
+	  suffered, or branches mis-predicted - without slowing down the
+	  kernel or applications. These registers can also trigger interrupts
+	  when a threshold number of events have passed - and can thus be
+	  used to profile the code that runs on that CPU.
+
+	  The Linux Performance Counter subsystem provides an abstraction of
+	  these hardware capabilities, available via a system call. It
+	  provides per task and per CPU counters, and it provides event
+	  capabilities on top of those.
+
+	  Say Y if unsure.
+
+config EVENT_PROFILE
+	bool "Tracepoint profile sources"
+	depends on PERF_COUNTERS && EVENT_TRACER
+	default y
+
+endmenu
+
 config VM_EVENT_COUNTERS
 	default y
 	bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
diff --git a/init/main.c b/init/main.c
index d721dad..f6204f7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -56,6 +56,7 @@
 #include <linux/debug_locks.h>
 #include <linux/debugobjects.h>
 #include <linux/lockdep.h>
+#include <linux/kmemleak.h>
 #include <linux/pid_namespace.h>
 #include <linux/device.h>
 #include <linux/kthread.h>
@@ -64,6 +65,7 @@
 #include <linux/idr.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/kmemtrace.h>
 #include <trace/boot.h>
 
 #include <asm/io.h>
@@ -71,7 +73,6 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
-#include <trace/kmemtrace.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/smp.h>
@@ -533,6 +534,21 @@ void __init __weak thread_info_cache_init(void)
 {
 }
 
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+	/*
+	 * page_cgroup requires countinous pages as memmap
+	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
+	 */
+	page_cgroup_init_flatmem();
+	mem_init();
+	kmem_cache_init();
+	vmalloc_init();
+}
+
 asmlinkage void __init start_kernel(void)
 {
 	char * command_line;
@@ -574,6 +590,23 @@ asmlinkage void __init start_kernel(void)
 	setup_nr_cpu_ids();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
+	build_all_zonelists();
+	page_alloc_init();
+
+	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
+	parse_early_param();
+	parse_args("Booting kernel", static_command_line, __start___param,
+		   __stop___param - __start___param,
+		   &unknown_bootoption);
+	/*
+	 * These use large bootmem allocations and must precede
+	 * kmem_cache_init()
+	 */
+	pidhash_init();
+	vfs_caches_init_early();
+	sort_main_extable();
+	trap_init();
+	mm_init();
 	/*
 	 * Set up the scheduler prior starting any interrupts (such as the
 	 * timer interrupt). Full topology setup happens at smp_init()
@@ -585,25 +618,16 @@ asmlinkage void __init start_kernel(void)
 	 * fragile until we cpu_idle() for the first time.
 	 */
 	preempt_disable();
-	build_all_zonelists();
-	page_alloc_init();
-	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
-	parse_early_param();
-	parse_args("Booting kernel", static_command_line, __start___param,
-		   __stop___param - __start___param,
-		   &unknown_bootoption);
 	if (!irqs_disabled()) {
 		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
 				"enabled *very* early, fixing it\n");
 		local_irq_disable();
 	}
-	sort_main_extable();
-	trap_init();
 	rcu_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
-	pidhash_init();
+	prio_tree_init();
 	init_timers();
 	hrtimers_init();
 	softirq_init();
@@ -616,6 +640,7 @@
 				"enabled early\n");
 	early_boot_irqs_on();
 	local_irq_enable();
+	kmem_cache_init_late();
 
 	/*
 	 * HACK ALERT! This is early. We're enabling the console before
@@ -645,15 +670,12 @@ asmlinkage void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	vmalloc_init();
-	vfs_caches_init_early();
 	cpuset_init_early();
 	page_cgroup_init();
-	mem_init();
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
-	kmem_cache_init();
 	kmemtrace_init();
+	kmemleak_init();
 	debug_objects_mem_init();
 	idr_init_cache();
 	setup_per_cpu_pageset();
@@ -663,7 +685,6 @@ asmlinkage void __init start_kernel(void)
 	calibrate_delay();
 	pidmap_init();
 	pgtable_cache_init();
-	prio_tree_init();
 	anon_vma_init();
#ifdef CONFIG_X86
 	if (efi_enabled)
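
Usage note (not part of the commit): the PERF_COUNTERS help text above describes per-task and per-CPU counters made "available via a system call". As a rough illustration of that usage model, the sketch below opens a per-task hardware counter from user space and reads it after a busy loop. It is written against the present-day perf_event_open() naming, the renamed descendant of the sys_perf_counter_open() interface documented in tools/perf/design.txt; the attribute layout and ioctl names at the time of this commit differed, so treat this as a sketch of the general model rather than the exact ABI introduced here.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

/* No glibc wrapper for this call; invoke the raw syscall. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	volatile unsigned long x = 0;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;	/* instructions retired */
	attr.disabled = 1;				/* start stopped */
	attr.exclude_kernel = 1;			/* count user mode only */

	/* pid = 0, cpu = -1: a per-task counter that follows this process on any CPU */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (unsigned long i = 0; i < 1000000; i++)
		x += i;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}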