Diffstat (limited to 'arch')
34 files changed, 416 insertions, 370 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 7336ba6..137b277 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -101,6 +101,9 @@ config GENERIC_IOMAP
 	bool
 	default y
 
+config ARCH_CLOCKSOURCE_DATA
+	def_bool y
+
 config SCHED_OMIT_FRAME_POINTER
 	bool
 	default y
diff --git a/arch/ia64/include/asm/clocksource.h b/arch/ia64/include/asm/clocksource.h
new file mode 100644
index 0000000..5c8596e
--- /dev/null
+++ b/arch/ia64/include/asm/clocksource.h
@@ -0,0 +1,10 @@
+/* IA64-specific clocksource additions */
+
+#ifndef _ASM_IA64_CLOCKSOURCE_H
+#define _ASM_IA64_CLOCKSOURCE_H
+
+struct arch_clocksource_data {
+	void *fsys_mmio;	/* used by fsyscall asm code */
+};
+
+#endif /* _ASM_IA64_CLOCKSOURCE_H */
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index f64097b..4826ff9 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -115,7 +115,7 @@ int __init init_cyclone_clock(void)
 	}
 	/* initialize last tick */
 	cyclone_mc = cyclone_timer;
-	clocksource_cyclone.fsys_mmio = cyclone_timer;
+	clocksource_cyclone.archdata.fsys_mmio = cyclone_timer;
 	clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ);
 
 	return 0;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 85118df..43920de 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -468,7 +468,7 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
 	fsyscall_gtod_data.clk_mask = c->mask;
 	fsyscall_gtod_data.clk_mult = mult;
 	fsyscall_gtod_data.clk_shift = c->shift;
-	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
+	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
 	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
 
 	/* copy kernel time structures */
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index c34efda..0f8844e 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -54,7 +54,7 @@ ia64_sn_udelay (unsigned long usecs)
 
 void __init sn_timer_init(void)
 {
-	clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
+	clocksource_sn2.archdata.fsys_mmio = RTC_COUNTER_ADDR;
 	clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
 
 	ia64_udelay = &ia64_sn_udelay;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fc76e42..5f60ea1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -95,6 +95,10 @@ config CLOCKSOURCE_WATCHDOG
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
+config ARCH_CLOCKSOURCE_DATA
+	def_bool y
+	depends on X86_64
+
 config GENERIC_CLOCKEVENTS_BROADCAST
 	def_bool y
 	depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 94d420b..4554cc6 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -17,8 +17,8 @@
 
 .macro altinstruction_entry orig alt feature orig_len alt_len
 	.align 8
-	.quad \orig
-	.quad \alt
+	.long \orig - .
+	.long \alt - .
 	.word \feature
 	.byte \orig_len
 	.byte \alt_len
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index bf535f9..23fb6d7 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -43,8 +43,8 @@
 #endif
 
 struct alt_instr {
-	u8 *instr;		/* original instruction */
-	u8 *replacement;
+	s32 instr_offset;	/* original instruction */
+	s32 repl_offset;	/* offset to replacement instruction */
 	u16 cpuid;		/* cpuid bit set for replacement */
 	u8  instrlen;		/* length of original instruction */
 	u8  replacementlen;	/* length of new instruction, <= instrlen */
@@ -84,8 +84,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	"661:\n\t" oldinstr "\n662:\n"				\
 	".section .altinstructions,\"a\"\n"			\
 	_ASM_ALIGN "\n"						\
-	_ASM_PTR "661b\n"			/* label           */	\
-	_ASM_PTR "663f\n"			/* new instruction */	\
+	" .long 661b - .\n"			/* label           */	\
+	" .long 663f - .\n"			/* new instruction */	\
 	" .word " __stringify(feature) "\n"	/* feature bit     */	\
 	" .byte 662b-661b\n"			/* sourcelen       */	\
 	" .byte 664f-663f\n"			/* replacementlen  */	\
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
new file mode 100644
index 0000000..0bdbbb3
--- /dev/null
+++ b/arch/x86/include/asm/clocksource.h
@@ -0,0 +1,18 @@
+/* x86-specific clocksource additions */
+
+#ifndef _ASM_X86_CLOCKSOURCE_H
+#define _ASM_X86_CLOCKSOURCE_H
+
+#ifdef CONFIG_X86_64
+
+#define VCLOCK_NONE 0	/* No vDSO clock available.	*/
+#define VCLOCK_TSC  1	/* vDSO should use vread_tsc.	*/
+#define VCLOCK_HPET 2	/* vDSO should use vread_hpet.	*/
+
+struct arch_clocksource_data {
+	int vclock_mode;
+};
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_CLOCKSOURCE_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 71cc380..9929b35 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -331,8 +331,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 			 "2:\n"
 			 ".section .altinstructions,\"a\"\n"
 			 _ASM_ALIGN "\n"
-			 _ASM_PTR "1b\n"
-			 _ASM_PTR "0\n"		/* no replacement */
+			 " .long 1b - .\n"
+			 " .long 0\n"		/* no replacement */
 			 " .word %P0\n"		/* feature bit */
 			 " .byte 2b - 1b\n"	/* source len */
 			 " .byte 0\n"		/* replacement len */
@@ -349,8 +349,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 			     "2:\n"
 			     ".section .altinstructions,\"a\"\n"
 			     _ASM_ALIGN "\n"
-			     _ASM_PTR "1b\n"
-			     _ASM_PTR "3f\n"
+			     " .long 1b - .\n"
+			     " .long 3f - .\n"
 			     " .word %P1\n"		/* feature bit */
 			     " .byte 2b - 1b\n"		/* source len */
 			     " .byte 4f - 3f\n"		/* replacement len */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4729b2b..460c74e 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -78,6 +78,7 @@ enum fixed_addresses {
 	VSYSCALL_LAST_PAGE,
 	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
 			    + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+	VVAR_PAGE,
 	VSYSCALL_HPET,
 #endif
 	FIX_DBGP_BASE,
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6665026..f9a3209 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -17,7 +17,8 @@
  *  Vectors   0 ...  31 : system traps and exceptions - hardcoded events
  *  Vectors  32 ... 127 : device interrupts
  *  Vector  128         : legacy int80 syscall interface
- *  Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
+ *  Vector  204         : legacy x86_64 vsyscall emulation
+ *  Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
  *  Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
  *
  * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
@@ -50,6 +51,9 @@
 #ifdef CONFIG_X86_32
 # define SYSCALL_VECTOR			0x80
 #endif
+#ifdef CONFIG_X86_64
+# define VSYSCALL_EMU_VECTOR		0xcc
+#endif
 
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index d56187c..013286a1 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -107,7 +107,8 @@
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
+#define __PAGE_KERNEL_VVAR_NOCACHE	(__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -129,7 +130,8 @@
 #define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
 #define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
 #define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+#define PAGE_KERNEL_VVAR		__pgprot(__PAGE_KERNEL_VVAR)
+#define PAGE_KERNEL_VVAR_NOCACHE	__pgprot(__PAGE_KERNEL_VVAR_NOCACHE)
 #define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
 #define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0310da6..2bae0a5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_TRAPS_H
 #define _ASM_X86_TRAPS_H
 
+#include <linux/kprobes.h>
+
 #include <asm/debugreg.h>
 #include <asm/siginfo.h>		/* TRAP_TRACE, ... */
 
@@ -38,6 +40,7 @@ asmlinkage void alignment_check(void);
 asmlinkage void machine_check(void);
 #endif /* CONFIG_X86_MCE */
 asmlinkage void simd_coprocessor_error(void);
+asmlinkage void emulate_vsyscall(void);
 
 dotraplinkage void do_divide_error(struct pt_regs *, long);
 dotraplinkage void do_debug(struct pt_regs *, long);
@@ -64,6 +67,7 @@ dotraplinkage void do_alignment_check(struct pt_regs *, long);
 dotraplinkage void do_machine_check(struct pt_regs *, long);
 #endif
 dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
+dotraplinkage void do_emulate_vsyscall(struct pt_regs *, long);
 #ifdef CONFIG_X86_32
 dotraplinkage void do_iret_error(struct pt_regs *, long);
 #endif
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 9db5583..83e2efd 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -51,10 +51,6 @@ extern int unsynchronized_tsc(void);
 extern int check_tsc_unstable(void);
 extern unsigned long native_calibrate_tsc(void);
 
-#ifdef CONFIG_X86_64
-extern cycles_t vread_tsc(void);
-#endif
-
 /*
  * Boot-time check whether the TSCs are synchronized across
  * all CPUs/cores:
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 646b4c1..815285b 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -11,10 +11,9 @@ struct vsyscall_gtod_data {
 	time_t		wall_time_sec;
 	u32		wall_time_nsec;
 
-	int		sysctl_enabled;
 	struct timezone sys_tz;
 	struct { /* extract of a clocksource struct */
-		cycle_t (*vread)(void);
+		int vclock_mode;
 		cycle_t	cycle_last;
 		cycle_t	mask;
 		u32	mult;
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d555973..6010707 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -16,10 +16,6 @@ enum vsyscall_num {
 #ifdef __KERNEL__
 #include <linux/seqlock.h>
 
-/* Definitions for CONFIG_GENERIC_TIME definitions */
-#define __vsyscall_fn \
-	__attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
-
 #define VGETCPU_RDTSCP	1
 #define VGETCPU_LSL	2
 
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index 341b355..de656ac 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -10,15 +10,14 @@
  * In normal kernel code, they are used like any other variable.
  * In user code, they are accessed through the VVAR macro.
  *
- * Each of these variables lives in the vsyscall page, and each
- * one needs a unique offset within the little piece of the page
- * reserved for vvars.  Specify that offset in DECLARE_VVAR.
- * (There are 896 bytes available.  If you mess up, the linker will
- * catch it.)
+ * These variables live in a page of kernel data that has an extra RO
+ * mapping for userspace.  Each variable needs a unique offset within
+ * that page; specify that offset with the DECLARE_VVAR macro.  (If
+ * you mess up, the linker will catch it.)
  */
 
-/* Offset of vars within vsyscall page */
-#define VSYSCALL_VARS_OFFSET (3072 + 128)
+/* Base address of vvars.  This is not ABI. */
+#define VVAR_ADDRESS (-10*1024*1024 - 4096)
 
 #if defined(__VVAR_KERNEL_LDS)
 
@@ -26,17 +25,17 @@
  * right place.
  */
 #define DECLARE_VVAR(offset, type, name) \
-	EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset)
+	EMIT_VVAR(name, offset)
 
 #else
 
 #define DECLARE_VVAR(offset, type, name)			\
 	static type const * const vvaraddr_ ## name =		\
-		(void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset));
+		(void *)(VVAR_ADDRESS + (offset));
 
 #define DEFINE_VVAR(type, name)					\
-	type __vvar_ ## name					\
-	__attribute__((section(".vsyscall_var_" #name), aligned(16)))
+	type name						\
+	__attribute__((section(".vvar_" #name), aligned(16)))
 
 #define VVAR(name) (*vvaraddr_ ## name)
 
@@ -45,8 +44,7 @@
 /* DECLARE_VVAR(offset, type, name) */
 
 DECLARE_VVAR(0, volatile unsigned long, jiffies)
-DECLARE_VVAR(8, int, vgetcpu_mode)
+DECLARE_VVAR(16, int, vgetcpu_mode)
 DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
 
 #undef DECLARE_VVAR
-#undef VSYSCALL_VARS_OFFSET
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 11817ff..0410557 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -24,17 +24,12 @@ endif
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o	:= $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o		:= $(nostackp)
-CFLAGS_vread_tsc_64.o	:= $(nostackp)
 CFLAGS_paravirt.o	:= $(nostackp)
 GCOV_PROFILE_vsyscall_64.o	:= n
 GCOV_PROFILE_hpet.o		:= n
 GCOV_PROFILE_tsc.o		:= n
-GCOV_PROFILE_vread_tsc_64.o	:= n
 GCOV_PROFILE_paravirt.o		:= n
 
-# vread_tsc_64 is hot and should be fully optimized:
-CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls
-
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time.o ioport.o ldt.o dumpstack.o
@@ -43,7 +38,8 @@ obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o vread_tsc_64.o
+obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o
+obj-$(CONFIG_X86_64)	+= vsyscall_emu_64.o
 obj-y			+= bootflag.o e820.o
 obj-y			+= pci-dma.o quirks.o topology.o kdebugfs.o
 obj-y			+= alternative.o i8253.o pci-nommu.o hw_breakpoint.o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a81f2d5..c638228 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -14,7 +14,6 @@
 #include <asm/pgtable.h>
 #include <asm/mce.h>
 #include <asm/nmi.h>
-#include <asm/vsyscall.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
@@ -250,7 +249,6 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
-extern char __vsyscall_0;
 void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
@@ -263,6 +261,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 					 struct alt_instr *end)
 {
 	struct alt_instr *a;
+	u8 *instr, *replacement;
 	u8 insnbuf[MAX_PATCH_LEN];
 
 	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
@@ -276,25 +275,23 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 	 * order.
 	 */
 	for (a = start; a < end; a++) {
-		u8 *instr = a->instr;
+		instr = (u8 *)&a->instr_offset + a->instr_offset;
+		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 		BUG_ON(a->replacementlen > a->instrlen);
 		BUG_ON(a->instrlen > sizeof(insnbuf));
 		BUG_ON(a->cpuid >= NCAPINTS*32);
 		if (!boot_cpu_has(a->cpuid))
 			continue;
-#ifdef CONFIG_X86_64
-		/* vsyscall code is not mapped yet. resolve it manually. */
-		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
-			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
-			DPRINTK("%s: vsyscall fixup: %p => %p\n",
-				__func__, a->instr, instr);
-		}
-#endif
-		memcpy(insnbuf, a->replacement, a->replacementlen);
+
+		memcpy(insnbuf, replacement, a->replacementlen);
+
+		/* 0xe8 is a relative jump; fix the offset. */
 		if (*insnbuf == 0xe8 && a->replacementlen == 5)
-			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;
+			*(s32 *)(insnbuf + 1) += replacement - instr;
+
 		add_nops(insnbuf + a->replacementlen,
 			 a->instrlen - a->replacementlen);
 		text_poke_early(instr, insnbuf, a->instrlen);
 	}
 }
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 37e895a1..e13329d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -9,6 +9,8 @@
 /*
  * entry.S contains the system-call and fault low-level handling routines.
  *
+ * Some of this is documented in Documentation/x86/entry_64.txt
+ *
  * NOTE: This code handles signal-recognition, which happens every time
  * after an interrupt and after each system call.
  *
@@ -1109,6 +1111,8 @@ zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
 zeroentry coprocessor_error do_coprocessor_error
 errorentry alignment_check do_alignment_check
 zeroentry simd_coprocessor_error do_simd_coprocessor_error
+zeroentry emulate_vsyscall do_emulate_vsyscall
+
 
 	/* Reload gs selector with exception handling */
 	/* edi:  new selector */
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 0f4b065..4aecc54 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -72,7 +72,7 @@ static inline void hpet_set_mapping(void)
 {
 	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
 #ifdef CONFIG_X86_64
-	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
+	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
 #endif
 }
 
@@ -739,13 +739,6 @@ static cycle_t read_hpet(struct clocksource *cs)
 	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
 
-#ifdef CONFIG_X86_64
-static cycle_t __vsyscall_fn vread_hpet(void)
-{
-	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
-}
-#endif
-
 static struct clocksource clocksource_hpet = {
 	.name		= "hpet",
 	.rating		= 250,
@@ -754,7 +747,7 @@ static struct clocksource clocksource_hpet = {
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 	.resume		= hpet_resume_counter,
 #ifdef CONFIG_X86_64
-	.vread		= vread_hpet,
+	.archdata	= { .vclock_mode = VCLOCK_HPET },
 #endif
 };
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b9b6716..fbc097a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -872,6 +872,12 @@ void __init trap_init(void)
 	set_bit(SYSCALL_VECTOR, used_vectors);
 #endif
 
+#ifdef CONFIG_X86_64
+	BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors));
+	set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall);
+	set_bit(VSYSCALL_EMU_VECTOR, used_vectors);
+#endif
+
 	/*
 	 * Should be a barrier for any external CPU state:
 	 */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6cc6922..56c633a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -777,7 +777,7 @@ static struct clocksource clocksource_tsc = {
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
 #ifdef CONFIG_X86_64
-	.vread			= vread_tsc,
+	.archdata		= { .vclock_mode = VCLOCK_TSC },
 #endif
 };
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 89aed99..4aa9c54 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -161,50 +161,47 @@ SECTIONS
 #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
 
-#define EMIT_VVAR(x, offset) .vsyscall_var_ ## x	\
-	ADDR(.vsyscall_0) + offset			\
-	: AT(VLOAD(.vsyscall_var_ ## x)) {		\
-		*(.vsyscall_var_ ## x)			\
-	}						\
-	x = VVIRT(.vsyscall_var_ ## x);
-
 	. = ALIGN(4096);
 	__vsyscall_0 = .;
 
 	. = VSYSCALL_ADDR;
-	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
+	.vsyscall : AT(VLOAD(.vsyscall)) {
 		*(.vsyscall_0)
-	} :user
 
-	. = ALIGN(L1_CACHE_BYTES);
-	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
-		*(.vsyscall_fn)
-	}
-
-	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
+		. = 1024;
 		*(.vsyscall_1)
-	}
-	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
-		*(.vsyscall_2)
-	}
-	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
-		*(.vsyscall_3)
-	}
-
-#define __VVAR_KERNEL_LDS
-#include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
+		. = 2048;
+		*(.vsyscall_2)
 
-	. = __vsyscall_0 + PAGE_SIZE;
+		. = 4096;	/* Pad the whole page. */
+	} :user =0xcc
+	. = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE);
 
 #undef VSYSCALL_ADDR
 #undef VLOAD_OFFSET
 #undef VLOAD
 #undef VVIRT_OFFSET
 #undef VVIRT
 
+	__vvar_page = .;
+
+	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
+
+		/* Place all vvars at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset)		\
+		. = offset;		\
+		*(.vvar_ ## name)
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
+	} :data
+
+	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
+
 #endif /* CONFIG_X86_64 */
 
 	/* Init code and data - will be freed after init */
diff --git a/arch/x86/kernel/vread_tsc_64.c b/arch/x86/kernel/vread_tsc_64.c
deleted file mode 100644
index a81aa9e..0000000
--- a/arch/x86/kernel/vread_tsc_64.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* This code runs in userspace. */
-
-#define DISABLE_BRANCH_PROFILING
-#include <asm/vgtod.h>
-
-notrace cycle_t __vsyscall_fn vread_tsc(void)
-{
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads.  The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)vget_cycles();
-
-	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
-
-	if (likely(ret >= last))
-		return ret;
-
-	/*
-	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a funciton of time and the likely is
-	 * very likely) and there's a data dependence, so force GCC
-	 * to generate a branch instead.  I don't barrier() because
-	 * we don't actually need a barrier, and if this function
-	 * ever gets inlined it will generate worse code.
-	 */
-	asm volatile ("");
-	return last;
-}
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 3e68218..dda7dff 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -2,6 +2,8 @@
  *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
  *  Copyright 2003 Andi Kleen, SuSE Labs.
  *
+ *  [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
+ *
  *  Thanks to hpa@transmeta.com for some useful hint.
  *  Special thanks to Ingo Molnar for his early experience with
  *  a different vsyscall implementation for Linux/IA32 and for the name.
@@ -11,10 +13,9 @@
  *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
  *  jumping out of line if necessary. We cannot add more with this
  *  mechanism because older kernels won't return -ENOSYS.
- *  If we want more than four we need a vDSO.
  *
- *  Note: the concept clashes with user mode linux. If you use UML and
- *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
+ *  Note: the concept clashes with user mode linux.  UML users should
+ *  use the vDSO.
  */
 
 /* Disable profiling for userspace code: */
@@ -32,9 +33,12 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/notifier.h>
+#include <linux/syscalls.h>
+#include <linux/ratelimit.h>
 
 #include <asm/vsyscall.h>
 #include <asm/pgtable.h>
+#include <asm/compat.h>
 #include <asm/page.h>
 #include <asm/unistd.h>
 #include <asm/fixmap.h>
@@ -44,16 +48,12 @@
 #include <asm/desc.h>
 #include <asm/topology.h>
 #include <asm/vgtod.h>
-
-#define __vsyscall(nr) \
-		__attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
-#define __syscall_clobber "r11","cx","memory"
+#include <asm/traps.h>
 
 DEFINE_VVAR(int, vgetcpu_mode);
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-	.sysctl_enabled = 1,
 };
 
 void update_vsyscall_tz(void)
@@ -72,179 +72,149 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	unsigned long flags;
 
 	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+
 	/* copy vsyscall data */
-	vsyscall_gtod_data.clock.vread = clock->vread;
-	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
-	vsyscall_gtod_data.clock.mask = clock->mask;
-	vsyscall_gtod_data.clock.mult = mult;
-	vsyscall_gtod_data.clock.shift = clock->shift;
-	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
-	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
-	vsyscall_gtod_data.wall_to_monotonic = *wtm;
-	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
+	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
+	vsyscall_gtod_data.clock.mask = clock->mask;
+	vsyscall_gtod_data.clock.mult = mult;
+	vsyscall_gtod_data.clock.shift = clock->shift;
+	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
+	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
+	vsyscall_gtod_data.wall_to_monotonic = *wtm;
+	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+
 	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
-/* RED-PEN may want to readd seq locking, but then the variable should be
- * write-once.
- */
-static __always_inline void do_get_tz(struct timezone * tz)
+static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+			      const char *message)
 {
-	*tz = VVAR(vsyscall_gtod_data).sys_tz;
-}
+	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+	struct task_struct *tsk;
 
-static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-{
-	int ret;
-	asm volatile("syscall"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
-		: __syscall_clobber );
-	return ret;
-}
+	if (!show_unhandled_signals || !__ratelimit(&rs))
+		return;
 
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		: "=a" (secs)
-		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-	return secs;
-}
+	tsk = current;
 
-static __always_inline void do_vgettimeofday(struct timeval * tv)
-{
-	cycle_t now, base, mask, cycle_delta;
-	unsigned seq;
-	unsigned long mult, shift, nsec;
-	cycle_t (*vread)(void);
-	do {
-		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
-
-		vread = VVAR(vsyscall_gtod_data).clock.vread;
-		if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
-			     !vread)) {
-			gettimeofday(tv,NULL);
-			return;
-		}
-
-		now = vread();
-		base = VVAR(vsyscall_gtod_data).clock.cycle_last;
-		mask = VVAR(vsyscall_gtod_data).clock.mask;
-		mult = VVAR(vsyscall_gtod_data).clock.mult;
-		shift = VVAR(vsyscall_gtod_data).clock.shift;
-
-		tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
-		nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
-	} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
-
-	/* calculate interval: */
-	cycle_delta = (now - base) & mask;
-	/* convert to nsecs: */
-	nsec += (cycle_delta * mult) >> shift;
-
-	while (nsec >= NSEC_PER_SEC) {
-		tv->tv_sec += 1;
-		nsec -= NSEC_PER_SEC;
-	}
-	tv->tv_usec = nsec / NSEC_PER_USEC;
+	printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+	       level, tsk->comm, task_pid_nr(tsk),
+	       message, regs->ip - 2, regs->cs,
+	       regs->sp, regs->ax, regs->si, regs->di);
 }
 
-int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+static int addr_to_vsyscall_nr(unsigned long addr)
 {
-	if (tv)
-		do_vgettimeofday(tv);
-	if (tz)
-		do_get_tz(tz);
-	return 0;
-}
+	int nr;
 
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-time_t __vsyscall(1) vtime(time_t *t)
-{
-	unsigned seq;
-	time_t result;
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
+	if ((addr & ~0xC00UL) != VSYSCALL_START)
+		return -EINVAL;
 
-	do {
-		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
+	nr = (addr & 0xC00UL) >> 10;
+	if (nr >= 3)
+		return -EINVAL;
 
-		result = VVAR(vsyscall_gtod_data).wall_time_sec;
+	return nr;
+}
 
-	} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
+void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
+{
+	struct task_struct *tsk;
+	unsigned long caller;
+	int vsyscall_nr;
+	long ret;
+
+	local_irq_enable();
+
+	/*
+	 * Real 64-bit user mode code has cs == __USER_CS.  Anything else
+	 * is bogus.
+	 */
+	if (regs->cs != __USER_CS) {
+		/*
+		 * If we trapped from kernel mode, we might as well OOPS now
+		 * instead of returning to some random address and OOPSing
+		 * then.
+		 */
+		BUG_ON(!user_mode(regs));
+
+		/* Compat mode and non-compat 32-bit CS should both segfault. */
+		warn_bad_vsyscall(KERN_WARNING, regs,
+				  "illegal int 0xcc from 32-bit mode");
+		goto sigsegv;
+	}
 
-	if (t)
-		*t = result;
-	return result;
-}
+	/*
+	 * x86-ism here: regs->ip points to the instruction after the int 0xcc,
+	 * and int 0xcc is two bytes long.
+	 */
+	vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2);
+	if (vsyscall_nr < 0) {
+		warn_bad_vsyscall(KERN_WARNING, regs,
+				  "illegal int 0xcc (exploit attempt?)");
+		goto sigsegv;
+	}
 
-/* Fast way to get current CPU and node.
-   This helps to do per node and per CPU caches in user space.
-   The result is not guaranteed without CPU affinity, but usually
-   works out because the scheduler tries to keep a thread on the same
-   CPU.
+	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
+		warn_bad_vsyscall(KERN_WARNING, regs, "int 0xcc with bad stack (exploit attempt?)");
+		goto sigsegv;
+	}
 
-   tcache must point to a two element sized long array.
-   All arguments can be NULL. */
-long __vsyscall(2)
-vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-{
-	unsigned int p;
-	unsigned long j = 0;
-
-	/* Fast cache - only recompute value once per jiffies and avoid
-	   relatively costly rdtscp/cpuid otherwise.
-	   This works because the scheduler usually keeps the process
-	   on the same CPU and this syscall doesn't guarantee its
-	   results anyways.
-	   We do this here because otherwise user space would do it on
-	   its own in a likely inferior way (no access to jiffies).
-	   If you don't like it pass NULL. */
-	if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) {
-		p = tcache->blob[1];
-	} else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
-		/* Load per CPU data from RDTSCP */
-		native_read_tscp(&p);
-	} else {
-		/* Load per CPU data from GDT */
-		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+	tsk = current;
+	if (seccomp_mode(&tsk->seccomp))
+		do_exit(SIGKILL);
+
+	switch (vsyscall_nr) {
+	case 0:
+		ret = sys_gettimeofday(
+			(struct timeval __user *)regs->di,
+			(struct timezone __user *)regs->si);
+		break;
+
+	case 1:
+		ret = sys_time((time_t __user *)regs->di);
+		break;
+
+	case 2:
+		ret = sys_getcpu((unsigned __user *)regs->di,
+				 (unsigned __user *)regs->si,
+				 0);
+		break;
 	}
-	if (tcache) {
-		tcache->blob[0] = j;
-		tcache->blob[1] = p;
+
+	if (ret == -EFAULT) {
+		/*
+		 * Bad news -- userspace fed a bad pointer to a vsyscall.
+		 *
+		 * With a real vsyscall, that would have caused SIGSEGV.
+		 * To make writing reliable exploits using the emulated
+		 * vsyscalls harder, generate SIGSEGV here as well.
+		 */
+		warn_bad_vsyscall(KERN_INFO, regs,
+				  "vsyscall fault (exploit attempt?)");
+		goto sigsegv;
 	}
-	if (cpu)
-		*cpu = p & 0xfff;
-	if (node)
-		*node = p >> 12;
-	return 0;
-}
+	regs->ax = ret;
 
-static long __vsyscall(3) venosys_1(void)
-{
-	return -ENOSYS;
-}
+	/* Emulate a ret instruction. */
+	regs->ip = caller;
+	regs->sp += 8;
 
-#ifdef CONFIG_SYSCTL
-static ctl_table kernel_table2[] = {
-	{ .procname = "vsyscall64",
-	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
-	  .mode = 0644,
-	  .proc_handler = proc_dointvec },
-	{}
-};
+	local_irq_disable();
+	return;
+
+sigsegv:
+	regs->ip -= 2;  /* The faulting instruction should be the int 0xcc. */
+	force_sig(SIGSEGV, current);
+	local_irq_disable();
+}
 
-static ctl_table kernel_root_table2[] = {
-	{ .procname = "kernel", .mode = 0555,
-	  .child = kernel_table2 },
-	{}
-};
-#endif
-
-/* Assume __initcall executes before all user space. Hopefully kmod
-   doesn't violate that. We'll find out if it does. */
+/*
+ * Assume __initcall executes before all user space.  Hopefully kmod
+ * doesn't violate that.  We'll find out if it does.
+ */
 static void __cpuinit vsyscall_set_cpu(int cpu)
 {
 	unsigned long d;
@@ -255,13 +225,15 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
 	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
 		write_rdtscp_aux((node << 12) | cpu);
 
-	/* Store cpu number in limit so that it can be loaded quickly
-	   in user space in vgetcpu.
-	   12 bits for the CPU and 8 bits for the node. */
+	/*
+	 * Store cpu number in limit so that it can be loaded quickly
+	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
+	 */
 	d = 0x0f40000000000ULL;
 	d |= cpu;
 	d |= (node & 0xf) << 12;
 	d |= (node >> 4) << 48;
+
 	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }
 
@@ -275,8 +247,10 @@ static int __cpuinit
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
+
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
 		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
+
 	return NOTIFY_DONE;
 }
 
@@ -284,25 +258,23 @@
 void __init map_vsyscall(void)
 {
 	extern char __vsyscall_0;
 	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+	extern char __vvar_page;
+	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
 
 	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
 	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
+	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != (unsigned long)VVAR_ADDRESS);
 }
 
 static int __init vsyscall_init(void)
 {
-	BUG_ON(((unsigned long) &vgettimeofday !=
-			VSYSCALL_ADDR(__NR_vgettimeofday)));
-	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
-	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
-	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-#ifdef CONFIG_SYSCTL
-	register_sysctl_table(kernel_root_table2);
-#endif
+	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
+
 	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	/* notifier priority > KVM */
 	hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
 	return 0;
 }
-
 __initcall(vsyscall_init);
diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/kernel/vsyscall_emu_64.S
new file mode 100644
index 0000000..ffa845e
--- /dev/null
+++ b/arch/x86/kernel/vsyscall_emu_64.S
@@ -0,0 +1,27 @@
+/*
+ * vsyscall_emu_64.S: Vsyscall emulation page
+ *
+ * Copyright (c) 2011 Andy Lutomirski
+ *
+ * Subject to the GNU General Public License, version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/irq_vectors.h>
+
+/* The unused parts of the page are filled with 0xcc by the linker script. */
+
+.section .vsyscall_0, "a"
+ENTRY(vsyscall_0)
+	int $VSYSCALL_EMU_VECTOR
+END(vsyscall_0)
+
+.section .vsyscall_1, "a"
+ENTRY(vsyscall_1)
+	int $VSYSCALL_EMU_VECTOR
+END(vsyscall_1)
+
+.section .vsyscall_2, "a"
+ENTRY(vsyscall_2)
+	int $VSYSCALL_EMU_VECTOR
+END(vsyscall_2)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 6fec2d1..01c805b 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -2,6 +2,7 @@
 
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
 	ALIGN
 copy_page_c:
@@ -110,10 +111,6 @@ ENDPROC(copy_page)
 2:
 	.previous
 	.section .altinstructions,"a"
-	.align 8
-	.quad copy_page
-	.quad 1b
-	.word X86_FEATURE_REP_GOOD
-	.byte .Lcopy_page_end - copy_page
-	.byte 2b - 1b
+	altinstruction_entry copy_page, 1b, X86_FEATURE_REP_GOOD,	\
+		.Lcopy_page_end-copy_page, 2b-1b
 	.previous
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index d0ec9c2..ee16461 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -9,6 +9,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 
 #undef memmove
 
@@ -214,11 +215,9 @@ ENTRY(memmove)
 	.previous
 
 	.section .altinstructions,"a"
-	.align 8
-	.quad .Lmemmove_begin_forward
-	.quad .Lmemmove_begin_forward_efs
-	.word X86_FEATURE_ERMS
-	.byte .Lmemmove_end_forward-.Lmemmove_begin_forward
-	.byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
+	altinstruction_entry .Lmemmove_begin_forward,		\
+		.Lmemmove_begin_forward_efs,X86_FEATURE_ERMS,	\
+		.Lmemmove_end_forward-.Lmemmove_begin_forward,	\
+		.Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
 	.previous
 ENDPROC(memmove)
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index bef0bc9..5d17950 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -26,6 +26,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
 export CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+			-Wl,--no-undefined \
 			-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index a724905..6bc0e72 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -6,7 +6,6 @@
  *
  * The code should have no internal unresolved relocations.
  * Check with readelf after changing.
- * Also alternative() doesn't work.
  */
 
 /* Disable profiling for userspace code: */
@@ -17,6 +16,7 @@
 #include <linux/time.h>
 #include <linux/string.h>
 #include <asm/vsyscall.h>
+#include <asm/fixmap.h>
 #include <asm/vgtod.h>
 #include <asm/timex.h>
 #include <asm/hpet.h>
@@ -25,6 +25,43 @@
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
+notrace static cycle_t vread_tsc(void)
+{
+	cycle_t ret;
+	u64 last;
+
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads.  The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)vget_cycles();
+
+	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a funciton of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead.  I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
+
+static notrace cycle_t vread_hpet(void)
+{
+	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
@@ -36,9 +73,12 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 notrace static inline long vgetns(void)
 {
 	long v;
-	cycles_t (*vread)(void);
-	vread = gtod->clock.vread;
-	v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
+	cycles_t cycles;
+	if (gtod->clock.vclock_mode == VCLOCK_TSC)
+		cycles = vread_tsc();
+	else
+		cycles = vread_hpet();
+	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
 	return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
 
@@ -116,21 +156,21 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-	if (likely(gtod->sysctl_enabled))
-		switch (clock) {
-		case CLOCK_REALTIME:
-			if (likely(gtod->clock.vread))
-				return do_realtime(ts);
-			break;
-		case CLOCK_MONOTONIC:
-			if (likely(gtod->clock.vread))
-				return do_monotonic(ts);
-			break;
-		case CLOCK_REALTIME_COARSE:
-			return do_realtime_coarse(ts);
-		case CLOCK_MONOTONIC_COARSE:
-			return do_monotonic_coarse(ts);
-		}
+	switch (clock) {
+	case CLOCK_REALTIME:
+		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
+			return do_realtime(ts);
+		break;
+	case CLOCK_MONOTONIC:
+		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
+			return do_monotonic(ts);
+		break;
+	case CLOCK_REALTIME_COARSE:
+		return do_realtime_coarse(ts);
+	case CLOCK_MONOTONIC_COARSE:
+		return do_monotonic_coarse(ts);
+	}
+
 	return vdso_fallback_gettime(clock, ts);
 }
 int clock_gettime(clockid_t, struct timespec *)
@@ -139,7 +179,7 @@ int clock_gettime(clockid_t, struct timespec *)
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
-	if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
+	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
 		if (likely(tv != NULL)) {
 			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 				     offsetof(struct timespec, tv_nsec) ||
@@ -161,27 +201,14 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 int gettimeofday(struct timeval *, struct timezone *)
 	__attribute__((weak, alias("__vdso_gettimeofday")));
 
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		     : "=a" (secs)
-		     : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
-	return secs;
-}
-
+/*
+ * This will break when the xtime seconds get inaccurate, but that is
+ * unlikely
+ */
 notrace time_t __vdso_time(time_t *t)
 {
-	time_t result;
-
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
-
 	/* This is atomic on x86_64 so we don't need any locks. */
-	result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
 
 	if (t)
 		*t = result;
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 1d3aa6b..1b979c1 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,10 +1,21 @@
+#include <asm/page_types.h>
+#include <linux/linkage.h>
 #include <linux/init.h>
 
-__INITDATA
+__PAGE_ALIGNED_DATA
 
 	.globl vdso_start, vdso_end
+	.align PAGE_SIZE
 vdso_start:
 	.incbin "arch/x86/vdso/vdso.so"
 vdso_end:
 
-__FINIT
+.previous
+
+	.globl vdso_pages
+	.bss
+	.align 8
+	.type vdso_pages, @object
+vdso_pages:
+	.zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
+	.size vdso_pages, .-vdso_pages
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 7abd2be..316fbca 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -14,41 +14,61 @@
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
+#include <asm/page.h>
 
 unsigned int __read_mostly vdso_enabled = 1;
 
 extern char vdso_start[], vdso_end[];
 extern unsigned short vdso_sync_cpuid;
 
-static struct page **vdso_pages;
+extern struct page *vdso_pages[];
 static unsigned vdso_size;
 
-static int __init init_vdso_vars(void)
+static void __init patch_vdso(void *vdso, size_t len)
+{
+	Elf64_Ehdr *hdr = vdso;
+	Elf64_Shdr *sechdrs, *alt_sec = 0;
+	char *secstrings;
+	void *alt_data;
+	int i;
+
+	BUG_ON(len < sizeof(Elf64_Ehdr));
+	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
+
+	sechdrs = (void *)hdr + hdr->e_shoff;
+	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		Elf64_Shdr *shdr = &sechdrs[i];
+		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
+			alt_sec = shdr;
+			goto found;
+		}
+	}
+
+	/* If we get here, it's probably a bug. */
+	pr_warning("patch_vdso: .altinstructions not found\n");
+	return;  /* nothing to patch */
+
+found:
+	alt_data = (void *)hdr + alt_sec->sh_offset;
+	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
+}
+
+static int __init init_vdso(void)
 {
 	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
 	int i;
 
+	patch_vdso(vdso_start, vdso_end - vdso_start);
+
 	vdso_size = npages << PAGE_SHIFT;
-	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
-	if (!vdso_pages)
-		goto oom;
-	for (i = 0; i < npages; i++) {
-		struct page *p;
-		p = alloc_page(GFP_KERNEL);
-		if (!p)
-			goto oom;
-		vdso_pages[i] = p;
-		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
-	}
+	for (i = 0; i < npages; i++)
+		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
 
 	return 0;
-
- oom:
-	printk("Cannot allocate vdso\n");
-	vdso_enabled = 0;
-	return -ENOMEM;
 }
-subsys_initcall(init_vdso_vars);
+subsys_initcall(init_vdso);
 
 struct linux_binprm;
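The recurring idea in the .altinstructions changes above is that each alt_instr entry now stores a 32-bit offset relative to the entry field itself (emitted with `.long sym - .`) instead of an absolute 64-bit pointer, and apply_alternatives() recovers the address with `(u8 *)&a->instr_offset + a->instr_offset`. The stand-alone sketch below models that arithmetic outside the kernel; it is only an illustration under assumed names (alt_entry, original_insn, replacement_insn are made up here), not code from the patch.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the new alt_instr layout: each field holds a
 * 32-bit offset measured from the field's own address. */
struct alt_entry {
	int32_t instr_offset;	/* original instruction, relative to this field */
	int32_t repl_offset;	/* replacement instruction, relative to this field */
};

static unsigned char original_insn[5];
static unsigned char replacement_insn[5];
static struct alt_entry entry;

int main(void)
{
	/* Roughly what ".long original_insn - ." produces at build time
	 * (here computed at run time; all objects are in the same image,
	 * so the differences fit in 32 bits for a normal build). */
	entry.instr_offset = (int32_t)((intptr_t)original_insn -
				       (intptr_t)&entry.instr_offset);
	entry.repl_offset  = (int32_t)((intptr_t)replacement_insn -
				       (intptr_t)&entry.repl_offset);

	/* What apply_alternatives() does when patching. */
	unsigned char *instr = (unsigned char *)&entry.instr_offset + entry.instr_offset;
	unsigned char *repl  = (unsigned char *)&entry.repl_offset + entry.repl_offset;

	printf("instr resolves correctly: %d\n", instr == original_insn);
	printf("repl  resolves correctly: %d\n", repl == replacement_insn);
	return 0;
}
```

Because the offsets are self-relative, the table carries no absolute relocations, which is what lets init_vdso() in this series run apply_alternatives() directly on the vDSO image even though it is mapped at a different address than it will run at.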