author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-30 18:15:32 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-30 18:15:32 -0800
commit | 12f2bbd609006f983c1c99d240cf61e6e829a14c |
tree | 47fa77dd447cbb735d81de663d7b46828a8315b9 /arch/x86/xen |
parent | 10ffe3dbf7f9c63d6fde3d1fe23b8fd2ae4d7bd1 |
parent | 07ba06d9d293d3c0a512f1cb9189645c6e0424e2 |
Merge branch 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asmlinkage (LTO) changes from Peter Anvin:
"This patchset adds more infrastructure for link time optimization
(LTO).
This patchset was pulled into my tree late because of a
miscommunication (part of the patchset was picked up by other
maintainers). However, the patchset is strictly build-related and
seems to be okay in testing"
* 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, asmlinkage, xen: Fix type of NMI
x86, asmlinkage, xen, kvm: Make {xen,kvm}_lock_spinning global and visible
x86: Use inline assembler instead of global register variable to get sp
x86, asmlinkage, paravirt: Make paravirt thunks global
x86, asmlinkage, paravirt: Don't rely on local assembler labels
x86, asmlinkage, lguest: Fix C functions used by inline assembler
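For context on what these patches fix: under link-time optimization the kernel is optimized as a single unit, so a static C function whose only caller is hand-written assembler has no visible reference, and the compiler is free to drop it or give it a mangled local name. The cure is to pin such functions with a real global symbol, either asmlinkage or __visible (which expands to __attribute__((externally_visible)); this LTO series also makes asmlinkage imply it). A minimal sketch of the failure mode, with hypothetical names rather than actual kernel code:

/* visible_demo.c - minimal sketch of the LTO visibility problem.
 * Hypothetical names; not kernel code.
 * Build: gcc -O2 -flto visible_demo.c
 */

/* The kernel's __visible expands to this attribute. */
#define __visible __attribute__((externally_visible))

/* Called only from assembler below. Declared plain "static", -flto
 * sees no C-level caller and may discard or locally rename the
 * function, so the "call" fails to resolve at link time. __visible
 * pins the symbol in the final object. */
__visible void from_asm_only(void)
{
}

int main(void)
{
	/* Reference the function by name from assembler, the way the
	 * stubs emitted by PV_CALLEE_SAVE_REGS_THUNK() do. */
	asm volatile("call from_asm_only" ::: "memory");
	return 0;
}

The "global register variable" item in the list above is the same LTO constraint from another angle: a file-scope binding of a name to a register (e.g. register unsigned long sp asm("esp")) is not supported under LTO, so that patch reads the register through a small inline-asm statement instead.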
Diffstat (limited to 'arch/x86/xen')
-rw-r--r-- | arch/x86/xen/irq.c | 8
-rw-r--r-- | arch/x86/xen/mmu.c | 16
-rw-r--r-- | arch/x86/xen/setup.c | 4
-rw-r--r-- | arch/x86/xen/spinlock.c | 2
4 files changed, 15 insertions, 15 deletions
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 76ca326..08f763d 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void)
 	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-static unsigned long xen_save_fl(void)
+asmlinkage unsigned long xen_save_fl(void)
 {
 	struct vcpu_info *vcpu;
 	unsigned long flags;
@@ -41,7 +41,7 @@ static unsigned long xen_save_fl(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
-static void xen_restore_fl(unsigned long flags)
+__visible void xen_restore_fl(unsigned long flags)
 {
 	struct vcpu_info *vcpu;
 
@@ -63,7 +63,7 @@ static void xen_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-static void xen_irq_disable(void)
+asmlinkage void xen_irq_disable(void)
 {
 	/* There's a one instruction preempt window here.  We need to
 	   make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@ static void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-static void xen_irq_enable(void)
+asmlinkage void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c1d406f..2423ef0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -431,7 +431,7 @@ static pteval_t iomap_pte(pteval_t val)
 	return val;
 }
 
-static pteval_t xen_pte_val(pte_t pte)
+__visible pteval_t xen_pte_val(pte_t pte)
 {
 	pteval_t pteval = pte.pte;
 #if 0
@@ -448,7 +448,7 @@ static pteval_t xen_pte_val(pte_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-static pgdval_t xen_pgd_val(pgd_t pgd)
+__visible pgdval_t xen_pgd_val(pgd_t pgd)
 {
 	return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -479,7 +479,7 @@ void xen_set_pat(u64 pat)
 	WARN_ON(pat != 0x0007010600070106ull);
 }
 
-static pte_t xen_make_pte(pteval_t pte)
+__visible pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
 #if 0
@@ -514,14 +514,14 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-static pgd_t xen_make_pgd(pgdval_t pgd)
+__visible pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
 	return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-static pmdval_t xen_pmd_val(pmd_t pmd)
+__visible pmdval_t xen_pmd_val(pmd_t pmd)
 {
 	return pte_mfn_to_pfn(pmd.pmd);
 }
@@ -580,7 +580,7 @@ static void xen_pmd_clear(pmd_t *pmdp)
 }
 #endif	/* CONFIG_X86_PAE */
 
-static pmd_t xen_make_pmd(pmdval_t pmd)
+__visible pmd_t xen_make_pmd(pmdval_t pmd)
 {
 	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
@@ -588,13 +588,13 @@
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-static pudval_t xen_pud_val(pud_t pud)
+__visible pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-static pud_t xen_make_pud(pudval_t pud)
+__visible pud_t xen_make_pud(pudval_t pud)
 {
 	pud = pte_pfn_to_mfn(pud);
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dd5f905..0982233 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -35,7 +35,7 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 #ifdef CONFIG_X86_64
-extern const char nmi[];
+extern asmlinkage void nmi(void);
 #endif
 extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
@@ -577,7 +577,7 @@ void xen_enable_syscall(void)
 void xen_enable_nmi(void)
 {
 #ifdef CONFIG_X86_64
-	if (register_callback(CALLBACKTYPE_nmi, nmi))
+	if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
 		BUG();
 #endif
 }
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0e36cde..581521c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -106,7 +106,7 @@ static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 static bool xen_pvspin = true;
 
-static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
 	struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
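The recurring hunk shape above is driven by PV_CALLEE_SAVE_REGS_THUNK() (arch/x86/include/asm/paravirt.h): it emits a top-level asm stub that saves the caller-clobbered registers and then calls the wrapped C function by its symbol name, so the function cannot stay static once LTO is allowed to prune unreferenced statics. A much-reduced, 64-bit-only illustration of that shape, with hypothetical demo names and only three registers saved (the real macro saves all caller-clobbered registers):

/* thunk_demo.c - reduced sketch, not the kernel macro.
 * Build: gcc -O2 -flto -c thunk_demo.c   (x86-64)
 */
#define __visible __attribute__((externally_visible))

/* Top-level asm that names the wrapped C function directly. Three
 * pushes also keep the stack 16-byte aligned at the inner call (entry
 * rsp is 8 mod 16 after the caller's call instruction). */
#define DEMO_CALLEE_SAVE_THUNK(func)					\
	asm(".pushsection .text\n"					\
	    ".globl __demo_thunk_" #func "\n"				\
	    "__demo_thunk_" #func ":\n"					\
	    "push %rdi\n"						\
	    "push %rsi\n"						\
	    "push %rdx\n"						\
	    "call " #func "\n"						\
	    "pop %rdx\n"						\
	    "pop %rsi\n"						\
	    "pop %rdi\n"						\
	    "ret\n"							\
	    ".popsection")

/* A plain "static" here would let LTO drop or rename the very symbol
 * the "call" above names; __visible (or asmlinkage, which this series
 * ties to __visible) pins it. */
__visible void demo_irq_disable(void)
{
}
DEMO_CALLEE_SAVE_THUNK(demo_irq_disable);

The setup.c hunk is a different facet of the same cleanup: the assembler-defined NMI entry point had been declared as extern const char nmi[] merely to obtain a linkable address, and declaring the same symbol with conflicting types is exactly the kind of mismatch LTO's cross-unit checking trips over; redeclaring it as a function gives it an honest type, with a (char *) cast kept at the single register_callback() call site.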