Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/tlb.h              |  9
-rw-r--r--  arch/s390/kernel/early.c                 |  7
-rw-r--r--  arch/s390/kernel/smp.c                   |  7
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  | 11
-rw-r--r--  arch/s390/kvm/interrupt.c                |  2
-rw-r--r--  arch/s390/kvm/sigp.c                     |  7
-rw-r--r--  arch/s390/power/swsusp.c                 | 36
-rw-r--r--  arch/s390/power/swsusp_asm64.S           | 35
8 files changed, 62 insertions(+), 52 deletions(-)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3d8a96d..81150b0 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long address)
{
if (!tlb->fullmm) {
tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
* as the pgd. pmd_free_tlb checks the asce_limit against 2GB
* to avoid the double free of the pmd in this case.
*/
-static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
* as the pgd. pud_free_tlb checks the asce_limit against 4TB
* to avoid the double free of the pud in this case.
*/
-static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 42))
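
The three helpers above only gain an address argument, unused on s390, so that their prototypes stay in sync with the generic mm interface that now passes the virtual address down to the architecture hooks. As a hedged illustration (not part of this patch, loosely modelled on the generic page-table teardown path), a caller now passes the address through:

        static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long addr)
        {
                pgtable_t token = pmd_pgtable(*pmd);

                pmd_clear(pmd);
                pte_free_tlb(tlb, token, addr); /* new third argument */
                tlb->mm->nr_ptes--;
        }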
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f9b1440..cae14c4 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -208,9 +208,12 @@ static noinline __init void detect_machine_type(void)
machine_flags |= MACHINE_FLAG_KVM;
else
machine_flags |= MACHINE_FLAG_VM;
+
+ /* Store machine flags for setting up lowcore early */
+ S390_lowcore.machine_flags = machine_flags;
}
-static void early_pgm_check_handler(void)
+static __init void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
@@ -222,7 +225,7 @@ static void early_pgm_check_handler(void)
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
-void setup_lowcore_early(void)
+static noinline __init void setup_lowcore_early(void)
{
psw_t psw;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2270730..be2cae0 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
-#else
- if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
- BUG();
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
local_irq_enable();
+#ifdef CONFIG_64BIT
+ if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
+ BUG();
+#endif
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 79dbfee..49106c6 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -88,10 +88,17 @@ __kernel_clock_gettime:
llilh %r4,0x0100
sar %a4,%r4
lghi %r4,0
+ epsw %r5,0
sacf 512 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
- sacf 0
- sar %a4,%r2
+ tml %r5,0x4000
+ jo 11f
+ tml %r5,0x8000
+ jno 10f
+ sacf 256
+ j 11f
+10: sacf 0
+11: sar %a4,%r2
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
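
The ectg path above used to switch back to primary space unconditionally with sacf 0, which is wrong for processes that do not run in primary-space mode (for example with noexec enabled, where user space runs in secondary space). The new epsw/tml/sacf sequence saves the caller's address-space control (PSW bits 16-17) and restores it after the ectg. A hedged C sketch of the decoding, with a made-up helper name:

        /*
         * Sketch only: map the PSW word saved by "epsw %r5,0" to the sacf
         * operand chosen by the branches above. PSW bits 16-17 appear as
         * the 0x8000 and 0x4000 bits of the low halfword tested by tml.
         */
        static unsigned int sacf_restore_operand(unsigned int psw_bits_0_31)
        {
                if (psw_bits_0_31 & 0x4000)     /* 01/11: access-register or home */
                        return 512;             /* keep the mode set by sacf 512 */
                if (psw_bits_0_31 & 0x8000)     /* 10: secondary-space mode */
                        return 256;
                return 0;                       /* 00: primary-space mode */
        }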
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f04f530..4d61341 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -386,7 +386,7 @@ no_timer:
}
__unset_cpu_idle(vcpu);
__set_current_state(TASK_RUNNING);
- remove_wait_queue(&vcpu->wq, &wait);
+ remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
spin_unlock_bh(&vcpu->arch.local_int.lock);
spin_unlock(&vcpu->arch.local_int.float_int->lock);
hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
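
The fix makes remove_wait_queue() name the same queue the wait entry was added to earlier in kvm_s390_handle_wait(); removing it from vcpu->wq while it is linked into local_int.wq leaves both lists inconsistent. The pairing, as a hedged fragment (illustration only, vcpu as in the surrounding function):

        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        /* ... sleep until an interrupt or the ckc timer wakes the vcpu ... */
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);     /* same queue */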
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 3667883..0ef81d6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -169,7 +169,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
unsigned long *reg)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
- struct kvm_s390_local_interrupt *li;
+ struct kvm_s390_local_interrupt *li = NULL;
struct kvm_s390_interrupt_info *inti;
int rc;
u8 tmp;
@@ -189,9 +189,10 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
return 2; /* busy */
spin_lock(&fi->lock);
- li = fi->local_int[cpu_addr];
+ if (cpu_addr < KVM_MAX_VCPUS)
+ li = fi->local_int[cpu_addr];
- if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
+ if (li == NULL) {
rc = 1; /* incorrect state */
*reg &= SIGP_STAT_INCORRECT_STATE;
kfree(inti);
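
The sigp.c change moves the range check in front of the array access, so a guest-supplied cpu_addr can no longer be used to index past fi->local_int[] before it is rejected. The pattern, as a hedged sketch with a made-up helper name:

        static struct kvm_s390_local_interrupt *
        sigp_get_local_int(struct kvm_s390_float_interrupt *fi, u16 cpu_addr)
        {
                if (cpu_addr >= KVM_MAX_VCPUS)          /* validate first */
                        return NULL;
                return fi->local_int[cpu_addr];         /* then index */
        }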
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
index e6a4fe9..bd1f5c6 100644
--- a/arch/s390/power/swsusp.c
+++ b/arch/s390/power/swsusp.c
@@ -7,24 +7,36 @@
*
*/
+#include <asm/system.h>
-/*
- * save CPU registers before creating a hibernation image and before
- * restoring the memory state from it
- */
void save_processor_state(void)
{
- /* implentation contained in the
- * swsusp_arch_suspend function
+ /* swsusp_arch_suspend() actually saves all cpu register contents.
+ * Machine checks must be disabled since swsusp_arch_suspend() stores
+ * register contents to their lowcore save areas. That's the same
+ * place where register contents on machine checks would be saved.
+ * To avoid register corruption disable machine checks.
+ * We must also disable machine checks in the new psw mask for
+ * program checks, since swsusp_arch_suspend() may generate program
+ * checks. Disabling machine checks for all other new psw masks is
+ * just paranoia.
*/
+ local_mcck_disable();
+ /* Disable lowcore protection */
+ __ctl_clear_bit(0,28);
+ S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
}
-/*
- * restore the contents of CPU registers
- */
void restore_processor_state(void)
{
- /* implentation contained in the
- * swsusp_arch_resume function
- */
+ S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
+ /* Enable lowcore protection */
+ __ctl_set_bit(0,28);
+ local_mcck_enable();
}
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
index 76d688d..b26df5c 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/power/swsusp_asm64.S
@@ -32,19 +32,14 @@ swsusp_arch_suspend:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
- /* Switch off lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- ni __SF_EMPTY+4(%r15),0xef
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
- /* Setup base register for lowcore (absolute 0) */
- llgf %r1,__SF_EMPTY(%r15)
+ /* Save prefix register contents for lowcore */
+ llgf %r4,__SF_EMPTY(%r15)
/* Get pointer to save area */
- aghi %r1,0x1000
+ lghi %r1,0x1000
/* Store registers */
mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
@@ -79,17 +74,15 @@ swsusp_arch_suspend:
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
- /* Setup lowcore */
- brasl %r14,setup_lowcore_early
+ lghi %r2,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
/* Save image */
brasl %r14,swsusp_save
- /* Switch on lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- oi __SF_EMPTY+4(%r15),0x10
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Restore prefix register and return */
lghi %r1,0x1000
spx 0x318(%r1)
@@ -117,11 +110,6 @@ swsusp_arch_resume:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
- /* Switch off lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- ni __SF_EMPTY+4(%r15),0xef
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Set prefix page to zero */
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
@@ -175,7 +163,7 @@ swsusp_arch_resume:
/* Load old stack */
lg %r15,0x2f8(%r13)
- /* Pointer to save arae */
+ /* Pointer to save area */
lghi %r13,0x1000
#ifdef CONFIG_SMP
@@ -187,11 +175,6 @@ swsusp_arch_resume:
/* Restore prefix register */
spx 0x318(%r13)
- /* Switch on lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- oi __SF_EMPTY+4(%r15),0x10
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
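
In the suspend path the setup_lowcore_early() call is replaced by an mvcle copy loop: %r2/%r3 hold the destination address and length, %r4/%r5 the source address and length, and jo 1b re-executes the instruction while it ends with condition code 3 (copy not yet complete). A hedged pseudo-C equivalent, with a made-up function name and prefix being the value saved earlier by stpx:

        static void copy_prefix_lowcore_to_absolute_zero(unsigned long prefix)
        {
                /*
                 * Replicate the two lowcore pages at the old prefix address
                 * over the absolute lowcore at address 0.
                 */
                memcpy((void *) 0UL, (void *) prefix, 2 * PAGE_SIZE);
        }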