author    Russell King <rmk+kernel@arm.linux.org.uk>  2011-05-26 00:41:21 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2011-05-26 00:41:21 +0100
commit    ae1d3b974e091b5fc9008bd41bcbdaac68110b62 (patch)
tree      82e50a66925ced79f59ed4504f6e073ef55edee3 /arch/s390/mm
parent    586893ebc42943008010b4c210cfc9167df615e5 (diff)
parent    7a2207a0e1142a9b214b323e43ab2ecc592e5b0e (diff)
Merge branch 'for-rmk' of git://github.com/at91linux/linux-2.6-at91 into devel-stable
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/extmem.c        6
-rw-r--r--  arch/s390/mm/fault.c       187
-rw-r--r--  arch/s390/mm/hugetlbpage.c  10
-rw-r--r--  arch/s390/mm/init.c          3
-rw-r--r--  arch/s390/mm/pageattr.c      2
-rw-r--r--  arch/s390/mm/pgtable.c      68
-rw-r--r--  arch/s390/mm/vmem.c         14
7 files changed, 119 insertions, 171 deletions
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 3cc95dd..075ddad 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -412,6 +412,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
struct dcss_segment *seg;
int rc, diag_cc;
+ start_addr = end_addr = 0;
seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
if (seg == NULL) {
rc = -ENOMEM;
@@ -573,6 +574,7 @@ segment_modify_shared (char *name, int do_nonshared)
unsigned long start_addr, end_addr, dummy;
int rc, diag_cc;
+ start_addr = end_addr = 0;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
@@ -681,8 +683,6 @@ void
segment_save(char *name)
{
struct dcss_segment *seg;
- int startpfn = 0;
- int endpfn = 0;
char cmd1[160];
char cmd2[80];
int i, response;
@@ -698,8 +698,6 @@ segment_save(char *name)
goto out;
}
- startpfn = seg->start_addr >> PAGE_SHIFT;
- endpfn = (seg->end) >> PAGE_SHIFT;
sprintf(cmd1, "DEFSEG %s", name);
for (i=0; i<seg->segcnt; i++) {
sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ab98813..a0f9e73 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -225,33 +225,6 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
force_sig_info(SIGBUS, &si, tsk);
}
-#ifdef CONFIG_S390_EXEC_PROTECT
-static noinline int signal_return(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
-{
- u16 instruction;
- int rc;
-
- rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-
- if (!rc && instruction == 0x0a77) {
- clear_tsk_thread_flag(current, TIF_PER_TRAP);
- if (is_compat_task())
- sys32_sigreturn();
- else
- sys_sigreturn();
- } else if (!rc && instruction == 0x0aad) {
- clear_tsk_thread_flag(current, TIF_PER_TRAP);
- if (is_compat_task())
- sys32_rt_sigreturn();
- else
- sys_rt_sigreturn();
- } else
- do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
- return 0;
-}
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
static noinline void do_fault_error(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code, int fault)
{
@@ -259,13 +232,6 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
switch (fault) {
case VM_FAULT_BADACCESS:
-#ifdef CONFIG_S390_EXEC_PROTECT
- if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
- (trans_exc_code & 3) == 0) {
- signal_return(regs, int_code, trans_exc_code);
- break;
- }
-#endif /* CONFIG_S390_EXEC_PROTECT */
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
@@ -414,11 +380,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
-#ifdef CONFIG_S390_EXEC_PROTECT
- if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
- (trans_exc_code & 3) == 0)
- access = VM_EXEC;
-#endif
fault = do_exception(regs, access, trans_exc_code);
if (unlikely(fault))
do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
@@ -491,22 +452,28 @@ static int __init nopfault(char *str)
__setup("nopfault", nopfault);
-typedef struct {
- __u16 refdiagc;
- __u16 reffcode;
- __u16 refdwlen;
- __u16 refversn;
- __u64 refgaddr;
- __u64 refselmk;
- __u64 refcmpmk;
- __u64 reserved;
-} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
+struct pfault_refbk {
+ u16 refdiagc;
+ u16 reffcode;
+ u16 refdwlen;
+ u16 refversn;
+ u64 refgaddr;
+ u64 refselmk;
+ u64 refcmpmk;
+ u64 reserved;
+} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
- pfault_refbk_t refbk =
- { 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
- __PF_RES_FIELD };
+ struct pfault_refbk refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 0,
+ .refdwlen = 5,
+ .refversn = 2,
+ .refgaddr = __LC_CURRENT_PID,
+ .refselmk = 1ULL << 48,
+ .refcmpmk = 1ULL << 48,
+ .reserved = __PF_RES_FIELD };
int rc;
if (!MACHINE_IS_VM || pfault_disable)
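[The typedef'd pfault_refbk_t becomes a plain struct, and both requests now use C99 designated initializers: each field is named at the point of use, and anything left out is zero-initialized, which is why pfault_fini below can drop the explicit 0ULL entries. Note also that the token changes from __LC_CURRENT (the task_struct address) to __LC_CURRENT_PID, matching the pid-based lookup added to pfault_interrupt further down. A minimal user-space sketch of the initializer idiom; field values are illustrative, not a usable diag 0x258 request:

    #include <stdint.h>

    struct refbk {
            uint16_t refdiagc;      /* diagnose code, 0x258 for pfault */
            uint16_t reffcode;      /* 0 = establish token, 1 = cancel */
            uint16_t refdwlen;
            uint16_t refversn;
            uint64_t refgaddr;      /* token address */
            uint64_t refselmk;
            uint64_t refcmpmk;
            uint64_t reserved;
    } __attribute__((packed, aligned(8)));

    static struct refbk cancel_req = {
            .refdiagc = 0x258,
            .reffcode = 1,
            .refdwlen = 5,
            .refversn = 2,
            /* refgaddr, refselmk, refcmpmk, reserved: implicitly zero */
    };
]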
@@ -524,8 +491,12 @@ int pfault_init(void)
void pfault_fini(void)
{
- pfault_refbk_t refbk =
- { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
+ struct pfault_refbk refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 1,
+ .refdwlen = 5,
+ .refversn = 2,
+ };
if (!MACHINE_IS_VM || pfault_disable)
return;
@@ -537,11 +508,15 @@ void pfault_fini(void)
: : "a" (&refbk), "m" (refbk) : "cc");
}
+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
static void pfault_interrupt(unsigned int ext_int_code,
unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
+ pid_t pid;
/*
* Get the external interruption subcode & pfault
@@ -553,44 +528,79 @@ static void pfault_interrupt(unsigned int ext_int_code,
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
-
- /*
- * Get the token (= address of the task structure of the affected task).
- */
-#ifdef CONFIG_64BIT
- tsk = (struct task_struct *) param64;
-#else
- tsk = (struct task_struct *) param32;
-#endif
-
+ if (subcode & 0x0080) {
+ /* Get the token (= pid of the affected task). */
+ pid = sizeof(void *) == 4 ? param32 : param64;
+ rcu_read_lock();
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (tsk)
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ if (!tsk)
+ return;
+ } else {
+ tsk = current;
+ }
+ spin_lock(&pfault_lock);
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
- if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
+ if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
+ list_del(&tsk->thread.list);
wake_up_process(tsk);
- put_task_struct(tsk);
+ } else {
+ /* Completion interrupt was faster than initial
+ * interrupt. Set pfault_wait to -1 so the initial
+ * interrupt doesn't put the task to sleep. */
+ tsk->thread.pfault_wait = -1;
}
+ put_task_struct(tsk);
} else {
/* signal bit not set -> a real page is missing. */
- get_task_struct(tsk);
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
+ if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
- * interrupt (swapped in a -1 for pfault_wait). Set
- * pfault_wait back to zero and exit. This can be
- * done safely because tsk is running in kernel
- * mode and can't produce new pfaults. */
+ * interrupt (pfault_wait == -1). Set pfault_wait
+ * back to zero and exit. */
tsk->thread.pfault_wait = 0;
- set_task_state(tsk, TASK_RUNNING);
- put_task_struct(tsk);
- } else
+ } else {
+ /* Initial interrupt arrived before completion
+ * interrupt. Let the task sleep. */
+ tsk->thread.pfault_wait = 1;
+ list_add(&tsk->thread.list, &pfault_list);
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
+ }
+ }
+ spin_unlock(&pfault_lock);
+}
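[Each pseudo page fault produces two external interrupts: an initial interrupt when a real page is missing (subcode bit 0x0080 clear) and a completion interrupt once VM has swapped the page in (bit set). Since they can arrive in either order, thread.pfault_wait now encodes three states under pfault_lock instead of relying on xchg(): 0 (idle), 1 (initial interrupt seen, task sleeping on pfault_list) and -1 (completion won the race). A minimal sketch of the handshake, with the spinlock, list handling and task references elided; names here are illustrative, not the kernel's:

    static int pfault_state;        /* -1, 0 or 1; lock-protected in reality */

    static void on_initial_interrupt(void)
    {
            if (pfault_state == -1)
                    pfault_state = 0;       /* completion already arrived: don't sleep */
            else
                    pfault_state = 1;       /* sleep until the completion interrupt */
    }

    static void on_completion_interrupt(void)
    {
            if (pfault_state == 1)
                    pfault_state = 0;       /* wake the sleeping task */
            else
                    pfault_state = -1;      /* completion beat the initial interrupt */
    }
]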
+
+static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct thread_struct *thread, *next;
+ struct task_struct *tsk;
+
+ switch (action) {
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ spin_lock_irq(&pfault_lock);
+ list_for_each_entry_safe(thread, next, &pfault_list, list) {
+ thread->pfault_wait = 0;
+ list_del(&thread->list);
+ tsk = container_of(thread, struct task_struct, thread);
+ wake_up_process(tsk);
+ }
+ spin_unlock_irq(&pfault_lock);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
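[If a CPU goes away, completion interrupts for tasks parked on pfault_list would never arrive, so the notifier wakes every waiter on CPU_DEAD. list_for_each_entry_safe is required because entries are unlinked during the walk; a generic sketch of that pattern, with made-up types and helpers:

    struct waiter { struct list_head node; /* ... */ };

    struct waiter *w, *next;
    list_for_each_entry_safe(w, next, &wait_list, node) {
            list_del(&w->node);     /* list_del() poisons the links, so a plain
                                       list_for_each_entry would crash on the
                                       next iteration step */
            wake_waiter(w);         /* hypothetical wake helper */
    }
]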
static int __init pfault_irq_init(void)
@@ -599,22 +609,21 @@ static int __init pfault_irq_init(void)
if (!MACHINE_IS_VM)
return 0;
- /*
- * Try to get pfault pseudo page faults going.
- */
rc = register_external_interrupt(0x2603, pfault_interrupt);
- if (rc) {
- pfault_disable = 1;
- return rc;
- }
- if (pfault_init() == 0)
- return 0;
+ if (rc)
+ goto out_extint;
+ rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
+ if (rc)
+ goto out_pfault;
+ hotcpu_notifier(pfault_cpu_notify, 0);
+ return 0;
- /* Tough luck, no pfault. */
- pfault_disable = 1;
+out_pfault:
unregister_external_interrupt(0x2603, pfault_interrupt);
- return 0;
+out_extint:
+ pfault_disable = 1;
+ return rc;
}
early_initcall(pfault_irq_init);
-#endif
+#endif /* CONFIG_PFAULT */
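[pfault_irq_init() is also reshaped into the usual goto-unwind error style: each label undoes exactly what succeeded before the failure, and pfault_disable is set on every error path. A generic sketch of the idiom; helper names are placeholders, not kernel API:

    int feature_init(void)
    {
            int rc;

            rc = claim_resource_a();
            if (rc)
                    goto out;               /* nothing succeeded yet */
            rc = claim_resource_b();
            if (rc)
                    goto out_release_a;     /* undo only resource a */
            return 0;

    out_release_a:
            release_resource_a();
    out:
            mark_feature_disabled();
            return rc;
    }
]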
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 639cd21..a4d856d 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -13,7 +13,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *pteptr, pte_t pteval)
{
pmd_t *pmdp = (pmd_t *) pteptr;
- pte_t shadow_pteval = pteval;
unsigned long mask;
if (!MACHINE_HAS_HPAGE) {
@@ -21,18 +20,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
mask = pte_val(pteval) &
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
- if (mm->context.noexec) {
- pteptr += PTRS_PER_PTE;
- pte_val(shadow_pteval) =
- (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
- }
}
pmd_val(*pmdp) = pte_val(pteval);
- if (mm->context.noexec) {
- pmdp = get_shadow_table(pmdp);
- pmd_val(*pmdp) = pte_val(shadow_pteval);
- }
}
int arch_prepare_hugepage(struct page *page)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index bb40933..dfefc21 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -175,7 +175,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
- ptep_invalidate(&init_mm, address, pte);
+ __ptep_ipte(address, pte);
+ pte_val(*pte) = _PAGE_TYPE_EMPTY;
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index f05edcc..d013ed3 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -28,7 +28,7 @@ static void change_page_attr(unsigned long addr, int numpages,
pte = *ptep;
pte = set(pte);
- ptep_invalidate(&init_mm, addr, ptep);
+ __ptep_ipte(addr, ptep);
*ptep = pte;
addr += PAGE_SIZE;
}
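[Both init.c and pageattr.c replace ptep_invalidate(), which also maintained the now-removed shadow tables, with a bare __ptep_ipte() followed by an explicit store: the IPTE instruction invalidates the entry and flushes the TLB for it, and only then is the replacement value written. A sketch of the resulting pattern, assuming __ptep_ipte() as used in the hunks above, locking omitted:

    static void replace_kernel_pte(unsigned long addr, pte_t *ptep, pte_t new)
    {
            __ptep_ipte(addr, ptep);        /* invalidate + TLB flush for this entry */
            *ptep = new;                    /* now safe to store the new value */
    }
]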
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e1850c2..8d43306 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
while (batch->pgt_index > 0)
__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
while (batch->crst_index < RCU_FREELIST_SIZE)
- __crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+ crst_table_free(batch->mm, batch->table[batch->crst_index++]);
free_page((unsigned long) batch);
}
@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg)
}
early_param("vmalloc", parse_vmalloc);
-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
if (!page)
return NULL;
- page->index = 0;
- if (noexec) {
- struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
- if (!shadow) {
- __free_pages(page, ALLOC_ORDER);
- return NULL;
- }
- page->index = page_to_phys(shadow);
- }
- spin_lock_bh(&mm->context.list_lock);
- list_add(&page->lru, &mm->context.crst_list);
- spin_unlock_bh(&mm->context.list_lock);
return (unsigned long *) page_to_phys(page);
}
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
- unsigned long *shadow = get_shadow_table(table);
-
- if (shadow)
- free_pages((unsigned long) shadow, ALLOC_ORDER);
- free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
- struct page *page = virt_to_page(table);
-
- spin_lock_bh(&mm->context.list_lock);
- list_del(&page->lru);
- spin_unlock_bh(&mm->context.list_lock);
- __crst_table_free(mm, table);
+ free_pages((unsigned long) table, ALLOC_ORDER);
}
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
struct rcu_table_freelist *batch;
- struct page *page = virt_to_page(table);
- spin_lock_bh(&mm->context.list_lock);
- list_del(&page->lru);
- spin_unlock_bh(&mm->context.list_lock);
if (atomic_read(&mm->mm_users) < 2 &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- __crst_table_free(mm, table);
+ crst_table_free(mm, table);
return;
}
batch = rcu_table_freelist_get(mm);
if (!batch) {
smp_call_function(smp_sync, NULL, 1);
- __crst_table_free(mm, table);
+ crst_table_free(mm, table);
return;
}
batch->table[--batch->crst_index] = table;
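[With the shadow tables gone, __crst_table_free() collapses into crst_table_free(), but the RCU logic keeps its shape: free immediately only when no other CPU can still be walking the table (a single mm user, active on this CPU alone), otherwise defer the free past a grace period via the batch list. The same deferral could be written with plain call_rcu(); a hedged sketch where call_rcu() and container_of() are real kernel API but the surrounding names are made up:

    struct dead_table {
            struct rcu_head rcu;
            unsigned long *table;
    };

    static void dead_table_free(struct rcu_head *head)
    {
            struct dead_table *dt = container_of(head, struct dead_table, rcu);

            free_pages((unsigned long) dt->table, ALLOC_ORDER);
            kfree(dt);
    }

    /* after unlinking the table from the page-table tree:
     *      call_rcu(&dt->rcu, dead_table_free);
     */
]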
@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
BUG_ON(limit > (1UL << 53));
repeat:
- table = crst_table_alloc(mm, mm->context.noexec);
+ table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
spin_lock_bh(&mm->page_table_lock);
@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long *table;
unsigned long bits;
- bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+ bits = (mm->context.has_pgste) ? 3UL : 1UL;
spin_lock_bh(&mm->context.list_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned long bits;
- bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+ bits = (mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock_bh(&mm->context.list_lock);
@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
page_table_free(mm, table);
return;
}
- bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+ bits = (mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock_bh(&mm->context.list_lock);
@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
rcu_table_freelist_finish();
}
-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
-{
- struct page *page;
-
- spin_lock_bh(&mm->context.list_lock);
- /* Free shadow region and segment tables. */
- list_for_each_entry(page, &mm->context.crst_list, lru)
- if (page->index) {
- free_pages((unsigned long) page->index, ALLOC_ORDER);
- page->index = 0;
- }
- /* "Free" second halves of page tables. */
- list_for_each_entry(page, &mm->context.pgtable_list, lru)
- page->flags &= ~SECOND_HALVES;
- spin_unlock_bh(&mm->context.list_lock);
- mm->context.noexec = 0;
- update_mm(mm, tsk);
-}
-
/*
* switch on pgstes for its userspace process (for kvm)
*/
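[In page_table_alloc()/page_table_free(), the fragment mask now depends only on has_pgste: a KVM mm needs the page-status-table extension next to each page table and so claims both 2K halves of the 4K page (bits = 3), while an ordinary mm claims one half (bits = 1); the shift selects which half the table lives in. A sketch of the arithmetic, assuming 64-bit (256 entries of 8 bytes, i.e. 2K per table; mm_has_pgste is an illustrative flag):

    unsigned long offset = __pa(table) & (PAGE_SIZE - 1);          /* 0 or 2048 */
    unsigned long half = offset / (256 * sizeof(unsigned long));   /* 0 or 1 */
    unsigned long bits = mm_has_pgste ? 3UL : 1UL;  /* pgste: table + extension */

    bits <<= half;          /* final mask against the page's fragment bitmap */
]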
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 34c43f2..8c1970d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -95,7 +95,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
@@ -103,7 +103,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+ pud_populate(&init_mm, pu_dir, pm_dir);
}
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
@@ -123,7 +123,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
- pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+ pmd_populate(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
@@ -159,7 +159,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
continue;
if (pmd_huge(*pm_dir)) {
- pmd_clear_kernel(pm_dir);
+ pmd_clear(pm_dir);
address += HPAGE_SIZE - PAGE_SIZE;
continue;
}
@@ -192,7 +192,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
@@ -200,7 +200,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+ pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
@@ -208,7 +208,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
- pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+ pmd_populate(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
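[The _kernel variants of the populate helpers existed only to mirror every new table into the noexec shadow hierarchy; with exec protection removed, vmem_add_mem(), vmem_remove_range() and vmemmap_populate() use the ordinary helpers. The walk itself is the standard top-down populate pattern; a condensed sketch, where the alloc_*() helpers stand in for the vmem_*_alloc() calls above:

    pg_dir = pgd_offset_k(address);
    if (pgd_none(*pg_dir))
            pgd_populate(&init_mm, pg_dir, alloc_pud());    /* placeholder */
    pu_dir = pud_offset(pg_dir, address);
    if (pud_none(*pu_dir))
            pud_populate(&init_mm, pu_dir, alloc_pmd());
    pm_dir = pmd_offset(pu_dir, address);
    if (pmd_none(*pm_dir))
            pmd_populate(&init_mm, pm_dir, alloc_pte());
    pt_dir = pte_offset_kernel(pm_dir, address);
]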