Diffstat (limited to 'arch/tile/mm')
-rw-r--r--  arch/tile/mm/fault.c     | 22
-rw-r--r--  arch/tile/mm/homecache.c |  1
-rw-r--r--  arch/tile/mm/init.c      | 26
-rw-r--r--  arch/tile/mm/pgtable.c   | 38
4 files changed, 56 insertions(+), 31 deletions(-)
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index cba30e9..22e58f5 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -130,7 +130,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
}
/*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area.
*/
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
@@ -203,9 +203,14 @@ static pgd_t *get_current_pgd(void)
* interrupt or a critical region, and must do as little as possible.
* Similarly, we can't use atomic ops here, since we may be handling a
* fault caused by an atomic op access.
+ *
+ * If we find a migrating PTE while we're in an NMI context, and we're
+ * at a PC that has a registered exception handler, we don't wait,
+ * since this thread may (e.g.) have been interrupted while migrating
+ * its own stack, which would then cause us to self-deadlock.
*/
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
- unsigned long address,
+ unsigned long address, unsigned long pc,
int is_kernel_mode, int write)
{
pud_t *pud;
@@ -227,6 +232,8 @@ static int handle_migrating_pte(pgd_t *pgd, int fault_num,
pte_offset_kernel(pmd, address);
pteval = *pte;
if (pte_migrating(pteval)) {
+ if (in_nmi() && search_exception_tables(pc))
+ return 0;
wait_for_migration(pte);
return 1;
}
@@ -300,7 +307,7 @@ static int handle_page_fault(struct pt_regs *regs,
* rather than trying to patch up the existing PTE.
*/
pgd = get_current_pgd();
- if (handle_migrating_pte(pgd, fault_num, address,
+ if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
is_kernel_mode, write))
return 1;
@@ -335,9 +342,12 @@ static int handle_page_fault(struct pt_regs *regs,
/*
* If we're trying to touch user-space addresses, we must
* be either at PL0, or else with interrupts enabled in the
- * kernel, so either way we can re-enable interrupts here.
+ * kernel, so either way we can re-enable interrupts here
+ * unless we are doing atomic access to user space with
+ * interrupts disabled.
*/
- local_irq_enable();
+ if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
+ local_irq_enable();
mm = tsk->mm;
@@ -665,7 +675,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
*/
if (fault_num == INT_DTLB_ACCESS)
write = 1;
- if (handle_migrating_pte(pgd, fault_num, address, 1, write))
+ if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
return state;
/* Return zero so that we continue on with normal fault handling. */
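The fault.c change above threads the faulting PC down into handle_migrating_pte() so the wait for a migrating PTE can be skipped safely from NMI context. A condensed sketch of the resulting check, with the reasoning spelled out in comments (pte_migrating() and wait_for_migration() are arch/tile internals; this paraphrase is illustrative, not the verbatim kernel code):

/*
 * If the PTE is migrating, the usual response is to spin until the
 * migration finishes and then retry the access.  But an NMI may have
 * interrupted the very thread that is migrating its own stack, in
 * which case spinning would self-deadlock.  If the faulting PC has an
 * exception-table fixup registered, it is safe to give up and let the
 * fixup run instead.
 */
static int migrating_pte_sketch(pte_t *pte, unsigned long pc)
{
	pte_t pteval = *pte;

	if (!pte_migrating(pteval))
		return 0;		/* not migrating: normal fault path */
	if (in_nmi() && search_exception_tables(pc))
		return 0;		/* NMI with a fixup: don't wait */
	wait_for_migration(pte);
	return 1;			/* migration done: retry the access */
}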
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 1cc6ae4..499f737 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -394,6 +394,7 @@ int page_home(struct page *page)
return pte_to_home(*virt_to_pte(NULL, kva));
}
}
+EXPORT_SYMBOL(page_home);
void homecache_change_page_home(struct page *page, int order, int home)
{
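The homecache.c hunk simply exports page_home() to loadable modules. A hypothetical module snippet showing why that matters (only page_home() and the headers are real; the module itself is illustrative):

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/homecache.h>

static int __init homedemo_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	/* With EXPORT_SYMBOL(page_home), a module can now ask which
	 * tile (or hash-for-home) a page's cache lines are homed on. */
	pr_info("page home: %d\n", page_home(page));
	__free_page(page);
	return 0;
}
module_init(homedemo_init);
MODULE_LICENSE("GPL");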
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c490..6a9d20d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,11 +254,6 @@ static pgprot_t __init init_pgprot(ulong address)
return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
}
- /* As a performance optimization, keep the boot init stack here. */
- if (address >= (ulong)&init_thread_union &&
- address < (ulong)&init_thread_union + THREAD_SIZE)
- return construct_pgprot(PAGE_KERNEL, smp_processor_id());
-
#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
/* Force the atomic_locks[] array page to be hash-for-home. */
@@ -557,6 +552,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
address = MEM_SV_INTRPT;
pmd = get_pmd(pgtables, address);
+ pfn = 0; /* code starts at PA 0 */
if (ktext_small) {
/* Allocate an L2 PTE for the kernel text */
int cpu = 0;
@@ -579,10 +575,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
}
BUG_ON(address != (unsigned long)_stext);
- pfn = 0; /* code starts at PA 0 */
- pte = alloc_pte();
- for (pte_ofs = 0; address < (unsigned long)_einittext;
- pfn++, pte_ofs++, address += PAGE_SIZE) {
+ pte = NULL;
+ for (; address < (unsigned long)_einittext;
+ pfn++, address += PAGE_SIZE) {
+ pte_ofs = pte_index(address);
+ if (pte_ofs == 0) {
+ if (pte)
+ assign_pte(pmd++, pte);
+ pte = alloc_pte();
+ }
if (!ktext_local) {
prot = set_remote_cache_cpu(prot, cpu);
cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +592,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
}
pte[pte_ofs] = pfn_pte(pfn, prot);
}
- assign_pte(pmd, pte);
+ if (pte)
+ assign_pte(pmd, pte);
} else {
pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
pteval = pte_mkhuge(pteval);
@@ -614,7 +616,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
else
pteval = hv_pte_set_mode(pteval,
HV_PTE_MODE_CACHE_NO_L3);
- *(pte_t *)pmd = pteval;
+ for (; address < (unsigned long)_einittext;
+ pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+ *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
}
/* Set swapper_pgprot here so it is flushed to memory right away. */
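The init.c rework stops assuming the kernel text fits under a single PMD entry: pfn is initialized before both branches, the small-page loop allocates a fresh L2 page table each time pte_index() wraps to zero, and the huge-page branch writes one PMD entry per huge page. A commented paraphrase of the small-page loop (assign_pte() and alloc_pte() are local helpers in init.c; this is a sketch, not the verbatim code):

pte = NULL;
for (; address < (unsigned long)_einittext;
     pfn++, address += PAGE_SIZE) {
	pte_ofs = pte_index(address);	/* slot within the current L2 table */
	if (pte_ofs == 0) {
		/* Crossed a PMD boundary: install the table we just
		 * filled (if any) and start a new one. */
		if (pte)
			assign_pte(pmd++, pte);
		pte = alloc_pte();
	}
	pte[pte_ofs] = pfn_pte(pfn, prot);
}
if (pte)
	assign_pte(pmd, pte);	/* install the final, possibly partial, table */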
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 8730369..2410aa8 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
if (!pmd_huge_page(*pmd))
return;
- /*
- * Grab the pgd_lock, since we may need it to walk the pgd_list,
- * and since we need some kind of lock here to avoid races.
- */
- spin_lock_irqsave(&pgd_lock, flags);
+ spin_lock_irqsave(&init_mm.page_table_lock, flags);
if (!pmd_huge_page(*pmd)) {
/* Lost the race to convert the huge page. */
- spin_unlock_irqrestore(&pgd_lock, flags);
+ spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
return;
}
@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
#ifdef __PAGETABLE_PMD_FOLDED
/* Walk every pgd on the system and update the pmd there. */
+ spin_lock(&pgd_lock);
list_for_each(pos, &pgd_list) {
pmd_t *copy_pmd;
pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
copy_pmd = pmd_offset(pud, addr);
__set_pmd(copy_pmd, *pmd);
}
+ spin_unlock(&pgd_lock);
#endif
/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
cpu_possible_mask, NULL, 0);
/* Hold the lock until the TLB flush is finished to avoid races. */
- spin_unlock_irqrestore(&pgd_lock, flags);
+ spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
/*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
* against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
*/
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
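The comment above documents the ordering this patch establishes for shatter_huge_page(): init_mm.page_table_lock becomes the outer lock, taken with interrupts disabled, and pgd_lock is taken only briefly inside it to walk pgd_list. In outline (a sketch of the structure, not the full function):

void shatter_huge_page_outline(unsigned long addr)
{
	unsigned long flags;

	/* Outer lock, irq-disabled: callable from interrupt context. */
	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	/* ... convert the huge PMD into a freshly built L2 page table ... */
#ifdef __PAGETABLE_PMD_FOLDED
	/* Inner lock: propagate the new PMD into every pgd's copy. */
	spin_lock(&pgd_lock);
	/* ... list_for_each over pgd_list, __set_pmd on each copy ... */
	spin_unlock(&pgd_lock);
#endif
	/* flush_remote() has the hypervisor do the TLB shootdown, so
	 * holding the lock across it cannot trigger an IPI deadlock. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}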
@@ -469,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte)
void set_pte(pte_t *ptep, pte_t pte)
{
- struct page *page = pfn_to_page(pte_pfn(pte));
-
- /* Update the home of a PTE if necessary */
- pte = pte_set_home(pte, page_home(page));
+ if (pte_present(pte) &&
+ (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
+ /* The PTE actually references physical memory. */
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ /* Update the home of the PTE from the struct page. */
+ pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
+ } else if (hv_pte_get_mode(pte) == 0) {
+ /* remap_pfn_range(), etc, must supply PTE mode. */
+ panic("set_pte(): out-of-range PFN and mode 0\n");
+ }
+ }
__set_pte(ptep, pte);
}
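The new set_pte() logic consults a struct page only when one actually exists. Its decision flow, restated as a standalone sketch (a paraphrase, not the kernel function):

static void set_pte_checks_sketch(pte_t pte)
{
	if (!pte_present(pte))
		return;		/* swap/none entries reference no memory */
	if (CHIP_HAS_MMIO() && hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return;		/* MMIO mappings have no struct page to home */
	if (pfn_valid(pte_pfn(pte))) {
		/* Ordinary RAM: inherit the cache home recorded in the
		 * struct page, as before. */
	} else if (hv_pte_get_mode(pte) == 0) {
		/* Out-of-range PFN with no caching mode set: the caller
		 * (e.g. a remap_pfn_range() user) forgot to supply one. */
		panic("set_pte(): out-of-range PFN and mode 0\n");
	}
}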