author		Paul Mundt <lethal@linux-sh.org>	2007-11-19 13:05:18 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2007-11-19 13:05:18 +0900
commit		0f1a394ba68c4bbdedb1dbfdf6784ba54c07bbe4 (patch)
tree		ef68075d7d43e8f458bf653f072ae2f8cc0bcbd1 /arch/sh/mm
parent		1c6b2ca5e0939bf8b5d1a11f1646f25189ecd447 (diff)
sh: lockless UTLB miss fast-path.
With the refactored update_mmu_cache() introduced in older kernels, there's no longer any need to take the page_table_lock in this path, so simply drop it completely.

Without this, performance degradation is seen on SMP on heavily threaded workloads that don't use the split ptlock, and ultimately we have no reason to contend for the lock in the first place.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/fault.c | 25 +++++++------------------
1 file changed, 7 insertions(+), 18 deletions(-)
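For orientation, here is a condensed sketch of __do_page_fault() as it reads after this patch, reconstructed from the hunks below. Context lines the diff does not touch (the KGDB hook, the pud/pmd walk details, any young/dirty bookkeeping between hunks) are filled in from the usual sh fault path or elided, so treat this as an illustration of the lockless pte walk rather than the verbatim resulting function.

/*
 * Sketch of the UTLB miss fast path after this change. The pte walk no
 * longer takes mm->page_table_lock (or the split ptlock); both kernel
 * and user addresses use a plain pte_offset_kernel() lookup, and every
 * failure case simply returns 1 to punt to the full fault handler.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* ... KGDB hook elided ... */

	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);	/* kernel P3 mapping */
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;		/* punt to the slow path */
		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	/* ... pud validity check from unchanged context elided ... */
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	pte = pte_offset_kernel(pmd, address);	/* lockless lookup */
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);

	/* ... young/dirty bookkeeping from unchanged context elided ... */

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);	/* refill the TLB entry */

	return 0;
}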
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index c56a5fa..60d74f7 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -258,9 +258,6 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
pmd_t *pmd;
pte_t *pte;
pte_t entry;
- struct mm_struct *mm = current->mm;
- spinlock_t *ptl = NULL;
- int ret = 1;
#ifdef CONFIG_SH_KGDB
if (kgdb_nofault && kgdb_bus_err_hook)
@@ -274,12 +271,11 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
*/
if (address >= P3SEG && address < P3_ADDR_MAX) {
pgd = pgd_offset_k(address);
- mm = NULL;
} else {
- if (unlikely(address >= TASK_SIZE || !mm))
+ if (unlikely(address >= TASK_SIZE || !current->mm))
return 1;
- pgd = pgd_offset(mm, address);
+ pgd = pgd_offset(current->mm, address);
}
pud = pud_offset(pgd, address);
@@ -289,16 +285,12 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
if (pmd_none_or_clear_bad(pmd))
return 1;
- if (mm)
- pte = pte_offset_map_lock(mm, pmd, address, &ptl);
- else
- pte = pte_offset_kernel(pmd, address);
-
+ pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (unlikely(pte_none(entry) || pte_not_present(entry)))
- goto unlock;
+ return 1;
if (unlikely(writeaccess && !pte_write(entry)))
- goto unlock;
+ return 1;
if (writeaccess)
entry = pte_mkdirty(entry);
@@ -306,9 +298,6 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
set_pte(pte, entry);
update_mmu_cache(NULL, address, entry);
- ret = 0;
-unlock:
- if (mm)
- pte_unmap_unlock(pte, ptl);
- return ret;
+
+ return 0;
}