summary refs log tree commit diff stats
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--  sys/mips/include/tlb.h  1
-rw-r--r--  sys/mips/mips/pmap.c   62
-rw-r--r--  sys/mips/mips/tlb.c    75
3 files changed, 129 insertions, 9 deletions
diff --git a/sys/mips/include/tlb.h b/sys/mips/include/tlb.h
index b38cf2b..c3baf78 100644
--- a/sys/mips/include/tlb.h
+++ b/sys/mips/include/tlb.h
@@ -53,6 +53,7 @@ void tlb_insert_wired(unsigned, vm_offset_t, pt_entry_t, pt_entry_t);
void tlb_invalidate_address(struct pmap *, vm_offset_t);
void tlb_invalidate_all(void);
void tlb_invalidate_all_user(struct pmap *);
+void tlb_invalidate_range(struct pmap *, vm_offset_t, vm_offset_t);
void tlb_save(void);
void tlb_update(struct pmap *, vm_offset_t, pt_entry_t);
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 21e34c4..c41b862 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -190,10 +190,9 @@ static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);
-#ifdef SMP
static void pmap_invalidate_page_action(void *arg);
+static void pmap_invalidate_range_action(void *arg);
static void pmap_update_page_action(void *arg);
-#endif
#ifndef __mips_n64
/*
@@ -711,6 +710,31 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
}
+struct pmap_invalidate_range_arg {
+ pmap_t pmap;
+ vm_offset_t sva;
+ vm_offset_t eva;
+};
+
+static void
+pmap_invalidate_range_action(void *arg)
+{
+ struct pmap_invalidate_range_arg *p = arg;
+
+ tlb_invalidate_range(p->pmap, p->sva, p->eva);
+}
+
+static void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ struct pmap_invalidate_range_arg arg;
+
+ arg.pmap = pmap;
+ arg.sva = sva;
+ arg.eva = eva;
+ pmap_call_on_active_cpus(pmap, pmap_invalidate_range_action, &arg);
+}
+
struct pmap_update_page_arg {
pmap_t pmap;
vm_offset_t va;
@@ -1737,12 +1761,15 @@ pmap_remove_page(struct pmap *pmap, vm_offset_t va)
* rounded to the page size.
*/
void
-pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
- vm_offset_t va_next;
pd_entry_t *pde, *pdpe;
pt_entry_t *pte;
+ vm_offset_t va, va_next;
+ /*
+ * Perform an unsynchronized read. This is, however, safe.
+ */
if (pmap->pm_stats.resident_count == 0)
return;
@@ -1772,17 +1799,36 @@ pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
va_next = eva;
pde = pmap_pdpe_to_pde(pdpe, sva);
- if (*pde == 0)
+ if (*pde == NULL)
continue;
+
+ /*
+ * Limit our scan to either the end of the va represented
+ * by the current page table page, or to the end of the
+ * range being removed.
+ */
if (va_next > eva)
va_next = eva;
+
+ va = va_next;
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
sva += PAGE_SIZE) {
- if (!pte_test(pte, PTE_V))
+ if (!pte_test(pte, PTE_V)) {
+ if (va != va_next) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = va_next;
+ }
continue;
- pmap_remove_pte(pmap, pte, sva, *pde);
- pmap_invalidate_page(pmap, sva);
+ }
+ if (va == va_next)
+ va = sva;
+ if (pmap_remove_pte(pmap, pte, sva, *pde)) {
+ sva += PAGE_SIZE;
+ break;
+ }
}
+ if (va != va_next)
+ pmap_invalidate_range(pmap, va, sva);
}
out:
rw_wunlock(&pvh_global_lock);
diff --git a/sys/mips/mips/tlb.c b/sys/mips/mips/tlb.c
index dff5b63..2c1fcb5 100644
--- a/sys/mips/mips/tlb.c
+++ b/sys/mips/mips/tlb.c
@@ -35,7 +35,7 @@
#include <sys/smp.h>
#include <vm/vm.h>
-#include <vm/vm_page.h>
+#include <vm/pmap.h>
#include <machine/pte.h>
#include <machine/tlb.h>
@@ -187,6 +187,79 @@ tlb_invalidate_all_user(struct pmap *pmap)
intr_restore(s);
}
+/*
+ * Invalidates any TLB entries that map a virtual page from the specified
+ * address range. If "end" is zero, then every virtual page is considered to
+ * be within the address range's upper bound.
+ */
+void
+tlb_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
+{
+ register_t asid, end_hi, hi, hi_pagemask, s, save_asid, start_hi;
+ int i;
+
+ KASSERT(start < end || (end == 0 && start > 0),
+ ("tlb_invalidate_range: invalid range"));
+
+ /*
+ * Truncate the virtual address "start" to an even page frame number,
+ * and round the virtual address "end" to an even page frame number.
+ */
+ start &= ~((1 << TLBMASK_SHIFT) - 1);
+ end = (end + (1 << TLBMASK_SHIFT) - 1) & ~((1 << TLBMASK_SHIFT) - 1);
+
+ s = intr_disable();
+ save_asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+
+ asid = pmap_asid(pmap);
+ start_hi = TLBHI_ENTRY(start, asid);
+ end_hi = TLBHI_ENTRY(end, asid);
+
+ /*
+ * Select the fastest method for invalidating the TLB entries.
+ */
+ if (end - start < num_tlbentries << TLBMASK_SHIFT || (end == 0 &&
+ start >= -(num_tlbentries << TLBMASK_SHIFT))) {
+ /*
+ * The virtual address range is small compared to the size of
+ * the TLB. Probe the TLB for each even numbered page frame
+ * within the virtual address range.
+ */
+ for (hi = start_hi; hi != end_hi; hi += 1 << TLBMASK_SHIFT) {
+ mips_wr_pagemask(0);
+ mips_wr_entryhi(hi);
+ tlb_probe();
+ i = mips_rd_index();
+ if (i >= 0)
+ tlb_invalidate_one(i);
+ }
+ } else {
+ /*
+ * The virtual address range is large compared to the size of
+ * the TLB. Test every non-wired TLB entry.
+ */
+ for (i = mips_rd_wired(); i < num_tlbentries; i++) {
+ mips_wr_index(i);
+ tlb_read();
+ hi = mips_rd_entryhi();
+ if ((hi & TLBHI_ASID_MASK) == asid && (hi < end_hi ||
+ end == 0)) {
+ /*
+ * If "hi" is a large page that spans
+ * "start_hi", then it must be invalidated.
+ */
+ hi_pagemask = mips_rd_pagemask();
+ if (hi >= (start_hi & ~(hi_pagemask <<
+ TLBMASK_SHIFT)))
+ tlb_invalidate_one(i);
+ }
+ }
+ }
+
+ mips_wr_entryhi(save_asid);
+ intr_restore(s);
+}
+
/* XXX Only if DDB? */
void
tlb_save(void)
OpenPOWER on IntegriCloud