-rw-r--r--  sys/powerpc/booke/pmap.c              62
-rw-r--r--  sys/powerpc/include/pmap.h             1
-rw-r--r--  sys/powerpc/include/tlb.h              1
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m          23
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c    7
5 files changed, 94 insertions(+), 0 deletions(-)
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 0b45c87..2b883b8 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -340,6 +340,8 @@ static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
 static void mmu_booke_scan_init(mmu_t);
 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
+static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
+    vm_size_t sz, vm_memattr_t mode);
 static mmu_method_t mmu_booke_methods[] = {
 	/* pmap dispatcher interface */
@@ -392,6 +394,7 @@ static mmu_method_t mmu_booke_methods[] = {
 	MMUMETHOD(mmu_kextract, mmu_booke_kextract),
 /*	MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
 	MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
+	MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
 	/* dumpsys() support */
 	MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
@@ -419,6 +422,8 @@ tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
 		return (MAS2_I);
 	case VM_MEMATTR_WRITE_THROUGH:
 		return (MAS2_W | MAS2_M);
+	case VM_MEMATTR_CACHEABLE:
+		return (MAS2_M);
 	}
 }
@@ -2900,6 +2905,63 @@ mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
 	return (0);
 }
+static int
+mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
+    vm_memattr_t mode)
+{
+	vm_offset_t va;
+	pte_t *pte;
+	int i, j;
+
+	/* Check TLB1 mappings */
+	for (i = 0; i < tlb1_idx; i++) {
+		if (!(tlb1[i].mas1 & MAS1_VALID))
+			continue;
+		if (addr >= tlb1[i].virt && addr < tlb1[i].virt + tlb1[i].size)
+			break;
+	}
+	if (i < tlb1_idx) {
+		/* Only allow full mappings to be modified for now. */
+		/* Validate the range. */
+		for (j = i, va = addr; va < addr + sz; va += tlb1[j].size, j++) {
+			if (va != tlb1[j].virt || (sz - (va - addr) < tlb1[j].size))
+				return (EINVAL);
+		}
+		for (va = addr; va < addr + sz; va += tlb1[i].size, i++) {
+			tlb1[i].mas2 &= ~MAS2_WIMGE_MASK;
+			tlb1[i].mas2 |= tlb_calc_wimg(tlb1[i].phys, mode);
+
+			/*
+			 * Write it out to the TLB.  Should really re-sync
+			 * with other cores.
+			 */
+			tlb1_write_entry(i);
+		}
+		return (0);
+	}
+
+	/* Not in TLB1, try through pmap */
+	/* First validate the range. */
+	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
+		pte = pte_find(mmu, kernel_pmap, va);
+		if (pte == NULL || !PTE_ISVALID(pte))
+			return (EINVAL);
+	}
+
+	mtx_lock_spin(&tlbivax_mutex);
+	tlb_miss_lock();
+	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
+		pte = pte_find(mmu, kernel_pmap, va);
+		*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
+		*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
+		tlb0_flush_entry(va);
+	}
+	tlb_miss_unlock();
+	mtx_unlock_spin(&tlbivax_mutex);
+
+	return (0);
+}
+
 /**************************************************************************/
 /* TID handling */
 /**************************************************************************/
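The TLB1 branch of mmu_booke_change_attr() only accepts ranges that cover whole large-page entries: the range must start exactly at an entry's base address and each successive entry must fit entirely inside the remaining size, otherwise EINVAL is returned and nothing is modified. The standalone sketch below models just that validation step; the entry structure, function name, and the explicit bounds check are invented for illustration and are not part of the commit.

/*
 * Illustrative, user-space model of the TLB1 range check above.  Only
 * the validation logic mirrors the diff; everything else is hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct entry {
	uintptr_t virt;		/* base virtual address of the mapping */
	size_t	  size;		/* size of the mapping */
};

static bool
range_covers_whole_entries(const struct entry *e, int nent, int first,
    uintptr_t addr, size_t sz)
{
	uintptr_t va = addr;
	int j;

	for (j = first; va < addr + sz; va += e[j].size, j++) {
		/*
		 * Fail if we ran past the table, the range starts in the
		 * middle of an entry, or its tail covers only part of one.
		 */
		if (j >= nent || va != e[j].virt || sz - (va - addr) < e[j].size)
			return (false);
	}
	return (true);
}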
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index 97c5c79..ea06c08 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -238,6 +238,7 @@ void *pmap_mapdev(vm_paddr_t, vm_size_t);
 void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
 void pmap_unmapdev(vm_offset_t, vm_size_t);
 void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 void pmap_deactivate(struct thread *);
 vm_paddr_t pmap_kextract(vm_offset_t);
 int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
diff --git a/sys/powerpc/include/tlb.h b/sys/powerpc/include/tlb.h
index 4f12c19..8fb7c85 100644
--- a/sys/powerpc/include/tlb.h
+++ b/sys/powerpc/include/tlb.h
@@ -74,6 +74,7 @@
 #define MAS2_M			0x00000004
 #define MAS2_G			0x00000002
 #define MAS2_E			0x00000001
+#define MAS2_WIMGE_MASK		0x0000001F
 #define MAS3_RPN		0xFFFFF000
 #define MAS3_RPN_SHIFT		12
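The new MAS2_WIMGE_MASK is simply the union of the five attribute bits in the low bits of MAS2 (write-through, cache-inhibited, memory-coherent, guarded, little-endian): 0x10 | 0x08 | 0x04 | 0x02 | 0x01 == 0x1F. A small compile-time check of that identity is sketched below; MAS2_W and MAS2_I do not appear in this hunk, so their values are assumed from the standard Book-E MAS2 layout.

/* Compile-time sanity check, assuming MAS2_W == 0x10 and MAS2_I == 0x08. */
#define MAS2_W		0x00000010
#define MAS2_I		0x00000008
#define MAS2_M		0x00000004
#define MAS2_G		0x00000002
#define MAS2_E		0x00000001
#define MAS2_WIMGE_MASK	0x0000001F

_Static_assert((MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E) == MAS2_WIMGE_MASK,
    "MAS2_WIMGE_MASK must be the union of the five WIMGE bits");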
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 6c41c74..ce05bd4 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -124,6 +124,12 @@ CODE {
 	{
 		return;
 	}
+
+	static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
+	    vm_size_t sz, vm_memattr_t mode)
+	{
+		return (0);
+	}
 };
@@ -956,3 +962,20 @@ METHOD void quick_remove_page {
 	vm_offset_t _va;
 };
+/**
+ * @brief Change the specified virtual address range's memory type.
+ *
+ * @param _va The virtual base address to change
+ *
+ * @param _sz Size of the region to change
+ *
+ * @param _mode New mode to set on the VA range
+ *
+ * @retval error 0 on success, EINVAL or ENOMEM on error.
+ */
+METHOD int change_attr {
+	mmu_t		_mmu;
+	vm_offset_t	_va;
+	vm_size_t	_sz;
+	vm_memattr_t	_mode;
+} DEFAULT mmu_null_change_attr;
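mmu_if.m is fed to FreeBSD's kobj interface compiler: each METHOD becomes a dispatch slot, and the DEFAULT clause names the routine used when a pmap implementation does not register its own, so MMUs that never implement change_attr quietly report success. The sketch below models that fallback pattern with a plain function-pointer table; all names in it are hypothetical and the real kobj machinery is considerably more involved.

/*
 * Hand-rolled analogue of "METHOD ... DEFAULT mmu_null_change_attr":
 * a per-implementation function pointer with a stub fallback.
 */
#include <stddef.h>

typedef int (*change_attr_t)(void *mmu, unsigned long va, size_t sz, int mode);

/* Default used when an implementation leaves the slot empty. */
static int
null_change_attr(void *mmu, unsigned long va, size_t sz, int mode)
{
	return (0);		/* pretend the change succeeded */
}

struct mmu_ops {
	change_attr_t	change_attr;	/* NULL if not implemented */
};

static int
dispatch_change_attr(const struct mmu_ops *ops, void *mmu, unsigned long va,
    size_t sz, int mode)
{
	change_attr_t fn;

	fn = (ops->change_attr != NULL) ? ops->change_attr : null_change_attr;
	return (fn(mmu, va, sz, mode));
}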
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 2082ba0..e7b6d76 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -564,6 +564,13 @@ pmap_quick_remove_page(vm_offset_t addr)
 	MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
 }
+int
+pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
+{
+	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
+	return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
+}
+
 /*
  * MMU install routines. Highest priority wins, equal priority also
  * overrides allowing last-set to win.
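With the dispatch glue above in place, kernel code reaches the new functionality through the machine-independent pmap_change_attr() entry point declared in pmap.h. Below is a hedged usage sketch of a hypothetical caller switching an existing kernel mapping to cacheable memory; the function name, page rounding, and error reporting are illustrative and not taken from the commit.

/*
 * Hypothetical caller: switch an already-mapped kernel VA range to
 * ordinary cacheable memory.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>

static int
example_set_cacheable(vm_offset_t kva, vm_size_t len)
{
	int error;

	/* Work on whole pages; the Book-E implementation walks PAGE_SIZE steps. */
	len = round_page(len + (kva & PAGE_MASK));
	kva = trunc_page(kva);

	error = pmap_change_attr(kva, len, VM_MEMATTR_CACHEABLE);
	if (error != 0)
		printf("pmap_change_attr(%#jx, %#jx): error %d\n",
		    (uintmax_t)kva, (uintmax_t)len, error);
	return (error);
}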