author     ian <ian@FreeBSD.org>  2014-05-14 00:51:26 +0000
committer  ian <ian@FreeBSD.org>  2014-05-14 00:51:26 +0000
commit     2963fd0dfbdafac049cadecf0774e0c027b28c88 (patch)
tree       0a7f6d9dda09a9467d9629cbd1d020b502dc6dad /sys/powerpc/booke
parent     065f33ceeaaf9a1809785180d2f3b30a66d52a40 (diff)
MFC r257161, r257169, r257178, r257190, r257191

Add pmap_mapdev_attr() and pmap_kenter_attr() interfaces.

Fix concurrency issues with TLB1 updates and make pmap_kextract() search
TLB1 mappings as well.

Interrelated improvements to early boot mappings:
- Remove the explicit requirement that the SOC registers be found, except
  as an optimization (although the MPC85XX LAW drivers still require that
  they be found externally, which should change).
- Remove the magic CCSRBAR_VA value.
- Allow bus_machdep.c's early-boot code to handle non-1:1 mappings and
  systems not in real mode or global 1:1 maps in early boot.
- Allow pmap_mapdev() on Book-E to reissue previous addresses if the area
  is already mapped. Additionally, have it check all mappings, not just
  the CCSR area.

Add some extra sanity checking and fixes to printf format specifiers.

Bump the initial TLB size; the kernel is not necessarily less than 16 MB.

Handle (in a slightly ugly way) ePAPR-type loaders that just place a
device tree into r3.
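For illustration, a minimal sketch of how a driver might consume the new
attribute-aware interface. Only pmap_mapdev_attr(), pmap_unmapdev(), and
VM_MEMATTR_UNCACHEABLE come from the change itself; the device address,
window size, and error handling below are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

#include <vm/vm.h>
#include <vm/pmap.h>

/* Hypothetical device register window; the address is a made-up value. */
#define	DEV_REGS_PA	0xffe04500
static volatile uint32_t *dev_regs;

static int
dev_map_regs(void)
{

	/* Uncacheable, guarded: tlb_calc_wimg() turns this into PTE_I | PTE_G. */
	dev_regs = pmap_mapdev_attr(DEV_REGS_PA, PAGE_SIZE,
	    VM_MEMATTR_UNCACHEABLE);
	if (dev_regs == NULL)
		return (ENXIO);
	return (0);
}

static void
dev_unmap_regs(void)
{

	pmap_unmapdev((vm_offset_t)dev_regs, PAGE_SIZE);
}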
Diffstat (limited to 'sys/powerpc/booke')
-rw-r--r--  sys/powerpc/booke/locore.S         2
-rw-r--r--  sys/powerpc/booke/machdep.c       53
-rw-r--r--  sys/powerpc/booke/machdep_e500.c   2
-rw-r--r--  sys/powerpc/booke/pmap.c         186
4 files changed, 196 insertions, 47 deletions
diff --git a/sys/powerpc/booke/locore.S b/sys/powerpc/booke/locore.S
index 631796d..d7ebac9 100644
--- a/sys/powerpc/booke/locore.S
+++ b/sys/powerpc/booke/locore.S
@@ -158,7 +158,7 @@ __start:
mtspr SPR_MAS0, %r3
isync
- li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
+ li %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
isync
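The jump from TLB_SIZE_16M to TLB_SIZE_64M is cheap because a single e500
TLB1 entry can cover any power-of-4 size: MAS1.TSIZE encodes the size as
4^TSIZE KB, which is what tsize2size() in pmap.c decodes. A standalone
sketch of that arithmetic, assuming the usual e500 encoding (TSIZE 7 =
16 MB, TSIZE 8 = 64 MB):

/*
 * Decode an e500 MAS1.TSIZE field into bytes, mirroring pmap.c's
 * tsize2size(): size = 4^TSIZE KB = 1024 << (2 * TSIZE).
 */
static unsigned int
tsize2size(unsigned int tsize)
{

	return ((1 << (2 * tsize)) * 1024);	/* 7 -> 16 MB, 8 -> 64 MB */
}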
diff --git a/sys/powerpc/booke/machdep.c b/sys/powerpc/booke/machdep.c
index 340c7a0d..9318012 100644
--- a/sys/powerpc/booke/machdep.c
+++ b/sys/powerpc/booke/machdep.c
@@ -137,6 +137,7 @@ __FBSDID("$FreeBSD$");
#include <sys/linker.h>
#include <sys/reboot.h>
+#include <contrib/libfdt/libfdt.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
@@ -276,6 +277,23 @@ print_kernel_section_addr(void)
debugf(" _end = 0x%08x\n", (uint32_t)_end);
}
+static int
+booke_check_for_fdt(uint32_t arg1, vm_offset_t *dtbp)
+{
+ void *ptr;
+
+ if (arg1 % 8 != 0)
+ return (-1);
+
+ ptr = (void *)pmap_early_io_map(arg1, PAGE_SIZE);
+ if (fdt_check_header(ptr) != 0)
+ return (-1);
+
+ *dtbp = (vm_offset_t)ptr;
+
+ return (0);
+}
+
u_int
booke_init(uint32_t arg1, uint32_t arg2)
{
@@ -288,6 +306,10 @@ booke_init(uint32_t arg1, uint32_t arg2)
end = (uintptr_t)_end;
dtbp = (vm_offset_t)NULL;
+ /* Set up TLB initially */
+ bootinfo = NULL;
+ tlb1_init();
+
/*
* Handle the various ways we can get loaded and started:
* - FreeBSD's loader passes the pointer to the metadata
@@ -302,11 +324,21 @@ booke_init(uint32_t arg1, uint32_t arg2)
* in arg1 and arg2 (resp). arg1 is between 1 and some
* relatively small number, such as 64K. arg2 is the
* physical address of the argv vector.
+ * - ePAPR loaders pass an FDT blob in r3 (arg1) and the magic hex
+ * string 0x45504150 ('ePAP') in r6 (which has been lost by now).
+ * r4 (arg2) is supposed to be set to zero, but is not always.
*/
- if (arg1 > (uintptr_t)kernel_text) /* FreeBSD loader */
- mdp = (void *)arg1;
- else if (arg1 == 0) /* Juniper loader */
+
+ if (arg1 == 0) /* Juniper loader */
mdp = (void *)arg2;
+ else if (booke_check_for_fdt(arg1, &dtbp) == 0) { /* ePAPR */
+ end = roundup(end, 8);
+ memmove((void *)end, (void *)dtbp, fdt_totalsize((void *)dtbp));
+ dtbp = end;
+ end += fdt_totalsize((void *)dtbp);
+ mdp = NULL;
+ } else if (arg1 > (uintptr_t)kernel_text) /* FreeBSD loader */
+ mdp = (void *)arg1;
else /* U-Boot */
mdp = NULL;
@@ -350,13 +382,18 @@ booke_init(uint32_t arg1, uint32_t arg2)
if (OF_init((void *)dtbp) != 0)
while (1);
- if (fdt_immr_addr(CCSRBAR_VA) != 0)
- while (1);
-
OF_interpret("perform-fixup", 0);
- /* Set up TLB initially */
- booke_init_tlb(fdt_immr_pa);
+ /* Reset TLB1 to get rid of temporary mappings */
+ tlb1_init();
+
+ /* Set up IMMR */
+ if (fdt_immr_addr(0) == 0) {
+ fdt_immr_va = pmap_early_io_map(fdt_immr_pa, fdt_immr_size);
+ } else {
+ printf("Warning: SOC base registers could not be found!\n");
+ fdt_immr_va = 0;
+ }
/* Reset Time Base */
mttb(0);
diff --git a/sys/powerpc/booke/machdep_e500.c b/sys/powerpc/booke/machdep_e500.c
index 85805a2..ab47f62 100644
--- a/sys/powerpc/booke/machdep_e500.c
+++ b/sys/powerpc/booke/machdep_e500.c
@@ -47,8 +47,6 @@ void
booke_init_tlb(vm_paddr_t fdt_immr_pa)
{
- /* Initialize TLB1 handling */
- tlb1_init(fdt_immr_pa);
}
void
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 9406a4a..a9476a2 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -113,7 +113,6 @@ extern uint32_t *bootinfo;
extern uint32_t bp_ntlb1s;
#endif
-vm_paddr_t ccsrbar_pa;
vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;
@@ -315,9 +314,11 @@ static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
+static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
+static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
@@ -371,7 +372,9 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
+ MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
MMUMETHOD(mmu_kenter, mmu_booke_kenter),
+ MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
MMUMETHOD(mmu_kextract, mmu_booke_kextract),
/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
@@ -386,6 +389,42 @@ static mmu_method_t mmu_booke_methods[] = {
MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
+static __inline uint32_t
+tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
+{
+ uint32_t attrib;
+ int i;
+
+ if (ma != VM_MEMATTR_DEFAULT) {
+ switch (ma) {
+ case VM_MEMATTR_UNCACHEABLE:
+ return (PTE_I | PTE_G);
+ case VM_MEMATTR_WRITE_COMBINING:
+ case VM_MEMATTR_WRITE_BACK:
+ case VM_MEMATTR_PREFETCHABLE:
+ return (PTE_I);
+ case VM_MEMATTR_WRITE_THROUGH:
+ return (PTE_W | PTE_M);
+ }
+ }
+
+ /*
+ * Assume the page is cache inhibited and access is guarded unless
+ * it's in our available memory array.
+ */
+ attrib = _TLB_ENTRY_IO;
+ for (i = 0; i < physmem_regions_sz; i++) {
+ if ((pa >= physmem_regions[i].mr_start) &&
+ (pa < (physmem_regions[i].mr_start +
+ physmem_regions[i].mr_size))) {
+ attrib = _TLB_ENTRY_MEM;
+ break;
+ }
+ }
+
+ return (attrib);
+}
+
static inline void
tlb_miss_lock(void)
{
@@ -1315,6 +1354,15 @@ mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
+ int i;
+
+ /* Check TLB1 mappings */
+ for (i = 0; i < tlb1_idx; i++) {
+ if (!(tlb1[i].mas1 & MAS1_VALID))
+ continue;
+ if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
+ return (tlb1[i].phys + (va - tlb1[i].virt));
+ }
return (pte_vatopa(mmu, kernel_pmap, va));
}
@@ -1392,6 +1440,13 @@ mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{
+
+ mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+}
+
+static void
+mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
uint32_t flags;
@@ -1400,7 +1455,8 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
(va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
- flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
+ flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
+ flags |= tlb_calc_wimg(pa, ma);
pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
@@ -2595,33 +2651,49 @@ mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
static void *
mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
+
+ return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+}
+
+static void *
+mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
+{
void *res;
uintptr_t va;
vm_size_t sz;
+ int i;
/*
- * CCSR is premapped. Note that (pa + size - 1) is there to make sure
- * we don't wrap around. Devices on the local bus typically extend all
- * the way up to and including 0xffffffff. In that case (pa + size)
- * would be 0. This creates a false positive (i.e. we think it's
- * within the CCSR) and not create a mapping.
+ * Check if this is premapped in TLB1. Note: this should probably also
+ * check whether a sequence of TLB1 entries exist that match the
+ * requirement, but now only checks the easy case.
*/
- if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) {
- va = CCSRBAR_VA + (pa - ccsrbar_pa);
- return ((void *)va);
+ if (ma == VM_MEMATTR_DEFAULT) {
+ for (i = 0; i < tlb1_idx; i++) {
+ if (!(tlb1[i].mas1 & MAS1_VALID))
+ continue;
+ if (pa >= tlb1[i].phys &&
+ (pa + size) <= (tlb1[i].phys + tlb1[i].size))
+ return (void *)(tlb1[i].virt +
+ (pa - tlb1[i].phys));
+ }
}
- va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
+ size = roundup(size, PAGE_SIZE);
+
+ if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
+ (pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
+ va = pa;
+ else
+ va = kva_alloc(size);
res = (void *)va;
- if (size < PAGE_SIZE)
- size = PAGE_SIZE;
do {
sz = 1 << (ilog2(size) & ~1);
if (bootverbose)
printf("Wiring VA=%x to PA=%x (size=%x), "
"using TLB1[%d]\n", va, pa, sz, tlb1_idx);
- tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
+ tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
size -= sz;
pa += sz;
va += sz;
@@ -2636,6 +2708,7 @@ mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
+#ifdef SUPPORTS_SHRINKING_TLB1
vm_offset_t base, offset;
/*
@@ -2647,6 +2720,7 @@ mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
size = roundup(offset + size, PAGE_SIZE);
kva_free(base, size);
}
+#endif
}
/*
@@ -2827,9 +2901,8 @@ tlb0_print_tlbentries(void)
/*
* TLB1 mapping notes:
*
- * TLB1[0] CCSRBAR
- * TLB1[1] Kernel text and data.
- * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
+ * TLB1[0] Kernel text and data.
+ * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
* windows, other devices mappings.
*/
@@ -2913,9 +2986,10 @@ tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
uint32_t flags)
{
uint32_t ts, tid;
- int tsize;
-
- if (tlb1_idx >= TLB1_ENTRIES) {
+ int tsize, index;
+
+ index = atomic_fetchadd_int(&tlb1_idx, 1);
+ if (index >= TLB1_ENTRIES) {
printf("tlb1_set_entry: TLB1 full!\n");
return (-1);
}
@@ -2927,18 +3001,22 @@ tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
/* XXX TS is hard coded to 0 for now as we only use single address space */
ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
- /* XXX LOCK tlb1[] */
+ /*
+ * Atomicity is preserved by the atomic increment above since nothing
+ * is ever removed from tlb1.
+ */
- tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
- tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
- tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
+ tlb1[index].phys = pa;
+ tlb1[index].virt = va;
+ tlb1[index].size = size;
+ tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
+ tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
+ tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags;
/* Set supervisor RWX permission bits */
- tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
+ tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
- tlb1_write_entry(tlb1_idx++);
-
- /* XXX UNLOCK tlb1[] */
+ tlb1_write_entry(index);
/*
* XXX in general TLB1 updates should be propagated between CPUs,
@@ -3017,14 +3095,12 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
* assembler level setup done in locore.S.
*/
void
-tlb1_init(vm_offset_t ccsrbar)
+tlb1_init()
{
- uint32_t mas0, mas1, mas3;
+ uint32_t mas0, mas1, mas2, mas3;
uint32_t tsz;
u_int i;
- ccsrbar_pa = ccsrbar;
-
if (bootinfo != NULL && bootinfo[0] != 1) {
tlb1_idx = *((uint16_t *)(bootinfo + 8));
} else
@@ -3040,22 +3116,23 @@ tlb1_init(vm_offset_t ccsrbar)
if ((mas1 & MAS1_VALID) == 0)
continue;
+ mas2 = mfspr(SPR_MAS2);
mas3 = mfspr(SPR_MAS3);
tlb1[i].mas1 = mas1;
tlb1[i].mas2 = mfspr(SPR_MAS2);
tlb1[i].mas3 = mas3;
+ tlb1[i].virt = mas2 & MAS2_EPN_MASK;
+ tlb1[i].phys = mas3 & MAS3_RPN;
if (i == 0)
kernload = mas3 & MAS3_RPN;
tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
- kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
+ tlb1[i].size = (tsz > 0) ? tsize2size(tsz) : 0;
+ kernsize += tlb1[i].size;
}
- /* Map in CCSRBAR. */
- tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
-
#ifdef SMP
bp_ntlb1s = tlb1_idx;
#endif
@@ -3068,6 +3145,43 @@ tlb1_init(vm_offset_t ccsrbar)
set_mas4_defaults();
}
+vm_offset_t
+pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
+{
+ static vm_offset_t early_io_map_base = VM_MAX_KERNEL_ADDRESS;
+ vm_paddr_t pa_base;
+ vm_offset_t va, sz;
+ int i;
+
+ KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
+
+ for (i = 0; i < tlb1_idx; i++) {
+ if (!(tlb1[i].mas1 & MAS1_VALID))
+ continue;
+ if (pa >= tlb1[i].phys && (pa + size) <=
+ (tlb1[i].phys + tlb1[i].size))
+ return (tlb1[i].virt + (pa - tlb1[i].phys));
+ }
+
+ pa_base = trunc_page(pa);
+ size = roundup(size + (pa - pa_base), PAGE_SIZE);
+ va = early_io_map_base + (pa - pa_base);
+
+ do {
+ sz = 1 << (ilog2(size) & ~1);
+ tlb1_set_entry(early_io_map_base, pa_base, sz, _TLB_ENTRY_IO);
+ size -= sz;
+ pa_base += sz;
+ early_io_map_base += sz;
+ } while (size > 0);
+
+#ifdef SMP
+ bp_ntlb1s = tlb1_idx;
+#endif
+
+ return (va);
+}
+
/*
* Setup MAS4 defaults.
* These values are loaded to MAS0-2 on a TLB miss.
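Both mmu_booke_mapdev_attr() and pmap_early_io_map() above carve a region
into TLB1 entries with "sz = 1 << (ilog2(size) & ~1)", i.e. the largest
power-of-4 chunk that fits, since TLB1 entries come only in power-of-4
sizes. A self-contained userland sketch of that decomposition; the local
ilog2() merely stands in for the kernel's own:

#include <stdio.h>

/* Index of the highest set bit; the kernel provides its own ilog2(). */
static int
ilog2(unsigned int n)
{
	int i;

	for (i = -1; n != 0; n >>= 1)
		i++;
	return (i);
}

int
main(void)
{
	unsigned int size = 20 * 1024 * 1024;	/* e.g. a 20 MB window */
	unsigned int sz;

	do {
		sz = 1 << (ilog2(size) & ~1);	/* largest power-of-4 <= size */
		printf("TLB1 entry: %u MB\n", sz >> 20);
		size -= sz;
	} while (size > 0);
	/* Prints 16 MB then 4 MB: two entries cover the 20 MB window. */
	return (0);
}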