summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  sys/powerpc/aim/machdep.c        9
-rw-r--r--  sys/powerpc/booke/locore.S       2
-rw-r--r--  sys/powerpc/booke/machdep.c      53
-rw-r--r--  sys/powerpc/booke/machdep_e500.c 2
-rw-r--r--  sys/powerpc/booke/pmap.c         186
-rw-r--r--  sys/powerpc/include/pmap.h       2
-rw-r--r--  sys/powerpc/include/tlb.h        5
-rw-r--r--  sys/powerpc/include/vmparam.h    6
-rw-r--r--  sys/powerpc/mpc85xx/mpc85xx.c    2
-rw-r--r--  sys/powerpc/mpc85xx/mpc85xx.h    1
-rw-r--r--  sys/powerpc/powerpc/bus_machdep.c 15
11 files changed, 223 insertions(+), 60 deletions(-)
diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c
index 6cf3b24..72f9de7 100644
--- a/sys/powerpc/aim/machdep.c
+++ b/sys/powerpc/aim/machdep.c
@@ -777,6 +777,13 @@ va_to_vsid(pmap_t pm, vm_offset_t va)
#endif
+vm_offset_t
+pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pa);
+}
+
/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
@@ -944,4 +951,4 @@ cpu_sleep()
if (vectd == curthread)
enable_vec(curthread);
powerpc_sync();
-}
+}
\ No newline at end of file
diff --git a/sys/powerpc/booke/locore.S b/sys/powerpc/booke/locore.S
index 631796d..d7ebac9 100644
--- a/sys/powerpc/booke/locore.S
+++ b/sys/powerpc/booke/locore.S
@@ -158,7 +158,7 @@ __start:
mtspr SPR_MAS0, %r3
isync
- li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
+ li %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
isync
diff --git a/sys/powerpc/booke/machdep.c b/sys/powerpc/booke/machdep.c
index 340c7a0d..9318012 100644
--- a/sys/powerpc/booke/machdep.c
+++ b/sys/powerpc/booke/machdep.c
@@ -137,6 +137,7 @@ __FBSDID("$FreeBSD$");
#include <sys/linker.h>
#include <sys/reboot.h>
+#include <contrib/libfdt/libfdt.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
@@ -276,6 +277,23 @@ print_kernel_section_addr(void)
debugf(" _end = 0x%08x\n", (uint32_t)_end);
}
+static int
+booke_check_for_fdt(uint32_t arg1, vm_offset_t *dtbp)
+{
+ void *ptr;
+
+ if (arg1 % 8 != 0)
+ return (-1);
+
+ ptr = (void *)pmap_early_io_map(arg1, PAGE_SIZE);
+ if (fdt_check_header(ptr) != 0)
+ return (-1);
+
+ *dtbp = (vm_offset_t)ptr;
+
+ return (0);
+}
+
u_int
booke_init(uint32_t arg1, uint32_t arg2)
{
@@ -288,6 +306,10 @@ booke_init(uint32_t arg1, uint32_t arg2)
end = (uintptr_t)_end;
dtbp = (vm_offset_t)NULL;
+ /* Set up TLB initially */
+ bootinfo = NULL;
+ tlb1_init();
+
/*
* Handle the various ways we can get loaded and started:
* - FreeBSD's loader passes the pointer to the metadata
@@ -302,11 +324,21 @@ booke_init(uint32_t arg1, uint32_t arg2)
* in arg1 and arg2 (resp). arg1 is between 1 and some
* relatively small number, such as 64K. arg2 is the
* physical address of the argv vector.
+ * - ePAPR loaders pass an FDT blob in r3 (arg1) and the magic hex
+ * string 0x45504150 ('ePAP') in r6 (which has been lost by now).
+ * r4 (arg2) is supposed to be set to zero, but is not always.
*/
- if (arg1 > (uintptr_t)kernel_text) /* FreeBSD loader */
- mdp = (void *)arg1;
- else if (arg1 == 0) /* Juniper loader */
+
+ if (arg1 == 0) /* Juniper loader */
mdp = (void *)arg2;
+ else if (booke_check_for_fdt(arg1, &dtbp) == 0) { /* ePAPR */
+ end = roundup(end, 8);
+ memmove((void *)end, (void *)dtbp, fdt_totalsize((void *)dtbp));
+ dtbp = end;
+ end += fdt_totalsize((void *)dtbp);
+ mdp = NULL;
+ } else if (arg1 > (uintptr_t)kernel_text) /* FreeBSD loader */
+ mdp = (void *)arg1;
else /* U-Boot */
mdp = NULL;
@@ -350,13 +382,18 @@ booke_init(uint32_t arg1, uint32_t arg2)
if (OF_init((void *)dtbp) != 0)
while (1);
- if (fdt_immr_addr(CCSRBAR_VA) != 0)
- while (1);
-
OF_interpret("perform-fixup", 0);
- /* Set up TLB initially */
- booke_init_tlb(fdt_immr_pa);
+ /* Reset TLB1 to get rid of temporary mappings */
+ tlb1_init();
+
+ /* Set up IMMR */
+ if (fdt_immr_addr(0) == 0) {
+ fdt_immr_va = pmap_early_io_map(fdt_immr_pa, fdt_immr_size);
+ } else {
+ printf("Warning: SOC base registers could not be found!\n");
+ fdt_immr_va = 0;
+ }
/* Reset Time Base */
mttb(0);
diff --git a/sys/powerpc/booke/machdep_e500.c b/sys/powerpc/booke/machdep_e500.c
index 85805a2..ab47f62 100644
--- a/sys/powerpc/booke/machdep_e500.c
+++ b/sys/powerpc/booke/machdep_e500.c
@@ -47,8 +47,6 @@ void
booke_init_tlb(vm_paddr_t fdt_immr_pa)
{
- /* Initialize TLB1 handling */
- tlb1_init(fdt_immr_pa);
}
void
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 9406a4a..a9476a2 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -113,7 +113,6 @@ extern uint32_t *bootinfo;
extern uint32_t bp_ntlb1s;
#endif
-vm_paddr_t ccsrbar_pa;
vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;
@@ -315,9 +314,11 @@ static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
+static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
+static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
@@ -371,7 +372,9 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
+ MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
MMUMETHOD(mmu_kenter, mmu_booke_kenter),
+ MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
MMUMETHOD(mmu_kextract, mmu_booke_kextract),
/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
@@ -386,6 +389,42 @@ static mmu_method_t mmu_booke_methods[] = {
MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
+static __inline uint32_t
+tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
+{
+ uint32_t attrib;
+ int i;
+
+ if (ma != VM_MEMATTR_DEFAULT) {
+ switch (ma) {
+ case VM_MEMATTR_UNCACHEABLE:
+ return (PTE_I | PTE_G);
+ case VM_MEMATTR_WRITE_COMBINING:
+ case VM_MEMATTR_WRITE_BACK:
+ case VM_MEMATTR_PREFETCHABLE:
+ return (PTE_I);
+ case VM_MEMATTR_WRITE_THROUGH:
+ return (PTE_W | PTE_M);
+ }
+ }
+
+ /*
+ * Assume the page is cache inhibited and access is guarded unless
+ * it's in our available memory array.
+ */
+ attrib = _TLB_ENTRY_IO;
+ for (i = 0; i < physmem_regions_sz; i++) {
+ if ((pa >= physmem_regions[i].mr_start) &&
+ (pa < (physmem_regions[i].mr_start +
+ physmem_regions[i].mr_size))) {
+ attrib = _TLB_ENTRY_MEM;
+ break;
+ }
+ }
+
+ return (attrib);
+}
+
static inline void
tlb_miss_lock(void)
{
@@ -1315,6 +1354,15 @@ mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
+ int i;
+
+ /* Check TLB1 mappings */
+ for (i = 0; i < tlb1_idx; i++) {
+ if (!(tlb1[i].mas1 & MAS1_VALID))
+ continue;
+ if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
+ return (tlb1[i].phys + (va - tlb1[i].virt));
+ }
return (pte_vatopa(mmu, kernel_pmap, va));
}
@@ -1392,6 +1440,13 @@ mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{
+
+ mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+}
+
+static void
+mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
uint32_t flags;
@@ -1400,7 +1455,8 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
(va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
- flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
+ flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
+ flags |= tlb_calc_wimg(pa, ma);
pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
@@ -2595,33 +2651,49 @@ mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
static void *
mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
+
+ return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+}
+
+static void *
+mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
+{
void *res;
uintptr_t va;
vm_size_t sz;
+ int i;
/*
- * CCSR is premapped. Note that (pa + size - 1) is there to make sure
- * we don't wrap around. Devices on the local bus typically extend all
- * the way up to and including 0xffffffff. In that case (pa + size)
- * would be 0. This creates a false positive (i.e. we think it's
- * within the CCSR) and not create a mapping.
+ * Check if this is premapped in TLB1. Note: this should probably also
+ * check whether a sequence of TLB1 entries exist that match the
+ * requirement, but now only checks the easy case.
*/
- if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) {
- va = CCSRBAR_VA + (pa - ccsrbar_pa);
- return ((void *)va);
+ if (ma == VM_MEMATTR_DEFAULT) {
+ for (i = 0; i < tlb1_idx; i++) {
+ if (!(tlb1[i].mas1 & MAS1_VALID))
+ continue;
+ if (pa >= tlb1[i].phys &&
+ (pa + size) <= (tlb1[i].phys + tlb1[i].size))
+ return (void *)(tlb1[i].virt +
+ (pa - tlb1[i].phys));
+ }
}
- va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
+ size = roundup(size, PAGE_SIZE);
+
+ if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
+ (pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
+ va = pa;
+ else
+ va = kva_alloc(size);
res = (void *)va;
- if (size < PAGE_SIZE)
- size = PAGE_SIZE;
do {
sz = 1 << (ilog2(size) & ~1);
if (bootverbose)
printf("Wiring VA=%x to PA=%x (size=%x), "
"using TLB1[%d]\n", va, pa, sz, tlb1_idx);
- tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
+ tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
size -= sz;
pa += sz;
va += sz;
@@ -2636,6 +2708,7 @@ mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
+#ifdef SUPPORTS_SHRINKING_TLB1
vm_offset_t base, offset;
/*
@@ -2647,6 +2720,7 @@ mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
size = roundup(offset + size, PAGE_SIZE);
kva_free(base, size);
}
+#endif
}
/*
@@ -2827,9 +2901,8 @@ tlb0_print_tlbentries(void)
/*
* TLB1 mapping notes:
*
- * TLB1[0] CCSRBAR
- * TLB1[1] Kernel text and data.
- * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
+ * TLB1[0] Kernel text and data.
+ * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
* windows, other devices mappings.
*/
@@ -2913,9 +2986,10 @@ tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
uint32_t flags)
{
uint32_t ts, tid;
- int tsize;
-
- if (tlb1_idx >= TLB1_ENTRIES) {
+ int tsize, index;
+
+ index = atomic_fetchadd_int(&tlb1_idx, 1);
+ if (index >= TLB1_ENTRIES) {
printf("tlb1_set_entry: TLB1 full!\n");
return (-1);
}
@@ -2927,18 +3001,22 @@ tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
/* XXX TS is hard coded to 0 for now as we only use single address space */
ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
- /* XXX LOCK tlb1[] */
+ /*
+ * Atomicity is preserved by the atomic increment above since nothing
+ * is ever removed from tlb1.
+ */
- tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
- tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
- tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
+ tlb1[index].phys = pa;
+ tlb1[index].virt = va;
+ tlb1[index].size = size;
+ tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
+ tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
+ tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags;
/* Set supervisor RWX permission bits */
- tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
+ tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
- tlb1_write_entry(tlb1_idx++);
-
- /* XXX UNLOCK tlb1[] */
+ tlb1_write_entry(index);
/*
* XXX in general TLB1 updates should be propagated between CPUs,
@@ -3017,14 +3095,12 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
* assembler level setup done in locore.S.
*/
void
-tlb1_init(vm_offset_t ccsrbar)
+tlb1_init()
{
- uint32_t mas0, mas1, mas3;
+ uint32_t mas0, mas1, mas2, mas3;
uint32_t tsz;
u_int i;
- ccsrbar_pa = ccsrbar;
-
if (bootinfo != NULL && bootinfo[0] != 1) {
tlb1_idx = *((uint16_t *)(bootinfo + 8));
} else
@@ -3040,22 +3116,23 @@ tlb1_init(vm_offset_t ccsrbar)
if ((mas1 & MAS1_VALID) == 0)
continue;
+ mas2 = mfspr(SPR_MAS2);
mas3 = mfspr(SPR_MAS3);
tlb1[i].mas1 = mas1;
tlb1[i].mas2 = mfspr(SPR_MAS2);
tlb1[i].mas3 = mas3;
+ tlb1[i].virt = mas2 & MAS2_EPN_MASK;
+ tlb1[i].phys = mas3 & MAS3_RPN;
if (i == 0)
kernload = mas3 & MAS3_RPN;
tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
- kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
+ tlb1[i].size = (tsz > 0) ? tsize2size(tsz) : 0;
+ kernsize += tlb1[i].size;
}
- /* Map in CCSRBAR. */
- tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
-
#ifdef SMP
bp_ntlb1s = tlb1_idx;
#endif
@@ -3068,6 +3145,43 @@ tlb1_init(vm_offset_t ccsrbar)
set_mas4_defaults();
}
+vm_offset_t
+pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
+{
+ static vm_offset_t early_io_map_base = VM_MAX_KERNEL_ADDRESS;
+ vm_paddr_t pa_base;
+ vm_offset_t va, sz;
+ int i;
+
+ KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
+
+ for (i = 0; i < tlb1_idx; i++) {
+ if (!(tlb1[i].mas1 & MAS1_VALID))
+ continue;
+ if (pa >= tlb1[i].phys && (pa + size) <=
+ (tlb1[i].phys + tlb1[i].size))
+ return (tlb1[i].virt + (pa - tlb1[i].phys));
+ }
+
+ pa_base = trunc_page(pa);
+ size = roundup(size + (pa - pa_base), PAGE_SIZE);
+ va = early_io_map_base + (pa - pa_base);
+
+ do {
+ sz = 1 << (ilog2(size) & ~1);
+ tlb1_set_entry(early_io_map_base, pa_base, sz, _TLB_ENTRY_IO);
+ size -= sz;
+ pa_base += sz;
+ early_io_map_base += sz;
+ } while (size > 0);
+
+#ifdef SMP
+ bp_ntlb1s = tlb1_idx;
+#endif
+
+ return (va);
+}
+
/*
* Setup MAS4 defaults.
* These values are loaded to MAS0-2 on a TLB miss.
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index 4b1f4f6..2c9c786 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -258,6 +258,8 @@ extern void pmap_dumpsys_unmap(struct pmap_md *, vm_size_t, vm_offset_t);
extern struct pmap_md *pmap_scan_md(struct pmap_md *);
+vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
+
#endif
#endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/powerpc/include/tlb.h b/sys/powerpc/include/tlb.h
index 7e0d909..58a1a01 100644
--- a/sys/powerpc/include/tlb.h
+++ b/sys/powerpc/include/tlb.h
@@ -126,6 +126,9 @@
#if !defined(LOCORE)
typedef struct tlb_entry {
+ vm_paddr_t phys;
+ vm_offset_t virt;
+ vm_size_t size;
uint32_t mas1;
uint32_t mas2;
uint32_t mas3;
@@ -134,7 +137,7 @@ typedef struct tlb_entry {
void tlb0_print_tlbentries(void);
void tlb1_inval_entry(unsigned int);
-void tlb1_init(vm_offset_t);
+void tlb1_init(void);
void tlb1_print_entries(void);
void tlb1_print_tlbentries(void);
#endif /* !LOCORE */
diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
index 5a29796..0b12933 100644
--- a/sys/powerpc/include/vmparam.h
+++ b/sys/powerpc/include/vmparam.h
@@ -108,12 +108,6 @@
#else /* Book-E */
-/*
- * Kernel CCSRBAR location. We make this the reset location.
- */
-#define CCSRBAR_VA 0xfef00000
-#define CCSRBAR_SIZE 0x00100000
-
#define KERNBASE 0xc0000000 /* start of kernel virtual */
#define VM_MIN_KERNEL_ADDRESS KERNBASE
diff --git a/sys/powerpc/mpc85xx/mpc85xx.c b/sys/powerpc/mpc85xx/mpc85xx.c
index 0db5d5e..e94a1d8 100644
--- a/sys/powerpc/mpc85xx/mpc85xx.c
+++ b/sys/powerpc/mpc85xx/mpc85xx.c
@@ -41,6 +41,8 @@ __FBSDID("$FreeBSD$");
#include <machine/pio.h>
#include <machine/spr.h>
+#include <dev/fdt/fdt_common.h>
+
#include <powerpc/mpc85xx/mpc85xx.h>
/*
diff --git a/sys/powerpc/mpc85xx/mpc85xx.h b/sys/powerpc/mpc85xx/mpc85xx.h
index fa3bde3..defb3bf 100644
--- a/sys/powerpc/mpc85xx/mpc85xx.h
+++ b/sys/powerpc/mpc85xx/mpc85xx.h
@@ -33,6 +33,7 @@
/*
* Configuration control and status registers
*/
+#define CCSRBAR_VA fdt_immr_va
#define OCP85XX_CCSRBAR (CCSRBAR_VA + 0x0)
#define OCP85XX_BPTR (CCSRBAR_VA + 0x20)
diff --git a/sys/powerpc/powerpc/bus_machdep.c b/sys/powerpc/powerpc/bus_machdep.c
index 950d026..1928539 100644
--- a/sys/powerpc/powerpc/bus_machdep.c
+++ b/sys/powerpc/powerpc/bus_machdep.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#define MAX_EARLYBOOT_MAPPINGS 6
static struct {
+ vm_offset_t virt;
bus_addr_t addr;
bus_size_t size;
int flags;
@@ -86,10 +87,12 @@ bs_gen_map(bus_addr_t addr, bus_size_t size, int flags,
KASSERT(earlyboot_map_idx < MAX_EARLYBOOT_MAPPINGS,
("%s: too many early boot mapping requests", __func__));
earlyboot_mappings[earlyboot_map_idx].addr = addr;
+ earlyboot_mappings[earlyboot_map_idx].virt =
+ pmap_early_io_map(addr, size);
earlyboot_mappings[earlyboot_map_idx].size = size;
earlyboot_mappings[earlyboot_map_idx].flags = flags;
+ *bshp = earlyboot_mappings[earlyboot_map_idx].virt;
earlyboot_map_idx++;
- *bshp = addr;
} else {
ma = VM_MEMATTR_DEFAULT;
switch (flags) {
@@ -110,13 +113,13 @@ void
bs_remap_earlyboot(void)
{
int i;
- vm_offset_t pa, spa;
+ vm_offset_t pa, spa, va;
vm_memattr_t ma;
for (i = 0; i < earlyboot_map_idx; i++) {
spa = earlyboot_mappings[i].addr;
- if (pmap_dev_direct_mapped(spa, earlyboot_mappings[i].size)
- == 0)
+ if (spa == earlyboot_mappings[i].virt &&
+ pmap_dev_direct_mapped(spa, earlyboot_mappings[i].size) == 0)
continue;
ma = VM_MEMATTR_DEFAULT;
@@ -130,8 +133,10 @@ bs_remap_earlyboot(void)
}
pa = trunc_page(spa);
+ va = trunc_page(earlyboot_mappings[i].virt);
while (pa < spa + earlyboot_mappings[i].size) {
- pmap_kenter_attr(pa, pa, ma);
+ pmap_kenter_attr(va, pa, ma);
+ va += PAGE_SIZE;
pa += PAGE_SIZE;
}
}
OpenPOWER on IntegriCloud