summary refs log tree commit diff stats
diff options
context:
space:
mode:
author ian <ian@FreeBSD.org> 2014-05-14 01:16:05 +0000
committer ian <ian@FreeBSD.org> 2014-05-14 01:16:05 +0000
commit 147aa314a7ea59fab69a92349cee312e49cae2bb (patch)
tree b553b4077fe2c8c6098e20d3d69729a8bc8e9c11
parent 2963fd0dfbdafac049cadecf0774e0c027b28c88 (diff)
download FreeBSD-src-147aa314a7ea59fab69a92349cee312e49cae2bb.zip
download FreeBSD-src-147aa314a7ea59fab69a92349cee312e49cae2bb.tar.gz
MFC r257180, r257195, r257196, r257198, r257209, r257295
Add some extra sanity checking and checks to printf format specifiers. Try even harder to find a console before giving up. Make devices with registers into the KVA region work reliably. Turn on VM_KMEM_SIZE_SCALE on 32-bit as well as 64-bit PowerPC. Return NOKEY instead of 0 if there are no more key presses queued.
-rw-r--r-- sys/boot/powerpc/ps3/start.S 2
-rw-r--r-- sys/dev/adb/adb_kbd.c 2
-rw-r--r-- sys/dev/uart/uart_cpu_fdt.c 17
-rw-r--r-- sys/powerpc/aim/mmu_oea64.c 22
-rw-r--r-- sys/powerpc/booke/pmap.c 24
-rw-r--r-- sys/powerpc/include/vmparam.h 16
6 files changed, 54 insertions, 29 deletions
diff --git a/sys/boot/powerpc/ps3/start.S b/sys/boot/powerpc/ps3/start.S
index 865019c..570b3f5 100644
--- a/sys/boot/powerpc/ps3/start.S
+++ b/sys/boot/powerpc/ps3/start.S
@@ -27,7 +27,7 @@
#define LOCORE
-#include <machine/trap_aim.h>
+#include <machine/trap.h>
/*
* KBoot and simulators will start this program from the _start symbol, with
diff --git a/sys/dev/adb/adb_kbd.c b/sys/dev/adb/adb_kbd.c
index 6fca5ff..76d4e88 100644
--- a/sys/dev/adb/adb_kbd.c
+++ b/sys/dev/adb/adb_kbd.c
@@ -621,7 +621,7 @@ akbd_read_char(keyboard_t *kbd, int wait)
if (!sc->buffers) {
mtx_unlock(&sc->sc_mutex);
- return (0);
+ return (NOKEY);
}
adb_code = sc->buffer[0];
diff --git a/sys/dev/uart/uart_cpu_fdt.c b/sys/dev/uart/uart_cpu_fdt.c
index b063cb4..9bf3549 100644
--- a/sys/dev/uart/uart_cpu_fdt.c
+++ b/sys/dev/uart/uart_cpu_fdt.c
@@ -142,14 +142,19 @@ uart_cpu_getdev(int devtype, struct uart_devinfo *di)
/*
* Retrieve /chosen/std{in,out}.
*/
- if ((chosen = OF_finddevice("/chosen")) == -1)
- return (ENXIO);
- for (name = propnames; *name != NULL; name++) {
- if (phandle_chosen_propdev(chosen, *name, &node) == 0)
- break;
+ node = -1;
+ if ((chosen = OF_finddevice("/chosen")) != -1) {
+ for (name = propnames; *name != NULL; name++) {
+ if (phandle_chosen_propdev(chosen, *name, &node) == 0)
+ break;
+ }
}
- if (*name == NULL)
+ if (chosen == -1 || *name == NULL)
+ node = OF_finddevice("serial0"); /* Last ditch */
+
+ if (node == -1) /* Can't find anything */
return (ENXIO);
+
/*
* Retrieve serial attributes.
*/
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index a470ded..0669f66 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -501,15 +501,7 @@ moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
qsort(translations, sz, sizeof (*translations), om_cmp);
for (i = 0; i < sz; i++) {
- CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
- (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
- translations[i].om_len);
-
- if (translations[i].om_pa_lo % PAGE_SIZE)
- panic("OFW translation not page-aligned!");
-
pa_base = translations[i].om_pa_lo;
-
#ifdef __powerpc64__
pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
#else
@@ -517,6 +509,14 @@ moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
panic("OFW translations above 32-bit boundary!");
#endif
+ if (pa_base % PAGE_SIZE)
+ panic("OFW translation not page-aligned (phys)!");
+ if (translations[i].om_va % PAGE_SIZE)
+ panic("OFW translation not page-aligned (virt)!");
+
+ CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
+ pa_base, translations[i].om_va, translations[i].om_len);
+
/* Now enter the pages for this mapping */
DISABLE_TRANS(msr);
@@ -693,9 +693,9 @@ moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelen
hwphyssz = 0;
TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
- CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
- regions[i].mr_start + regions[i].mr_size,
- regions[i].mr_size);
+ CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
+ regions[i].mr_start, regions[i].mr_start +
+ regions[i].mr_size, regions[i].mr_size);
if (hwphyssz != 0 &&
(physsz + regions[i].mr_size) >= hwphyssz) {
if (physsz < hwphyssz) {
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index a9476a2..6ccdc74 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -189,6 +189,7 @@ static tlb_entry_t tlb1[TLB1_ENTRIES];
/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;
+static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;
static tlbtid_t tid_alloc(struct pmap *);
@@ -2681,11 +2682,23 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
size = roundup(size, PAGE_SIZE);
+ /*
+ * We leave a hole for device direct mapping between the maximum user
+ * address (0x80000000) and the minimum KVA address (0xc0000000). If
+ * devices are in there, just map them 1:1. If not, map them to the
+ * device mapping area above VM_MAX_KERNEL_ADDRESS. These mapped
+ * addresses should be pulled from an allocator, but since we do not
+ * ever free TLB1 entries, it is safe just to increment a counter.
+ * Note that there isn't a lot of address space here (128 MB) and it
+ * is not at all difficult to imagine running out, since that is a 4:1
+ * compression from the 0xc0000000 - 0xf0000000 address space that gets
+ * mapped there.
+ */
if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
(pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
va = pa;
else
- va = kva_alloc(size);
+ va = atomic_fetchadd_int(&tlb1_map_base, size);
res = (void *)va;
do {
@@ -3085,7 +3098,7 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
}
mapped = (va - base);
- debugf("mapped size 0x%08x (wasted space 0x%08x)\n",
+ printf("mapped size 0x%08x (wasted space 0x%08x)\n",
mapped, mapped - size);
return (mapped);
}
@@ -3148,7 +3161,6 @@ tlb1_init()
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
- static vm_offset_t early_io_map_base = VM_MAX_KERNEL_ADDRESS;
vm_paddr_t pa_base;
vm_offset_t va, sz;
int i;
@@ -3165,14 +3177,14 @@ pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
pa_base = trunc_page(pa);
size = roundup(size + (pa - pa_base), PAGE_SIZE);
- va = early_io_map_base + (pa - pa_base);
+ va = tlb1_map_base + (pa - pa_base);
do {
sz = 1 << (ilog2(size) & ~1);
- tlb1_set_entry(early_io_map_base, pa_base, sz, _TLB_ENTRY_IO);
+ tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO);
size -= sz;
pa_base += sz;
- early_io_map_base += sz;
+ tlb1_map_base += sz;
} while (size > 0);
#ifdef SMP
diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
index 0b12933..e8e22cd 100644
--- a/sys/powerpc/include/vmparam.h
+++ b/sys/powerpc/include/vmparam.h
@@ -112,6 +112,7 @@
#define VM_MIN_KERNEL_ADDRESS KERNBASE
#define VM_MAX_KERNEL_ADDRESS 0xf8000000
+#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
#endif /* AIM/E500 */
@@ -175,14 +176,21 @@ struct pmap_physseg {
#define VM_KMEM_SIZE (12 * 1024 * 1024)
#endif
-#ifdef __powerpc64__
+/*
+ * How many physical pages per KVA page allocated.
+ * min(max(VM_KMEM_SIZE, Physical memory/VM_KMEM_SIZE_SCALE), VM_KMEM_SIZE_MAX)
+ * is the total KVA space allocated for kmem_map.
+ */
#ifndef VM_KMEM_SIZE_SCALE
-#define VM_KMEM_SIZE_SCALE (3)
+#define VM_KMEM_SIZE_SCALE (3)
#endif
+/*
+ * Ceiling on the amount of kmem_map KVA space: 40% of the entire KVA space.
+ */
#ifndef VM_KMEM_SIZE_MAX
-#define VM_KMEM_SIZE_MAX 0x1c0000000 /* 7 GB */
-#endif
+#define VM_KMEM_SIZE_MAX ((VM_MAX_SAFE_KERNEL_ADDRESS - \
+ VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
#endif
#define ZERO_REGION_SIZE (64 * 1024) /* 64KB */
OpenPOWER on IntegriCloud