author     marcel <marcel@FreeBSD.org>  2015-08-25 14:39:40 +0000
committer  marcel <marcel@FreeBSD.org>  2015-08-25 14:39:40 +0000
commit     87b09c366d87c98fd8a35cb0883c973290b0858b (patch)
tree       e20265d6eec7a4355210772dd496b1c6da24ed19 /sys/i386/i386
parent     d53cfebe2f15ceb38c2d4e7897391b81ca63387c (diff)
MFC r286667 & r286723
Better support memory mapped console devices, such as VGA and EFI frame buffers and memory mapped UARTs.

PR: 191564, 194952, 202276
Diffstat (limited to 'sys/i386/i386')
-rw-r--r--  sys/i386/i386/machdep.c  68
-rw-r--r--  sys/i386/i386/pmap.c     82
2 files changed, 109 insertions(+), 41 deletions(-)
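
The gist of the change, as a standalone C model (the names, addresses, and bump allocator below are illustrative assumptions, not the kernel code itself): before pmap_init() has run there is no KVA allocator, so early device mappings for the console are recorded in a small fixed-size table and carved directly out of virtual_avail; once the VM is up, a later pmap_mapdev_attr() call with the same physical range, size, and cache mode gets the recorded VA back instead of fresh KVA.

/*
 * Standalone model of the pre-initialization mapping table introduced by
 * this change.  All names and the simple bump allocator are illustrative;
 * the real kernel code is in sys/i386/i386/pmap.c below.
 */
#include <stdint.h>
#include <stdio.h>

#define PREINIT_COUNT 8

struct preinit_mapping {
	uint64_t pa;	/* physical address of the device range */
	uintptr_t va;	/* virtual address handed back to the caller */
	size_t sz;	/* size of the mapping, page-rounded */
	int mode;	/* cache mode (e.g. uncacheable, write-combining) */
};

static struct preinit_mapping preinit[PREINIT_COUNT];
static int initialized;			/* set once the real VM is up */
static uintptr_t virtual_avail = 0xc1000000; /* fake early KVA cursor */

static uintptr_t
model_mapdev(uint64_t pa, size_t sz, int mode)
{
	int i;

	if (!initialized) {
		/* Early boot: record the mapping in a free slot. */
		for (i = 0; i < PREINIT_COUNT; i++) {
			if (preinit[i].va == 0) {
				preinit[i] = (struct preinit_mapping){
				    .pa = pa, .va = virtual_avail,
				    .sz = sz, .mode = mode };
				virtual_avail += sz;
				return (preinit[i].va);
			}
		}
		return (0);	/* table full: the kernel panics here */
	}
	/* After initialization: reuse an identical early mapping. */
	for (i = 0; i < PREINIT_COUNT; i++) {
		if (preinit[i].pa == pa && preinit[i].sz == sz &&
		    preinit[i].mode == mode)
			return (preinit[i].va);
	}
	return (0);	/* would fall back to kva_alloc() in the kernel */
}

int
main(void)
{
	uintptr_t early = model_mapdev(0xfd000000, 0x400000, 1);
	uintptr_t later;

	initialized = 1;
	later = model_mapdev(0xfd000000, 0x400000, 1);
	printf("early VA %#lx, later VA %#lx, reused: %s\n",
	    (unsigned long)early, (unsigned long)later,
	    early == later ? "yes" : "no");
	return (0);
}

Compiled as an ordinary userland program, the model shows the early mapping and the post-initialization lookup returning the same virtual address, which is exactly what lets a console mapped before pmap_init() keep working afterwards.
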
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 3244be6..ab543fe 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -3320,6 +3320,40 @@ init386(first)
*/
i8254_init();
+ finishidentcpu(); /* Final stage of CPU initialization */
+ setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
+ GSEL(GCODE_SEL, SEL_KPL));
+ setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
+ GSEL(GCODE_SEL, SEL_KPL));
+ initializecpu(); /* Initialize CPU registers */
+ initializecpucache();
+
+ /* pointer to selector slot for %fs/%gs */
+ PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
+
+ dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
+ dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
+ dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
+ dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
+#if defined(PAE) || defined(PAE_TABLES)
+ dblfault_tss.tss_cr3 = (int)IdlePDPT;
+#else
+ dblfault_tss.tss_cr3 = (int)IdlePTD;
+#endif
+ dblfault_tss.tss_eip = (int)dblfault_handler;
+ dblfault_tss.tss_eflags = PSL_KERNEL;
+ dblfault_tss.tss_ds = dblfault_tss.tss_es =
+ dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
+ dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
+ dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+
+ vm86_initialize();
+ getmemsize(first);
+ init_param2(physmem);
+
+ /* now running on new page tables, configured,and u/iom is accessible */
+
/*
* Initialize the console before we print anything out.
*/
@@ -3361,40 +3395,6 @@ init386(first)
kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
- finishidentcpu(); /* Final stage of CPU initialization */
- setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
- GSEL(GCODE_SEL, SEL_KPL));
- setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
- GSEL(GCODE_SEL, SEL_KPL));
- initializecpu(); /* Initialize CPU registers */
- initializecpucache();
-
- /* pointer to selector slot for %fs/%gs */
- PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
-
- dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
- dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
- dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
- dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
-#if defined(PAE) || defined(PAE_TABLES)
- dblfault_tss.tss_cr3 = (int)IdlePDPT;
-#else
- dblfault_tss.tss_cr3 = (int)IdlePTD;
-#endif
- dblfault_tss.tss_eip = (int)dblfault_handler;
- dblfault_tss.tss_eflags = PSL_KERNEL;
- dblfault_tss.tss_ds = dblfault_tss.tss_es =
- dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
- dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
- dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
- dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
-
- vm86_initialize();
- getmemsize(first);
- init_param2(physmem);
-
- /* now running on new page tables, configured,and u/iom is accessible */
-
msgbufinit(msgbufp, msgbufsize);
#ifdef DEV_NPX
npxinit(true);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 4e09a90..540eb65 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -232,6 +232,18 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
#define PAT_INDEX_SIZE 8
static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
+/*
+ * pmap_mapdev support pre initialization (i.e. console)
+ */
+#define PMAP_PREINIT_MAPPING_COUNT 8
+static struct pmap_preinit_mapping {
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_size_t sz;
+ int mode;
+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
+static int pmap_initialized;
+
static struct rwlock_padalign pvh_global_lock;
/*
@@ -731,6 +743,7 @@ pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
void
pmap_init(void)
{
+ struct pmap_preinit_mapping *ppim;
vm_page_t mpte;
vm_size_t s;
int i, pv_npg;
@@ -814,6 +827,17 @@ pmap_init(void)
UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif
+
+ pmap_initialized = 1;
+ if (!bootverbose)
+ return;
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == 0)
+ continue;
+ printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
+ (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
+ }
}
@@ -5162,8 +5186,10 @@ pmap_pde_attr(pd_entry_t *pde, int cache_bits)
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
+ struct pmap_preinit_mapping *ppim;
vm_offset_t va, offset;
vm_size_t tmpsize;
+ int i;
offset = pa & PAGE_MASK;
size = round_page(offset + size);
@@ -5171,11 +5197,36 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
if (pa < KERNLOAD && pa + size <= KERNLOAD)
va = KERNBASE + pa;
- else
+ else if (!pmap_initialized) {
+ va = 0;
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == 0) {
+ ppim->pa = pa;
+ ppim->sz = size;
+ ppim->mode = mode;
+ ppim->va = virtual_avail;
+ virtual_avail += size;
+ va = ppim->va;
+ break;
+ }
+ }
+ if (va == 0)
+ panic("%s: too many preinit mappings", __func__);
+ } else {
+ /*
+ * If we have a preinit mapping, re-use it.
+ */
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->pa == pa && ppim->sz == size &&
+ ppim->mode == mode)
+ return ((void *)(ppim->va + offset));
+ }
va = kva_alloc(size);
- if (!va)
- panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
-
+ if (va == 0)
+ panic("%s: Couldn't allocate KVA", __func__);
+ }
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
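
As a usage sketch only (a hypothetical early framebuffer driver, not part of this commit; the write-combining attribute and the helper names are example choices): a console driver can call pmap_mapdev_attr() before pmap_init() and transparently receive a pre-init slot, and an identical request made after VM startup resolves to the same VA via the reuse loop above.

/*
 * Hypothetical early-console mapping, illustrating the interface only.
 * The framebuffer address, size, and cache attribute are example values.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>

static vm_offset_t fb_va;

static void
example_fb_map(vm_paddr_t fb_pa, vm_size_t fb_size)
{
	/*
	 * Before pmap_init(): the VA comes from a pre-init slot carved
	 * out of virtual_avail.  After pmap_init(): an identical request
	 * reuses that slot; anything else goes through kva_alloc().
	 */
	fb_va = (vm_offset_t)pmap_mapdev_attr(fb_pa, fb_size,
	    VM_MEMATTR_WRITE_COMBINING);
}

static void
example_fb_unmap(vm_size_t fb_size)
{
	/* Releases KVA only for mappings that did not come from a slot. */
	pmap_unmapdev(fb_va, fb_size);
}
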
@@ -5200,14 +5251,31 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
- vm_offset_t base, offset;
+ struct pmap_preinit_mapping *ppim;
+ vm_offset_t offset;
+ int i;
if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
return;
- base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kva_free(base, size);
+ va = trunc_page(va);
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == va && ppim->sz == size) {
+ if (pmap_initialized)
+ return;
+ ppim->pa = 0;
+ ppim->va = 0;
+ ppim->sz = 0;
+ ppim->mode = 0;
+ if (va + size == virtual_avail)
+ virtual_avail = va;
+ return;
+ }
+ }
+ if (pmap_initialized)
+ kva_free(va, size);
}
/*
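
Continuing the standalone model from the top of the diff (same illustrative names), the unmap side mirrors this last hunk: a mapping recorded in the pre-init table is kept once the VM is initialized, is forgotten during early boot (rolling back the bump cursor when it was the most recent allocation), and only mappings that never hit the table would be handed to kva_free().

/*
 * Unmap counterpart for the standalone model above (illustrative only).
 */
void
model_unmapdev(uintptr_t va, size_t sz)
{
	int i;

	for (i = 0; i < PREINIT_COUNT; i++) {
		if (preinit[i].va != va || preinit[i].sz != sz)
			continue;
		if (initialized)
			return;		/* early console mappings persist */
		preinit[i] = (struct preinit_mapping){ 0 };
		if (va + sz == virtual_avail)
			virtual_avail = va;	/* undo the bump allocation */
		return;
	}
	/* Not a recorded slot: the kernel would kva_free(va, sz) here. */
}
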