author	kato <kato@FreeBSD.org>	1999-06-03 13:49:52 +0000
committer	kato <kato@FreeBSD.org>	1999-06-03 13:49:52 +0000
commit	d6a048fdfe640a6182d7be83858c25df91e8450c (patch)
tree	edc1ae3fadda3859b48927f8f000289496c99d62 /sys
parent	3af49a50615a772322a03e657cd398337dea721e (diff)
Sync with sys/i386/i386/machdep.c revision 1.339.
A new function, getmemsize_pc98, is added in this commit, since the way PC98 obtains the memory size is quite different from the IBM-PC. Many lines of this function could be shared with the IBM-PC's getmemsize function, but sharing them would require many #ifdef PC98 statements. Therefore, I gave up sharing code with the IBM-PC's and just added the new function.
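As context for the diff below: the heart of the new IBM-PC getmemsize() is the loop that folds BIOS INT 15:E820 (SMAP) entries into a physmap[] array of (base, end) byte pairs. The following is a hedged, standalone sketch of that coalescing rule only — add_smap_entry() and the sample regions in main() are illustrative names and values, not part of the commit:

#include <stdio.h>
#include <stdint.h>

#define PHYSMAP_SIZE	(2 * 8)		/* same bound as in the diff */

static uint64_t physmap[PHYSMAP_SIZE];	/* (base, end) pairs, in bytes */
static int physmap_idx;

/*
 * Mirrors the kernel loop body: skip zero-length entries, reject
 * overlapping or non-monotonic ones, extend the previous region when
 * the new entry is contiguous with it, else start a new pair.
 */
static void
add_smap_entry(uint64_t base, uint64_t length)
{
	int i;

	if (length == 0)
		return;
	for (i = 0; i <= physmap_idx; i += 2)
		if (base < physmap[i + 1])
			return;			/* overlaps an earlier region */
	if (base == physmap[physmap_idx + 1]) {
		physmap[physmap_idx + 1] += length;
		return;				/* contiguous: merged */
	}
	physmap_idx += 2;
	if (physmap_idx == PHYSMAP_SIZE) {
		physmap_idx -= 2;		/* table full; the kernel gives up here */
		return;
	}
	physmap[physmap_idx] = base;
	physmap[physmap_idx + 1] = base + length;
}

int
main(void)
{
	int i;

	add_smap_entry(0x000000, 640 * 1024);		/* base memory */
	add_smap_entry(0x100000, 15 * 1024 * 1024);	/* extended memory */
	add_smap_entry(0x1000000, 16 * 1024 * 1024);	/* contiguous: merged */
	add_smap_entry(0x800000, 4096);			/* overlap: dropped */

	for (i = 0; i <= physmap_idx; i += 2)
		printf("region: %#llx-%#llx\n",
		    (unsigned long long)physmap[i],
		    (unsigned long long)physmap[i + 1]);
	return (0);
}

Compiled with any C compiler, this prints two regions — the 640K base chunk and a single 0x100000-0x2000000 extended chunk — with the overlapping entry discarded, the same behavior the kernel loop in the diff relies on when the BIOS reports adjacent or bogus ranges.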
Diffstat (limited to 'sys')
-rw-r--r--	sys/pc98/i386/machdep.c	854
-rw-r--r--	sys/pc98/pc98/machdep.c	854
2 files changed, 1118 insertions(+), 590 deletions(-)
diff --git a/sys/pc98/i386/machdep.c b/sys/pc98/i386/machdep.c
index 3effcf9..37917bf 100644
--- a/sys/pc98/i386/machdep.c
+++ b/sys/pc98/i386/machdep.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
- * $Id: machdep.c,v 1.116 1999/05/06 00:54:54 luoqi Exp $
+ * $Id: machdep.c,v 1.117 1999/05/12 21:39:07 luoqi Exp $
*/
#include "apm.h"
@@ -53,7 +53,6 @@
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"
-#include "opt_vm86.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -128,10 +127,9 @@
#include <pc98/pc98/pc98_machdep.h>
#include <pc98/pc98/pc98.h>
#else
-#ifndef VM86
#include <i386/isa/rtc.h>
#endif
-#endif
+#include <machine/vm86.h>
#include <machine/random.h>
#include <sys/ptrace.h>
@@ -581,7 +579,6 @@ sendsig(catcher, sig, mask, code)
sf.sf_sc.sc_trapno = regs->tf_trapno;
sf.sf_sc.sc_err = regs->tf_err;
-#ifdef VM86
/*
* If we're a vm86 process, we want to save the segment registers.
* We also change eflags to be our emulated eflags, not the actual
@@ -613,7 +610,6 @@ sendsig(catcher, sig, mask, code)
*/
tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
}
-#endif /* VM86 */
/*
* Copy the sigframe out to the user's stack.
@@ -670,7 +666,6 @@ sigreturn(p, uap)
return(EFAULT);
eflags = scp->sc_ps;
-#ifdef VM86
if (eflags & PSL_VM) {
struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
struct vm86_kernel *vm86;
@@ -704,7 +699,6 @@ sigreturn(p, uap)
tf->tf_es = _udatasel;
tf->tf_fs = _udatasel;
} else {
-#endif /* VM86 */
/*
* Don't allow users to change privileged or reserved flags.
*/
@@ -742,9 +736,7 @@ sigreturn(p, uap)
regs->tf_ds = scp->sc_ds;
regs->tf_es = scp->sc_es;
regs->tf_fs = scp->sc_fs;
-#ifdef VM86
}
-#endif
/* restore scratch registers */
regs->tf_eax = scp->sc_eax;
@@ -915,12 +907,10 @@ union descriptor ldt[NLDT]; /* local descriptor table */
struct region_descriptor r_gdt, r_idt;
#endif
-#ifdef VM86
#ifndef SMP
extern struct segment_descriptor common_tssd, *tss_gdt;
#endif
int private_tss; /* flag indicating private tss */
-#endif /* VM86 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
struct gate_descriptor *t_idt;
@@ -1151,270 +1141,23 @@ sdtossd(sd, ssd)
ssd->ssd_gran = sd->sd_gran;
}
-void
-init386(first)
- int first;
-{
- int x;
- unsigned biosbasemem, biosextmem;
- struct gate_descriptor *gdp;
- int gsel_tss;
-#if NNPX > 0
- int msize;
-#endif
-
-#ifndef SMP
- /* table descriptors - used to load tables by microp */
- struct region_descriptor r_gdt, r_idt;
-#endif
- int pagesinbase, pagesinext;
- vm_offset_t target_page;
- int pa_indx, off;
- int speculative_mprobe;
-
- /*
- * Prevent lowering of the ipl if we call tsleep() early.
- */
- safepri = cpl;
-
- proc0.p_addr = proc0paddr;
-
- atdevbase = ISA_HOLE_START + KERNBASE;
-
-#ifdef PC98
- /*
- * Initialize DMAC
- */
- pc98_init_dmac();
-#endif
-
- /*
- * make gdt memory segments, the code segment goes up to end of the
- * page with etext in it, the data segment goes to the end of
- * the address space
- */
- /*
- * XXX text protection is temporarily (?) disabled. The limit was
- * i386_btop(round_page(etext)) - 1.
- */
- gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
- gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
-#ifdef SMP
- gdt_segs[GPRIV_SEL].ssd_limit =
- i386_btop(sizeof(struct privatespace)) - 1;
- gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
- gdt_segs[GPROC0_SEL].ssd_base =
- (int) &SMP_prvspace[0].globaldata.gd_common_tss;
- SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0];
-#else
- gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1;
- gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss;
-#endif
-
- for (x = 0; x < NGDT; x++) {
-#ifdef BDE_DEBUGGER
- /* avoid overwriting db entries with APM ones */
- if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
- continue;
-#endif
- ssdtosd(&gdt_segs[x], &gdt[x].sd);
- }
-
- r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
- r_gdt.rd_base = (int) gdt;
- lgdt(&r_gdt);
-
- /* make ldt memory segments */
- /*
- * The data segment limit must not cover the user area because we
- * don't want the user area to be writable in copyout() etc. (page
- * level protection is lost in kernel mode on 386's). Also, we
- * don't want the user area to be writable directly (page level
- * protection of the user area is not available on 486's with
- * CR0_WP set, because there is no user-read/kernel-write mode).
- *
- * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it
- * should be spelled ...MAX_USER...
- */
-#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS
- /*
- * The code segment limit has to cover the user area until we move
- * the signal trampoline out of the user area. This is safe because
- * the code segment cannot be written to directly.
- */
-#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
- ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
- ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
- for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
- ssdtosd(&ldt_segs[x], &ldt[x].sd);
-
- _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
- lldt(_default_ldt);
-#ifdef USER_LDT
- currentldt = _default_ldt;
-#endif
-
- /* exceptions */
- for (x = 0; x < NIDT; x++)
- setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
- setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(0x80, &IDTVEC(int0x80_syscall),
- SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
-
- r_idt.rd_limit = sizeof(idt) - 1;
- r_idt.rd_base = (int) idt;
- lidt(&r_idt);
-
- /*
- * Initialize the console before we print anything out.
- */
- cninit();
-
-#include "isa.h"
-#if NISA >0
- isa_defaultirq();
-#endif
- rand_initialize();
-
-#ifdef DDB
- kdb_init();
- if (boothowto & RB_KDB)
- Debugger("Boot flags requested debugger");
-#endif
-
- finishidentcpu(); /* Final stage of CPU initialization */
- setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- initializecpu(); /* Initialize CPU registers */
-
- /* make an initial tss so cpu can get interrupt stack on syscall! */
-#ifdef VM86
- common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
-#else
- common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
-#endif /* VM86 */
- common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
- common_tss.tss_ioopt = (sizeof common_tss) << 16;
- gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
- ltr(gsel_tss);
-#ifdef VM86
- private_tss = 0;
- tss_gdt = &gdt[GPROC0_SEL].sd;
- common_tssd = *tss_gdt;
-#endif
-
- dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
- dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
- dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
- dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
- dblfault_tss.tss_cr3 = (int)IdlePTD;
- dblfault_tss.tss_eip = (int) dblfault_handler;
- dblfault_tss.tss_eflags = PSL_KERNEL;
- dblfault_tss.tss_ds = dblfault_tss.tss_es =
- dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
- dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
- dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
- dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+#define PHYSMAP_SIZE (2 * 8)
-#ifdef VM86
- initial_bioscalls(&biosbasemem, &biosextmem);
+static void
+getmemsize_pc98(int first)
+{
+ u_int biosbasemem, biosextmem;
+ u_int pagesinbase, pagesinext;
+ int pa_indx;
+ int speculative_mprobe;
+#if NNPX > 0
+ int msize;
#endif
+ vm_offset_t target_page;
-#ifdef PC98
pc98_getmemsize();
biosbasemem = 640; /* 640KB */
biosextmem = (Maxmem * PAGE_SIZE - 0x100000)/1024; /* extent memory */
-#elif !defined(VM86) /* IBM-PC */
- /* Use BIOS values stored in RTC CMOS RAM, since probing
- * breaks certain 386 AT relics.
- */
- biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
- biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
-
- /*
- * If BIOS tells us that it has more than 640k in the basemem,
- * don't believe it - set it to 640k.
- */
- if (biosbasemem > 640) {
- printf("Preposterous RTC basemem of %uK, truncating to 640K\n",
- biosbasemem);
- biosbasemem = 640;
- }
- if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
- printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
- bootinfo.bi_basemem);
- bootinfo.bi_basemem = 640;
- }
-
- /*
- * Warn if the official BIOS interface disagrees with the RTC
- * interface used above about the amount of base memory or the
- * amount of extended memory. Prefer the BIOS value for the base
- * memory. This is necessary for machines that `steal' base
- * memory for use as BIOS memory, at least if we are going to use
- * the BIOS for apm. Prefer the RTC value for extended memory.
- * Eventually the hackish interface shouldn't even be looked at.
- */
- if (bootinfo.bi_memsizes_valid) {
- if (bootinfo.bi_basemem != biosbasemem) {
- vm_offset_t pa;
-
- printf(
- "BIOS basemem (%uK) != RTC basemem (%uK), setting to BIOS value\n",
- bootinfo.bi_basemem, biosbasemem);
- biosbasemem = bootinfo.bi_basemem;
-
- /*
- * XXX if biosbasemem is now < 640, there is `hole'
- * between the end of base memory and the start of
- * ISA memory. The hole may be empty or it may
- * contain BIOS code or data. Map it read/write so
- * that the BIOS can write to it. (Memory from 0 to
- * the physical end of the kernel is mapped read-only
- * to begin with and then parts of it are remapped.
- * The parts that aren't remapped form holes that
- * remain read-only and are unused by the kernel.
- * The base memory area is below the physical end of
- * the kernel and right now forms a read-only hole.
- * The part of it from PAGE_SIZE to
- * (trunc_page(biosbasemem * 1024) - 1) will be
- * remapped and used by the kernel later.)
- *
- * This code is similar to the code used in
- * pmap_mapdev, but since no memory needs to be
- * allocated we simply change the mapping.
- */
- for (pa = trunc_page(biosbasemem * 1024);
- pa < ISA_HOLE_START; pa += PAGE_SIZE) {
- unsigned *pte;
-
- pte = (unsigned *)vtopte(pa + KERNBASE);
- *pte = pa | PG_RW | PG_V;
- }
- }
- if (bootinfo.bi_extmem != biosextmem)
- printf("BIOS extmem (%uK) != RTC extmem (%uK)\n",
- bootinfo.bi_extmem, biosextmem);
- }
-#endif
#ifdef SMP
/* make hole for AP bootstrap code */
@@ -1426,22 +1169,6 @@ init386(first)
pagesinext = biosextmem * 1024 / PAGE_SIZE;
/*
- * Special hack for chipsets that still remap the 384k hole when
- * there's 16MB of memory - this really confuses people that
- * are trying to use bus mastering ISA controllers with the
- * "16MB limit"; they only have 16MB, but the remapping puts
- * them beyond the limit.
- */
-#ifndef PC98
- /*
- * If extended memory is between 15-16MB (16-17MB phys address range),
- * chop it to 15MB.
- */
- if ((pagesinext > 3840) && (pagesinext < 4096))
- pagesinext = 3840;
-#endif
-
- /*
* Maxmem isn't the "maximum memory", it's one larger than the
* highest page of the physical address space. It should be
* called something like "Maxphyspage".
@@ -1504,17 +1231,13 @@ init386(first)
int tmp, page_bad;
page_bad = FALSE;
-
-#ifdef PC98
/* skip system area */
if (target_page>=ptoa(Maxmem_under16M) &&
target_page < ptoa(4096))
page_bad = TRUE;
-#endif
/*
* map page into kernel: valid, read/write, non-cacheable
*/
-#ifdef PC98
if (pc98_machine_type & M_EPSON_PC98) {
switch (epson_machine_id) {
case 0x34: /* PC-486HX */
@@ -1531,11 +1254,8 @@ init386(first)
break;
}
} else {
-#endif /* PC98 */
*(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
-#ifdef PC98
}
-#endif
invltlb();
tmp = *(int *)CADDR1;
@@ -1631,7 +1351,553 @@ init386(first)
phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
avail_end = phys_avail[pa_indx];
+}
+
+#ifndef PC98
+static void
+getmemsize(int first)
+{
+ int i, physmap_idx, pa_indx;
+ u_int basemem, extmem;
+ int speculative_mprobe = FALSE;
+ struct vm86frame vmf;
+ struct vm86context vmc;
+ vm_offset_t pa, physmap[PHYSMAP_SIZE];
+ pt_entry_t pte;
+ struct {
+ u_int64_t base;
+ u_int64_t length;
+ u_int32_t type;
+ } *smap;
+#if NNPX > 0
+ int msize;
+#endif
+
+ bzero(&vmf, sizeof(struct vm86frame));
+ bzero(physmap, sizeof(physmap));
+
+ vm86_intcall(0x12, &vmf);
+ basemem = vmf.vmf_ax;
+ if (basemem > 640) {
+ printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
+ basemem);
+ basemem = 640;
+ }
+
+ /*
+ * XXX if biosbasemem is now < 640, there is `hole'
+ * between the end of base memory and the start of
+ * ISA memory. The hole may be empty or it may
+ * contain BIOS code or data. Map it read/write so
+ * that the BIOS can write to it. (Memory from 0 to
+ * the physical end of the kernel is mapped read-only
+ * to begin with and then parts of it are remapped.
+ * The parts that aren't remapped form holes that
+ * remain read-only and are unused by the kernel.
+ * The base memory area is below the physical end of
+ * the kernel and right now forms a read-only hole.
+ * The part of it from PAGE_SIZE to
+ * (trunc_page(biosbasemem * 1024) - 1) will be
+ * remapped and used by the kernel later.)
+ *
+ * This code is similar to the code used in
+ * pmap_mapdev, but since no memory needs to be
+ * allocated we simply change the mapping.
+ */
+ for (pa = trunc_page(basemem * 1024);
+ pa < ISA_HOLE_START; pa += PAGE_SIZE) {
+ pte = (pt_entry_t)vtopte(pa + KERNBASE);
+ *pte = pa | PG_RW | PG_V;
+ }
+
+ /*
+ * if basemem != 640, map pages r/w into vm86 page table so
+ * that the bios can scribble on it.
+ */
+ pte = (pt_entry_t)vm86paddr;
+ for (i = basemem / 4; i < 160; i++)
+ pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
+
+ /*
+ * map page 1 R/W into the kernel page table so we can use it
+ * as a buffer. The kernel will unmap this page later.
+ */
+ pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
+ *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
+
+ extmem = (Maxmem * PAGE_SIZE - 0x100000)/1024; /* extent memory */
+ /*
+ * get memory map with INT 15:E820
+ */
+#define SMAPSIZ sizeof(*smap)
+#define SMAP_SIG 0x534D4150 /* 'SMAP' */
+
+ vmc.npages = 0;
+ smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
+ vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
+
+ physmap_idx = 0;
+ vmf.vmf_ebx = 0;
+ do {
+ vmf.vmf_eax = 0xE820;
+ vmf.vmf_edx = SMAP_SIG;
+ vmf.vmf_ecx = SMAPSIZ;
+ i = vm86_datacall(0x15, &vmf, &vmc);
+ if (i || vmf.vmf_eax != SMAP_SIG)
+ break;
+ if (boothowto & RB_VERBOSE)
+ printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
+ smap->type,
+ *(u_int32_t *)((char *)&smap->base + 4),
+ (u_int32_t)smap->base,
+ *(u_int32_t *)((char *)&smap->length + 4),
+ (u_int32_t)smap->length);
+
+ if (smap->type != 0x01)
+ goto next_run;
+
+ if (smap->length == 0)
+ goto next_run;
+
+ for (i = 0; i <= physmap_idx; i += 2) {
+ if (smap->base < physmap[i + 1]) {
+ if (boothowto & RB_VERBOSE)
+ printf(
+ "Overlapping or non-montonic memory region, ignoring second region\n");
+ goto next_run;
+ }
+ }
+
+ if (smap->base == physmap[physmap_idx + 1]) {
+ physmap[physmap_idx + 1] += smap->length;
+ goto next_run;
+ }
+
+ physmap_idx += 2;
+ if (physmap_idx == PHYSMAP_SIZE) {
+ printf(
+ "Too many segments in the physical address map, giving up\n");
+ break;
+ }
+ physmap[physmap_idx] = smap->base;
+ physmap[physmap_idx + 1] = smap->base + smap->length;
+next_run:
+ } while (vmf.vmf_ebx != 0);
+
+ if (physmap[1] != 0)
+ goto physmap_done;
+
+ /*
+ * try memory map with INT 15:E801
+ */
+ vmf.vmf_ax = 0xE801;
+ if (vm86_intcall(0x15, &vmf) == 0) {
+ extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
+ } else {
+#if 0
+ vmf.vmf_ah = 0x88;
+ vm86_intcall(0x15, &vmf);
+ extmem = vmf.vmf_ax;
+#else
+ /*
+ * Prefer the RTC value for extended memory.
+ */
+ extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
+#endif
+ }
+
+ /*
+ * Only perform calculations in this section if there is no system
+ * map; any system new enough that supports SMAP probably does not
+ * need these workarounds.
+ */
+ /*
+ * Special hack for chipsets that still remap the 384k hole when
+ * there's 16MB of memory - this really confuses people that
+ * are trying to use bus mastering ISA controllers with the
+ * "16MB limit"; they only have 16MB, but the remapping puts
+ * them beyond the limit.
+ */
+ /*
+ * If extended memory is between 15-16MB (16-17MB phys address range),
+ * chop it to 15MB.
+ */
+ if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
+ extmem = 15 * 1024;
+
+ physmap[0] = 0;
+ physmap[1] = basemem * 1024;
+ physmap_idx = 2;
+ physmap[physmap_idx] = 0x100000;
+ physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
+
+ /*
+ * Indicate that we wish to do a speculative search for memory
+ * beyond the end of the reported size if the indicated amount
+ * is 64M (or more).
+ *
+ * XXX we should only do this in the RTC / 0x88 case
+ */
+ if (extmem >= 16 * 1024)
+ speculative_mprobe = TRUE;
+
+physmap_done:
+ /*
+ * Now, physmap contains a map of physical memory.
+ */
+
+#ifdef SMP
+ /* make hole for AP bootstrap code */
+ physmap[1] = mp_bootaddress(physmap[1] / 1024);
+#endif
+
+ /*
+ * Maxmem isn't the "maximum memory", it's one larger than the
+ * highest page of the physical address space. It should be
+ * called something like "Maxphyspage".
+ */
+ Maxmem = physmap[physmap_idx + 1] / PAGE_SIZE;
+
+ /*
+ * If a specific amount of memory is indicated via the MAXMEM
+ * option or the npx0 "msize", then don't do the speculative
+ * memory probe.
+ */
+#ifdef MAXMEM
+ Maxmem = MAXMEM / 4;
+ speculative_mprobe = FALSE;
+#endif
+
+#if NNPX > 0
+ if (resource_int_value("npx", 0, "msize", &msize) == 0) {
+ if (msize != 0) {
+ Maxmem = msize / 4;
+ speculative_mprobe = FALSE;
+ }
+ }
+#endif
+
+#ifdef SMP
+ /* look for the MP hardware - needed for apic addresses */
+ mp_probe();
+#endif
+ /* call pmap initialization to make new kernel address space */
+ pmap_bootstrap(first, 0);
+
+ /*
+ * Size up each available chunk of physical memory.
+ */
+ physmap[0] = PAGE_SIZE; /* mask off page 0 */
+ pa_indx = 0;
+ phys_avail[pa_indx++] = physmap[0];
+ phys_avail[pa_indx] = physmap[0];
+#if 0
+ pte = (pt_entry_t)vtopte(KERNBASE);
+#else
+ pte = (pt_entry_t)CMAP1;
+#endif
+
+ /*
+ * physmap is in bytes, so when converting to page boundaries,
+ * round up the start address and round down the end address.
+ */
+ for (i = 0; i <= physmap_idx; i += 2) {
+ int end;
+
+ end = ptoa(Maxmem);
+ if (physmap[i + 1] < end)
+ end = trunc_page(physmap[i + 1]);
+ for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
+ int tmp, page_bad;
+#if 0
+ int *ptr = 0;
+#else
+ int *ptr = (int *)CADDR1;
+#endif
+
+ /*
+ * block out kernel memory as not available.
+ */
+ if (pa >= 0x100000 && pa < first)
+ continue;
+
+ page_bad = FALSE;
+
+ /*
+ * map page into kernel: valid, read/write,non-cacheable
+ */
+ *pte = pa | PG_V | PG_RW | PG_N;
+ invltlb();
+
+ tmp = *(int *)ptr;
+ /*
+ * Test for alternating 1's and 0's
+ */
+ *(volatile int *)ptr = 0xaaaaaaaa;
+ if (*(volatile int *)ptr != 0xaaaaaaaa) {
+ page_bad = TRUE;
+ }
+ /*
+ * Test for alternating 0's and 1's
+ */
+ *(volatile int *)ptr = 0x55555555;
+ if (*(volatile int *)ptr != 0x55555555) {
+ page_bad = TRUE;
+ }
+ /*
+ * Test for all 1's
+ */
+ *(volatile int *)ptr = 0xffffffff;
+ if (*(volatile int *)ptr != 0xffffffff) {
+ page_bad = TRUE;
+ }
+ /*
+ * Test for all 0's
+ */
+ *(volatile int *)ptr = 0x0;
+ if (*(volatile int *)ptr != 0x0) {
+ page_bad = TRUE;
+ }
+ /*
+ * Restore original value.
+ */
+ *(int *)ptr = tmp;
+
+ /*
+ * Adjust array of valid/good pages.
+ */
+ if (page_bad == TRUE) {
+ continue;
+ }
+ /*
+ * If this good page is a continuation of the
+ * previous set of good pages, then just increase
+ * the end pointer. Otherwise start a new chunk.
+ * Note that "end" points one higher than end,
+ * making the range >= start and < end.
+ * If we're also doing a speculative memory
+ * test and we at or past the end, bump up Maxmem
+ * so that we keep going. The first bad page
+ * will terminate the loop.
+ */
+ if (phys_avail[pa_indx] == pa) {
+ phys_avail[pa_indx] += PAGE_SIZE;
+ if (speculative_mprobe == TRUE &&
+ phys_avail[pa_indx] >= (64*1024*1024))
+ end += PAGE_SIZE;
+ } else {
+ pa_indx++;
+ if (pa_indx == PHYS_AVAIL_ARRAY_END) {
+ printf("Too many holes in the physical address space, giving up\n");
+ pa_indx--;
+ break;
+ }
+ phys_avail[pa_indx++] = pa; /* start */
+ phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
+ }
+ physmem++;
+ }
+ }
+ *pte = 0;
+ invltlb();
+
+ /*
+ * XXX
+ * The last chunk must contain at least one page plus the message
+ * buffer to avoid complicating other code (message buffer address
+ * calculation, etc.).
+ */
+ while (phys_avail[pa_indx - 1] + PAGE_SIZE +
+ round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
+ physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
+ phys_avail[pa_indx--] = 0;
+ phys_avail[pa_indx--] = 0;
+ }
+
+ Maxmem = atop(phys_avail[pa_indx]);
+
+ /* Trim off space for the message buffer. */
+ phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
+
+ avail_end = phys_avail[pa_indx];
+}
+#endif
+
+void
+init386(first)
+ int first;
+{
+ int x;
+ struct gate_descriptor *gdp;
+ int gsel_tss;
+#ifndef SMP
+ /* table descriptors - used to load tables by microp */
+ struct region_descriptor r_gdt, r_idt;
+#endif
+ int off;
+
+ /*
+ * Prevent lowering of the ipl if we call tsleep() early.
+ */
+ safepri = cpl;
+
+ proc0.p_addr = proc0paddr;
+
+ atdevbase = ISA_HOLE_START + KERNBASE;
+
+#ifdef PC98
+ /*
+ * Initialize DMAC
+ */
+ pc98_init_dmac();
+#endif
+
+ /*
+ * make gdt memory segments, the code segment goes up to end of the
+ * page with etext in it, the data segment goes to the end of
+ * the address space
+ */
+ /*
+ * XXX text protection is temporarily (?) disabled. The limit was
+ * i386_btop(round_page(etext)) - 1.
+ */
+ gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
+ gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
+#ifdef SMP
+ gdt_segs[GPRIV_SEL].ssd_limit =
+ i386_btop(sizeof(struct privatespace)) - 1;
+ gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
+ gdt_segs[GPROC0_SEL].ssd_base =
+ (int) &SMP_prvspace[0].globaldata.gd_common_tss;
+ SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0];
+#else
+ gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1;
+ gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss;
+#endif
+
+ for (x = 0; x < NGDT; x++) {
+#ifdef BDE_DEBUGGER
+ /* avoid overwriting db entries with APM ones */
+ if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
+ continue;
+#endif
+ ssdtosd(&gdt_segs[x], &gdt[x].sd);
+ }
+
+ r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
+ r_gdt.rd_base = (int) gdt;
+ lgdt(&r_gdt);
+
+ /* make ldt memory segments */
+ /*
+ * The data segment limit must not cover the user area because we
+ * don't want the user area to be writable in copyout() etc. (page
+ * level protection is lost in kernel mode on 386's). Also, we
+ * don't want the user area to be writable directly (page level
+ * protection of the user area is not available on 486's with
+ * CR0_WP set, because there is no user-read/kernel-write mode).
+ *
+ * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it
+ * should be spelled ...MAX_USER...
+ */
+#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS
+ /*
+ * The code segment limit has to cover the user area until we move
+ * the signal trampoline out of the user area. This is safe because
+ * the code segment cannot be written to directly.
+ */
+#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
+ ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
+ ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
+ for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
+ ssdtosd(&ldt_segs[x], &ldt[x].sd);
+
+ _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
+ lldt(_default_ldt);
+#ifdef USER_LDT
+ currentldt = _default_ldt;
+#endif
+ /* exceptions */
+ for (x = 0; x < NIDT; x++)
+ setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
+ setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(0x80, &IDTVEC(int0x80_syscall),
+ SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
+
+ r_idt.rd_limit = sizeof(idt) - 1;
+ r_idt.rd_base = (int) idt;
+ lidt(&r_idt);
+
+ /*
+ * Initialize the console before we print anything out.
+ */
+ cninit();
+
+#include "isa.h"
+#if NISA >0
+ isa_defaultirq();
+#endif
+ rand_initialize();
+
+#ifdef DDB
+ kdb_init();
+ if (boothowto & RB_KDB)
+ Debugger("Boot flags requested debugger");
+#endif
+
+ finishidentcpu(); /* Final stage of CPU initialization */
+ setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ initializecpu(); /* Initialize CPU registers */
+
+ /* make an initial tss so cpu can get interrupt stack on syscall! */
+ common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
+ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
+ common_tss.tss_ioopt = (sizeof common_tss) << 16;
+ gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+ ltr(gsel_tss);
+ private_tss = 0;
+ tss_gdt = &gdt[GPROC0_SEL].sd;
+ common_tssd = *tss_gdt;
+
+ dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
+ dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
+ dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
+ dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_cr3 = (int)IdlePTD;
+ dblfault_tss.tss_eip = (int) dblfault_handler;
+ dblfault_tss.tss_eflags = PSL_KERNEL;
+ dblfault_tss.tss_ds = dblfault_tss.tss_es =
+ dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
+ dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
+ dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+
+ vm86_initialize();
+#ifdef PC98
+ getmemsize_pc98(first);
+#else
+ getmemsize(first);
+#endif
/* now running on new page tables, configured,and u/iom is accessible */
/* Map the message buffer. */
@@ -1668,9 +1934,7 @@ init386(first)
#ifdef SMP
proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
-#ifdef VM86
proc0.p_addr->u_pcb.pcb_ext = 0;
-#endif
/* Sigh, relocate physical addresses left from bootstrap */
if (bootinfo.bi_modulep) {
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index 3effcf9..37917bf 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
- * $Id: machdep.c,v 1.116 1999/05/06 00:54:54 luoqi Exp $
+ * $Id: machdep.c,v 1.117 1999/05/12 21:39:07 luoqi Exp $
*/
#include "apm.h"
@@ -53,7 +53,6 @@
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"
-#include "opt_vm86.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -128,10 +127,9 @@
#include <pc98/pc98/pc98_machdep.h>
#include <pc98/pc98/pc98.h>
#else
-#ifndef VM86
#include <i386/isa/rtc.h>
#endif
-#endif
+#include <machine/vm86.h>
#include <machine/random.h>
#include <sys/ptrace.h>
@@ -581,7 +579,6 @@ sendsig(catcher, sig, mask, code)
sf.sf_sc.sc_trapno = regs->tf_trapno;
sf.sf_sc.sc_err = regs->tf_err;
-#ifdef VM86
/*
* If we're a vm86 process, we want to save the segment registers.
* We also change eflags to be our emulated eflags, not the actual
@@ -613,7 +610,6 @@ sendsig(catcher, sig, mask, code)
*/
tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
}
-#endif /* VM86 */
/*
* Copy the sigframe out to the user's stack.
@@ -670,7 +666,6 @@ sigreturn(p, uap)
return(EFAULT);
eflags = scp->sc_ps;
-#ifdef VM86
if (eflags & PSL_VM) {
struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
struct vm86_kernel *vm86;
@@ -704,7 +699,6 @@ sigreturn(p, uap)
tf->tf_es = _udatasel;
tf->tf_fs = _udatasel;
} else {
-#endif /* VM86 */
/*
* Don't allow users to change privileged or reserved flags.
*/
@@ -742,9 +736,7 @@ sigreturn(p, uap)
regs->tf_ds = scp->sc_ds;
regs->tf_es = scp->sc_es;
regs->tf_fs = scp->sc_fs;
-#ifdef VM86
}
-#endif
/* restore scratch registers */
regs->tf_eax = scp->sc_eax;
@@ -915,12 +907,10 @@ union descriptor ldt[NLDT]; /* local descriptor table */
struct region_descriptor r_gdt, r_idt;
#endif
-#ifdef VM86
#ifndef SMP
extern struct segment_descriptor common_tssd, *tss_gdt;
#endif
int private_tss; /* flag indicating private tss */
-#endif /* VM86 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
struct gate_descriptor *t_idt;
@@ -1151,270 +1141,23 @@ sdtossd(sd, ssd)
ssd->ssd_gran = sd->sd_gran;
}
-void
-init386(first)
- int first;
-{
- int x;
- unsigned biosbasemem, biosextmem;
- struct gate_descriptor *gdp;
- int gsel_tss;
-#if NNPX > 0
- int msize;
-#endif
-
-#ifndef SMP
- /* table descriptors - used to load tables by microp */
- struct region_descriptor r_gdt, r_idt;
-#endif
- int pagesinbase, pagesinext;
- vm_offset_t target_page;
- int pa_indx, off;
- int speculative_mprobe;
-
- /*
- * Prevent lowering of the ipl if we call tsleep() early.
- */
- safepri = cpl;
-
- proc0.p_addr = proc0paddr;
-
- atdevbase = ISA_HOLE_START + KERNBASE;
-
-#ifdef PC98
- /*
- * Initialize DMAC
- */
- pc98_init_dmac();
-#endif
-
- /*
- * make gdt memory segments, the code segment goes up to end of the
- * page with etext in it, the data segment goes to the end of
- * the address space
- */
- /*
- * XXX text protection is temporarily (?) disabled. The limit was
- * i386_btop(round_page(etext)) - 1.
- */
- gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
- gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
-#ifdef SMP
- gdt_segs[GPRIV_SEL].ssd_limit =
- i386_btop(sizeof(struct privatespace)) - 1;
- gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
- gdt_segs[GPROC0_SEL].ssd_base =
- (int) &SMP_prvspace[0].globaldata.gd_common_tss;
- SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0];
-#else
- gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1;
- gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss;
-#endif
-
- for (x = 0; x < NGDT; x++) {
-#ifdef BDE_DEBUGGER
- /* avoid overwriting db entries with APM ones */
- if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
- continue;
-#endif
- ssdtosd(&gdt_segs[x], &gdt[x].sd);
- }
-
- r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
- r_gdt.rd_base = (int) gdt;
- lgdt(&r_gdt);
-
- /* make ldt memory segments */
- /*
- * The data segment limit must not cover the user area because we
- * don't want the user area to be writable in copyout() etc. (page
- * level protection is lost in kernel mode on 386's). Also, we
- * don't want the user area to be writable directly (page level
- * protection of the user area is not available on 486's with
- * CR0_WP set, because there is no user-read/kernel-write mode).
- *
- * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it
- * should be spelled ...MAX_USER...
- */
-#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS
- /*
- * The code segment limit has to cover the user area until we move
- * the signal trampoline out of the user area. This is safe because
- * the code segment cannot be written to directly.
- */
-#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
- ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
- ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
- for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
- ssdtosd(&ldt_segs[x], &ldt[x].sd);
-
- _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
- lldt(_default_ldt);
-#ifdef USER_LDT
- currentldt = _default_ldt;
-#endif
-
- /* exceptions */
- for (x = 0; x < NIDT; x++)
- setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
- setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(0x80, &IDTVEC(int0x80_syscall),
- SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
-
- r_idt.rd_limit = sizeof(idt) - 1;
- r_idt.rd_base = (int) idt;
- lidt(&r_idt);
-
- /*
- * Initialize the console before we print anything out.
- */
- cninit();
-
-#include "isa.h"
-#if NISA >0
- isa_defaultirq();
-#endif
- rand_initialize();
-
-#ifdef DDB
- kdb_init();
- if (boothowto & RB_KDB)
- Debugger("Boot flags requested debugger");
-#endif
-
- finishidentcpu(); /* Final stage of CPU initialization */
- setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- initializecpu(); /* Initialize CPU registers */
-
- /* make an initial tss so cpu can get interrupt stack on syscall! */
-#ifdef VM86
- common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
-#else
- common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
-#endif /* VM86 */
- common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
- common_tss.tss_ioopt = (sizeof common_tss) << 16;
- gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
- ltr(gsel_tss);
-#ifdef VM86
- private_tss = 0;
- tss_gdt = &gdt[GPROC0_SEL].sd;
- common_tssd = *tss_gdt;
-#endif
-
- dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
- dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
- dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
- dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
- dblfault_tss.tss_cr3 = (int)IdlePTD;
- dblfault_tss.tss_eip = (int) dblfault_handler;
- dblfault_tss.tss_eflags = PSL_KERNEL;
- dblfault_tss.tss_ds = dblfault_tss.tss_es =
- dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
- dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
- dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
- dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+#define PHYSMAP_SIZE (2 * 8)
-#ifdef VM86
- initial_bioscalls(&biosbasemem, &biosextmem);
+static void
+getmemsize_pc98(int first)
+{
+ u_int biosbasemem, biosextmem;
+ u_int pagesinbase, pagesinext;
+ int pa_indx;
+ int speculative_mprobe;
+#if NNPX > 0
+ int msize;
#endif
+ vm_offset_t target_page;
-#ifdef PC98
pc98_getmemsize();
biosbasemem = 640; /* 640KB */
biosextmem = (Maxmem * PAGE_SIZE - 0x100000)/1024; /* extent memory */
-#elif !defined(VM86) /* IBM-PC */
- /* Use BIOS values stored in RTC CMOS RAM, since probing
- * breaks certain 386 AT relics.
- */
- biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
- biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
-
- /*
- * If BIOS tells us that it has more than 640k in the basemem,
- * don't believe it - set it to 640k.
- */
- if (biosbasemem > 640) {
- printf("Preposterous RTC basemem of %uK, truncating to 640K\n",
- biosbasemem);
- biosbasemem = 640;
- }
- if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
- printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
- bootinfo.bi_basemem);
- bootinfo.bi_basemem = 640;
- }
-
- /*
- * Warn if the official BIOS interface disagrees with the RTC
- * interface used above about the amount of base memory or the
- * amount of extended memory. Prefer the BIOS value for the base
- * memory. This is necessary for machines that `steal' base
- * memory for use as BIOS memory, at least if we are going to use
- * the BIOS for apm. Prefer the RTC value for extended memory.
- * Eventually the hackish interface shouldn't even be looked at.
- */
- if (bootinfo.bi_memsizes_valid) {
- if (bootinfo.bi_basemem != biosbasemem) {
- vm_offset_t pa;
-
- printf(
- "BIOS basemem (%uK) != RTC basemem (%uK), setting to BIOS value\n",
- bootinfo.bi_basemem, biosbasemem);
- biosbasemem = bootinfo.bi_basemem;
-
- /*
- * XXX if biosbasemem is now < 640, there is `hole'
- * between the end of base memory and the start of
- * ISA memory. The hole may be empty or it may
- * contain BIOS code or data. Map it read/write so
- * that the BIOS can write to it. (Memory from 0 to
- * the physical end of the kernel is mapped read-only
- * to begin with and then parts of it are remapped.
- * The parts that aren't remapped form holes that
- * remain read-only and are unused by the kernel.
- * The base memory area is below the physical end of
- * the kernel and right now forms a read-only hole.
- * The part of it from PAGE_SIZE to
- * (trunc_page(biosbasemem * 1024) - 1) will be
- * remapped and used by the kernel later.)
- *
- * This code is similar to the code used in
- * pmap_mapdev, but since no memory needs to be
- * allocated we simply change the mapping.
- */
- for (pa = trunc_page(biosbasemem * 1024);
- pa < ISA_HOLE_START; pa += PAGE_SIZE) {
- unsigned *pte;
-
- pte = (unsigned *)vtopte(pa + KERNBASE);
- *pte = pa | PG_RW | PG_V;
- }
- }
- if (bootinfo.bi_extmem != biosextmem)
- printf("BIOS extmem (%uK) != RTC extmem (%uK)\n",
- bootinfo.bi_extmem, biosextmem);
- }
-#endif
#ifdef SMP
/* make hole for AP bootstrap code */
@@ -1426,22 +1169,6 @@ init386(first)
pagesinext = biosextmem * 1024 / PAGE_SIZE;
/*
- * Special hack for chipsets that still remap the 384k hole when
- * there's 16MB of memory - this really confuses people that
- * are trying to use bus mastering ISA controllers with the
- * "16MB limit"; they only have 16MB, but the remapping puts
- * them beyond the limit.
- */
-#ifndef PC98
- /*
- * If extended memory is between 15-16MB (16-17MB phys address range),
- * chop it to 15MB.
- */
- if ((pagesinext > 3840) && (pagesinext < 4096))
- pagesinext = 3840;
-#endif
-
- /*
* Maxmem isn't the "maximum memory", it's one larger than the
* highest page of the physical address space. It should be
* called something like "Maxphyspage".
@@ -1504,17 +1231,13 @@ init386(first)
int tmp, page_bad;
page_bad = FALSE;
-
-#ifdef PC98
/* skip system area */
if (target_page>=ptoa(Maxmem_under16M) &&
target_page < ptoa(4096))
page_bad = TRUE;
-#endif
/*
* map page into kernel: valid, read/write, non-cacheable
*/
-#ifdef PC98
if (pc98_machine_type & M_EPSON_PC98) {
switch (epson_machine_id) {
case 0x34: /* PC-486HX */
@@ -1531,11 +1254,8 @@ init386(first)
break;
}
} else {
-#endif /* PC98 */
*(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
-#ifdef PC98
}
-#endif
invltlb();
tmp = *(int *)CADDR1;
@@ -1631,7 +1351,553 @@ init386(first)
phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
avail_end = phys_avail[pa_indx];
+}
+
+#ifndef PC98
+static void
+getmemsize(int first)
+{
+ int i, physmap_idx, pa_indx;
+ u_int basemem, extmem;
+ int speculative_mprobe = FALSE;
+ struct vm86frame vmf;
+ struct vm86context vmc;
+ vm_offset_t pa, physmap[PHYSMAP_SIZE];
+ pt_entry_t pte;
+ struct {
+ u_int64_t base;
+ u_int64_t length;
+ u_int32_t type;
+ } *smap;
+#if NNPX > 0
+ int msize;
+#endif
+
+ bzero(&vmf, sizeof(struct vm86frame));
+ bzero(physmap, sizeof(physmap));
+
+ vm86_intcall(0x12, &vmf);
+ basemem = vmf.vmf_ax;
+ if (basemem > 640) {
+ printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
+ basemem);
+ basemem = 640;
+ }
+
+ /*
+ * XXX if biosbasemem is now < 640, there is `hole'
+ * between the end of base memory and the start of
+ * ISA memory. The hole may be empty or it may
+ * contain BIOS code or data. Map it read/write so
+ * that the BIOS can write to it. (Memory from 0 to
+ * the physical end of the kernel is mapped read-only
+ * to begin with and then parts of it are remapped.
+ * The parts that aren't remapped form holes that
+ * remain read-only and are unused by the kernel.
+ * The base memory area is below the physical end of
+ * the kernel and right now forms a read-only hole.
+ * The part of it from PAGE_SIZE to
+ * (trunc_page(biosbasemem * 1024) - 1) will be
+ * remapped and used by the kernel later.)
+ *
+ * This code is similar to the code used in
+ * pmap_mapdev, but since no memory needs to be
+ * allocated we simply change the mapping.
+ */
+ for (pa = trunc_page(basemem * 1024);
+ pa < ISA_HOLE_START; pa += PAGE_SIZE) {
+ pte = (pt_entry_t)vtopte(pa + KERNBASE);
+ *pte = pa | PG_RW | PG_V;
+ }
+
+ /*
+ * if basemem != 640, map pages r/w into vm86 page table so
+ * that the bios can scribble on it.
+ */
+ pte = (pt_entry_t)vm86paddr;
+ for (i = basemem / 4; i < 160; i++)
+ pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
+
+ /*
+ * map page 1 R/W into the kernel page table so we can use it
+ * as a buffer. The kernel will unmap this page later.
+ */
+ pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
+ *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
+
+ extmem = (Maxmem * PAGE_SIZE - 0x100000)/1024; /* extent memory */
+ /*
+ * get memory map with INT 15:E820
+ */
+#define SMAPSIZ sizeof(*smap)
+#define SMAP_SIG 0x534D4150 /* 'SMAP' */
+
+ vmc.npages = 0;
+ smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
+ vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
+
+ physmap_idx = 0;
+ vmf.vmf_ebx = 0;
+ do {
+ vmf.vmf_eax = 0xE820;
+ vmf.vmf_edx = SMAP_SIG;
+ vmf.vmf_ecx = SMAPSIZ;
+ i = vm86_datacall(0x15, &vmf, &vmc);
+ if (i || vmf.vmf_eax != SMAP_SIG)
+ break;
+ if (boothowto & RB_VERBOSE)
+ printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
+ smap->type,
+ *(u_int32_t *)((char *)&smap->base + 4),
+ (u_int32_t)smap->base,
+ *(u_int32_t *)((char *)&smap->length + 4),
+ (u_int32_t)smap->length);
+
+ if (smap->type != 0x01)
+ goto next_run;
+
+ if (smap->length == 0)
+ goto next_run;
+
+ for (i = 0; i <= physmap_idx; i += 2) {
+ if (smap->base < physmap[i + 1]) {
+ if (boothowto & RB_VERBOSE)
+ printf(
+ "Overlapping or non-montonic memory region, ignoring second region\n");
+ goto next_run;
+ }
+ }
+
+ if (smap->base == physmap[physmap_idx + 1]) {
+ physmap[physmap_idx + 1] += smap->length;
+ goto next_run;
+ }
+
+ physmap_idx += 2;
+ if (physmap_idx == PHYSMAP_SIZE) {
+ printf(
+ "Too many segments in the physical address map, giving up\n");
+ break;
+ }
+ physmap[physmap_idx] = smap->base;
+ physmap[physmap_idx + 1] = smap->base + smap->length;
+next_run:
+ } while (vmf.vmf_ebx != 0);
+
+ if (physmap[1] != 0)
+ goto physmap_done;
+
+ /*
+ * try memory map with INT 15:E801
+ */
+ vmf.vmf_ax = 0xE801;
+ if (vm86_intcall(0x15, &vmf) == 0) {
+ extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
+ } else {
+#if 0
+ vmf.vmf_ah = 0x88;
+ vm86_intcall(0x15, &vmf);
+ extmem = vmf.vmf_ax;
+#else
+ /*
+ * Prefer the RTC value for extended memory.
+ */
+ extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
+#endif
+ }
+
+ /*
+ * Only perform calculations in this section if there is no system
+ * map; any system new enough that supports SMAP probably does not
+ * need these workarounds.
+ */
+ /*
+ * Special hack for chipsets that still remap the 384k hole when
+ * there's 16MB of memory - this really confuses people that
+ * are trying to use bus mastering ISA controllers with the
+ * "16MB limit"; they only have 16MB, but the remapping puts
+ * them beyond the limit.
+ */
+ /*
+ * If extended memory is between 15-16MB (16-17MB phys address range),
+ * chop it to 15MB.
+ */
+ if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
+ extmem = 15 * 1024;
+
+ physmap[0] = 0;
+ physmap[1] = basemem * 1024;
+ physmap_idx = 2;
+ physmap[physmap_idx] = 0x100000;
+ physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
+
+ /*
+ * Indicate that we wish to do a speculative search for memory
+ * beyond the end of the reported size if the indicated amount
+ * is 64M (or more).
+ *
+ * XXX we should only do this in the RTC / 0x88 case
+ */
+ if (extmem >= 16 * 1024)
+ speculative_mprobe = TRUE;
+
+physmap_done:
+ /*
+ * Now, physmap contains a map of physical memory.
+ */
+
+#ifdef SMP
+ /* make hole for AP bootstrap code */
+ physmap[1] = mp_bootaddress(physmap[1] / 1024);
+#endif
+
+ /*
+ * Maxmem isn't the "maximum memory", it's one larger than the
+ * highest page of the physical address space. It should be
+ * called something like "Maxphyspage".
+ */
+ Maxmem = physmap[physmap_idx + 1] / PAGE_SIZE;
+
+ /*
+ * If a specific amount of memory is indicated via the MAXMEM
+ * option or the npx0 "msize", then don't do the speculative
+ * memory probe.
+ */
+#ifdef MAXMEM
+ Maxmem = MAXMEM / 4;
+ speculative_mprobe = FALSE;
+#endif
+
+#if NNPX > 0
+ if (resource_int_value("npx", 0, "msize", &msize) == 0) {
+ if (msize != 0) {
+ Maxmem = msize / 4;
+ speculative_mprobe = FALSE;
+ }
+ }
+#endif
+
+#ifdef SMP
+ /* look for the MP hardware - needed for apic addresses */
+ mp_probe();
+#endif
+ /* call pmap initialization to make new kernel address space */
+ pmap_bootstrap(first, 0);
+
+ /*
+ * Size up each available chunk of physical memory.
+ */
+ physmap[0] = PAGE_SIZE; /* mask off page 0 */
+ pa_indx = 0;
+ phys_avail[pa_indx++] = physmap[0];
+ phys_avail[pa_indx] = physmap[0];
+#if 0
+ pte = (pt_entry_t)vtopte(KERNBASE);
+#else
+ pte = (pt_entry_t)CMAP1;
+#endif
+
+ /*
+ * physmap is in bytes, so when converting to page boundaries,
+ * round up the start address and round down the end address.
+ */
+ for (i = 0; i <= physmap_idx; i += 2) {
+ int end;
+
+ end = ptoa(Maxmem);
+ if (physmap[i + 1] < end)
+ end = trunc_page(physmap[i + 1]);
+ for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
+ int tmp, page_bad;
+#if 0
+ int *ptr = 0;
+#else
+ int *ptr = (int *)CADDR1;
+#endif
+
+ /*
+ * block out kernel memory as not available.
+ */
+ if (pa >= 0x100000 && pa < first)
+ continue;
+
+ page_bad = FALSE;
+
+ /*
+ * map page into kernel: valid, read/write,non-cacheable
+ */
+ *pte = pa | PG_V | PG_RW | PG_N;
+ invltlb();
+
+ tmp = *(int *)ptr;
+ /*
+ * Test for alternating 1's and 0's
+ */
+ *(volatile int *)ptr = 0xaaaaaaaa;
+ if (*(volatile int *)ptr != 0xaaaaaaaa) {
+ page_bad = TRUE;
+ }
+ /*
+ * Test for alternating 0's and 1's
+ */
+ *(volatile int *)ptr = 0x55555555;
+ if (*(volatile int *)ptr != 0x55555555) {
+ page_bad = TRUE;
+ }
+ /*
+ * Test for all 1's
+ */
+ *(volatile int *)ptr = 0xffffffff;
+ if (*(volatile int *)ptr != 0xffffffff) {
+ page_bad = TRUE;
+ }
+ /*
+ * Test for all 0's
+ */
+ *(volatile int *)ptr = 0x0;
+ if (*(volatile int *)ptr != 0x0) {
+ page_bad = TRUE;
+ }
+ /*
+ * Restore original value.
+ */
+ *(int *)ptr = tmp;
+
+ /*
+ * Adjust array of valid/good pages.
+ */
+ if (page_bad == TRUE) {
+ continue;
+ }
+ /*
+ * If this good page is a continuation of the
+ * previous set of good pages, then just increase
+ * the end pointer. Otherwise start a new chunk.
+ * Note that "end" points one higher than end,
+ * making the range >= start and < end.
+ * If we're also doing a speculative memory
+ * test and we at or past the end, bump up Maxmem
+ * so that we keep going. The first bad page
+ * will terminate the loop.
+ */
+ if (phys_avail[pa_indx] == pa) {
+ phys_avail[pa_indx] += PAGE_SIZE;
+ if (speculative_mprobe == TRUE &&
+ phys_avail[pa_indx] >= (64*1024*1024))
+ end += PAGE_SIZE;
+ } else {
+ pa_indx++;
+ if (pa_indx == PHYS_AVAIL_ARRAY_END) {
+ printf("Too many holes in the physical address space, giving up\n");
+ pa_indx--;
+ break;
+ }
+ phys_avail[pa_indx++] = pa; /* start */
+ phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
+ }
+ physmem++;
+ }
+ }
+ *pte = 0;
+ invltlb();
+
+ /*
+ * XXX
+ * The last chunk must contain at least one page plus the message
+ * buffer to avoid complicating other code (message buffer address
+ * calculation, etc.).
+ */
+ while (phys_avail[pa_indx - 1] + PAGE_SIZE +
+ round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
+ physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
+ phys_avail[pa_indx--] = 0;
+ phys_avail[pa_indx--] = 0;
+ }
+
+ Maxmem = atop(phys_avail[pa_indx]);
+
+ /* Trim off space for the message buffer. */
+ phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
+
+ avail_end = phys_avail[pa_indx];
+}
+#endif
+
+void
+init386(first)
+ int first;
+{
+ int x;
+ struct gate_descriptor *gdp;
+ int gsel_tss;
+#ifndef SMP
+ /* table descriptors - used to load tables by microp */
+ struct region_descriptor r_gdt, r_idt;
+#endif
+ int off;
+
+ /*
+ * Prevent lowering of the ipl if we call tsleep() early.
+ */
+ safepri = cpl;
+
+ proc0.p_addr = proc0paddr;
+
+ atdevbase = ISA_HOLE_START + KERNBASE;
+
+#ifdef PC98
+ /*
+ * Initialize DMAC
+ */
+ pc98_init_dmac();
+#endif
+
+ /*
+ * make gdt memory segments, the code segment goes up to end of the
+ * page with etext in it, the data segment goes to the end of
+ * the address space
+ */
+ /*
+ * XXX text protection is temporarily (?) disabled. The limit was
+ * i386_btop(round_page(etext)) - 1.
+ */
+ gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
+ gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
+#ifdef SMP
+ gdt_segs[GPRIV_SEL].ssd_limit =
+ i386_btop(sizeof(struct privatespace)) - 1;
+ gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
+ gdt_segs[GPROC0_SEL].ssd_base =
+ (int) &SMP_prvspace[0].globaldata.gd_common_tss;
+ SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0];
+#else
+ gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1;
+ gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss;
+#endif
+
+ for (x = 0; x < NGDT; x++) {
+#ifdef BDE_DEBUGGER
+ /* avoid overwriting db entries with APM ones */
+ if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
+ continue;
+#endif
+ ssdtosd(&gdt_segs[x], &gdt[x].sd);
+ }
+
+ r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
+ r_gdt.rd_base = (int) gdt;
+ lgdt(&r_gdt);
+
+ /* make ldt memory segments */
+ /*
+ * The data segment limit must not cover the user area because we
+ * don't want the user area to be writable in copyout() etc. (page
+ * level protection is lost in kernel mode on 386's). Also, we
+ * don't want the user area to be writable directly (page level
+ * protection of the user area is not available on 486's with
+ * CR0_WP set, because there is no user-read/kernel-write mode).
+ *
+ * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it
+ * should be spelled ...MAX_USER...
+ */
+#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS
+ /*
+ * The code segment limit has to cover the user area until we move
+ * the signal trampoline out of the user area. This is safe because
+ * the code segment cannot be written to directly.
+ */
+#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
+ ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
+ ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
+ for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
+ ssdtosd(&ldt_segs[x], &ldt[x].sd);
+
+ _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
+ lldt(_default_ldt);
+#ifdef USER_LDT
+ currentldt = _default_ldt;
+#endif
+ /* exceptions */
+ for (x = 0; x < NIDT; x++)
+ setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
+ setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(0x80, &IDTVEC(int0x80_syscall),
+ SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
+
+ r_idt.rd_limit = sizeof(idt) - 1;
+ r_idt.rd_base = (int) idt;
+ lidt(&r_idt);
+
+ /*
+ * Initialize the console before we print anything out.
+ */
+ cninit();
+
+#include "isa.h"
+#if NISA >0
+ isa_defaultirq();
+#endif
+ rand_initialize();
+
+#ifdef DDB
+ kdb_init();
+ if (boothowto & RB_KDB)
+ Debugger("Boot flags requested debugger");
+#endif
+
+ finishidentcpu(); /* Final stage of CPU initialization */
+ setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+ initializecpu(); /* Initialize CPU registers */
+
+ /* make an initial tss so cpu can get interrupt stack on syscall! */
+ common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
+ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
+ common_tss.tss_ioopt = (sizeof common_tss) << 16;
+ gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+ ltr(gsel_tss);
+ private_tss = 0;
+ tss_gdt = &gdt[GPROC0_SEL].sd;
+ common_tssd = *tss_gdt;
+
+ dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
+ dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
+ dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
+ dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_cr3 = (int)IdlePTD;
+ dblfault_tss.tss_eip = (int) dblfault_handler;
+ dblfault_tss.tss_eflags = PSL_KERNEL;
+ dblfault_tss.tss_ds = dblfault_tss.tss_es =
+ dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
+ dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
+ dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+
+ vm86_initialize();
+#ifdef PC98
+ getmemsize_pc98(first);
+#else
+ getmemsize(first);
+#endif
/* now running on new page tables, configured,and u/iom is accessible */
/* Map the message buffer. */
@@ -1668,9 +1934,7 @@ init386(first)
#ifdef SMP
proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
-#ifdef VM86
proc0.p_addr->u_pcb.pcb_ext = 0;
-#endif
/* Sigh, relocate physical addresses left from bootstrap */
if (bootinfo.bi_modulep) {