author    jake <jake@FreeBSD.org>  2002-12-22 23:01:14 +0000
committer jake <jake@FreeBSD.org>  2002-12-22 23:01:14 +0000
commit    8355ab5ec5605d3dc2314473b03127582940b58f
tree      872eae09585ba4dfdccd560c353c7fcb4b5939d2 /sys/sparc64
parent    c7acde574ffeaa3b2bb138e8136e973a4a48f176
- Rearrange pmap_bootstrap slightly so that it follows dependency order more closely.
- Put the kernel tsb before the kernel load address, below VM_MIN_KERNEL_ADDRESS, instead of after the kernel where it consumes usable kva. The tsb is magic mapped, so its virtual address is irrelevant; it just needs to be out of the way.
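A minimal sketch of the sizing and placement arithmetic this change relies on. The constant values below (PAGE_SHIFT, TTE_SHIFT, VM_MIN_KERNEL_ADDRESS) and the physsz example are illustrative assumptions, not the actual sparc64 header values:

#include <inttypes.h>
#include <stdio.h>

/* Assumed values for illustration only, not taken from sparc64 headers. */
#define PAGE_SHIFT              13                      /* 8K base pages */
#define TTE_SHIFT               4                       /* 16-byte ttes */
#define PAGE_SIZE_4M            (UINT64_C(1) << 22)     /* 4M super pages */
#define VM_MIN_KERNEL_ADDRESS   UINT64_C(0xc0000000)    /* placeholder */

#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	uint64_t physsz = UINT64_C(512) << 20;  /* example: 512MB of ram */

	/*
	 * Each base page needs one tte, so kva is sized in units that
	 * keep the tsb a whole number of 4M pages.
	 */
	uint64_t virtsz = roundup(physsz,
	    PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
	uint64_t tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);

	/*
	 * After this change the tsb sits immediately below the start of
	 * kernel virtual memory instead of consuming usable kva above
	 * the kernel; since it is locked into the tlb, the virtual
	 * address only has to be out of the way.
	 */
	uint64_t tsb_kernel = VM_MIN_KERNEL_ADDRESS - tsb_kernel_size;

	printf("virtsz %#" PRIx64 ", tsb size %#" PRIx64
	    ", tsb at %#" PRIx64 "\n",
	    virtsz, tsb_kernel_size, tsb_kernel);
	return (0);
}

With the assumed constants, 512MB of physical memory rounds virtsz up to 2GB, which yields a 4MB tsb placed at 0xbfc00000, just under the assumed start of kva.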
Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/sparc64/pmap.c | 99
1 file changed, 49 insertions, 50 deletions
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 0296376..4e57d77 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -305,41 +305,30 @@ pmap_bootstrap(vm_offset_t ekva)
}
physmem = btoc(physsz);
+ /*
+ * Calculate the size of kernel virtual memory, and the size and mask
+ * for the kernel tsb.
+ */
virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
/*
- * Get the available physical memory ranges from /memory/reg. These
- * are only used for kernel dumps, but it may not be wise to do prom
- * calls in that situation.
- */
- if ((sz = OF_getproplen(pmem, "reg")) == -1)
- panic("pmap_bootstrap: getproplen /memory/reg");
- if (sizeof(sparc64_memreg) < sz)
- panic("pmap_bootstrap: sparc64_memreg too small");
- if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
- panic("pmap_bootstrap: getprop /memory/reg");
- sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
-
- /*
- * Set the start and end of kva. The kernel is loaded at the first
- * available 4 meg super page, so round up to the end of the page.
- */
- virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
- virtual_end = vm_max_kernel_address;
- kernel_vm_end = vm_max_kernel_address;
-
- /*
- * Allocate the kernel tsb.
+ * Allocate the kernel tsb and lock it in the tlb.
*/
pa = pmap_bootstrap_alloc(tsb_kernel_size);
if (pa & PAGE_MASK_4M)
panic("pmap_bootstrap: tsb unaligned\n");
tsb_kernel_phys = pa;
- tsb_kernel = (struct tte *)virtual_avail;
- virtual_avail += tsb_kernel_size;
+ tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
+ pmap_map_tsb();
+ bzero(tsb_kernel, tsb_kernel_size);
+
+ /*
+ * Allocate the message buffer.
+ */
+ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
/*
* Patch the virtual address and the tsb mask into the trap table.
@@ -373,12 +362,6 @@ pmap_bootstrap(vm_offset_t ekva)
PATCH(tl1_dmmu_prot_patch_2);
/*
- * Lock it in the tlb.
- */
- pmap_map_tsb();
- bzero(tsb_kernel, tsb_kernel_size);
-
- /*
* Enter fake 8k pages for the 4MB kernel pages, so that
* pmap_kextract() will work for them.
*/
@@ -394,6 +377,26 @@ pmap_bootstrap(vm_offset_t ekva)
}
/*
+ * Set the start and end of kva. The kernel is loaded at the first
+ * available 4 meg super page, so round up to the end of the page.
+ */
+ virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
+ virtual_end = vm_max_kernel_address;
+ kernel_vm_end = vm_max_kernel_address;
+
+ /*
+ * Allocate virtual address space for the message buffer.
+ */
+ msgbufp = (struct msgbuf *)virtual_avail;
+ virtual_avail += round_page(MSGBUF_SIZE);
+
+ /*
+ * Allocate virtual address space to map pages during a kernel dump.
+ */
+ crashdumpmap = virtual_avail;
+ virtual_avail += MAXDUMPPGS * PAGE_SIZE;
+
+ /*
* Allocate a kernel stack with guard page for thread0 and map it into
* the kernel tsb.
*/
@@ -411,9 +414,13 @@ pmap_bootstrap(vm_offset_t ekva)
}
/*
- * Allocate the message buffer.
+ * Calculate the first and last available physical addresses.
*/
- msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
+ avail_start = phys_avail[0];
+ for (i = 0; phys_avail[i + 2] != 0; i += 2)
+ ;
+ avail_end = phys_avail[i + 1];
+ Maxmem = sparc64_btop(avail_end);
/*
* Add the prom mappings to the kernel tsb.
@@ -452,25 +459,17 @@ pmap_bootstrap(vm_offset_t ekva)
}
/*
- * Calculate the first and last available physical addresses.
- */
- avail_start = phys_avail[0];
- for (i = 0; phys_avail[i + 2] != 0; i += 2)
- ;
- avail_end = phys_avail[i + 1];
- Maxmem = sparc64_btop(avail_end);
-
- /*
- * Allocate virtual address space for the message buffer.
- */
- msgbufp = (struct msgbuf *)virtual_avail;
- virtual_avail += round_page(MSGBUF_SIZE);
-
- /*
- * Allocate virtual address space to map pages during a kernel dump.
+ * Get the available physical memory ranges from /memory/reg. These
+ * are only used for kernel dumps, but it may not be wise to do prom
+ * calls in that situation.
*/
- crashdumpmap = virtual_avail;
- virtual_avail += MAXDUMPPGS * PAGE_SIZE;
+ if ((sz = OF_getproplen(pmem, "reg")) == -1)
+ panic("pmap_bootstrap: getproplen /memory/reg");
+ if (sizeof(sparc64_memreg) < sz)
+ panic("pmap_bootstrap: sparc64_memreg too small");
+ if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
+ panic("pmap_bootstrap: getprop /memory/reg");
+ sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
/*
* Initialize the kernel pmap (which is statically allocated).