author     marius <marius@FreeBSD.org>    2009-01-01 14:01:21 +0000
committer  marius <marius@FreeBSD.org>    2009-01-01 14:01:21 +0000
commit     78ee2961aaa4bb0c734b924a9edf64aa2aef50fe (patch)
tree       b4949f390128d9f1306b707fa701792681702660 /sys/sparc64
parent     0596e3449bae55cdc93922846db4c24ac0201bdd (diff)
- Currently the PMAP code is laid out to let the kernel TSB cover the
  whole KVA space, using one locked 4MB dTLB entry per GB of physical
  memory. On Cheetah-class machines only the dt16 can hold locked
  entries though, which would be completely consumed for the kernel
  TSB on machines with >= 16GB. Therefore limit the KVA space to use
  no more than half of the lockable dTLB slots, given that we also
  need them for other things.
- Add sanity checks which ensure that we don't exhaust the (lockable)
  TLB slots.
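
As a rough illustration of the sizing argument above, the following
standalone snippet tabulates how many locked dTLB entries the kernel
TSB would consume for a few physical memory sizes and compares that
against the new half-of-dtlb_slots policy. It is purely hypothetical
and not part of the commit: the one-entry-per-GB ratio and the
16-entry dt16 come from the message, everything else is made up for
the example.

/*
 * Back-of-the-envelope check: the kernel TSB costs one locked 4MB dTLB
 * entry per GB of physical memory, while a Cheetah-class dt16 offers
 * only 16 lockable slots, of which the new policy grants the TSB at
 * most half.  Illustration only, not kernel code.
 */
#include <stdio.h>

int
main(void)
{
	const int dtlb_slots = 16;		/* Cheetah dt16 */
	const int limit = dtlb_slots / 2;	/* new policy: at most half */
	unsigned long physgb;

	for (physgb = 4; physgb <= 32; physgb *= 2)
		printf("%2lu GB RAM -> %2lu locked dTLB entries for the TSB "
		    "(%s the limit of %d)\n", physgb, physgb,
		    physgb > (unsigned long)limit ? "exceeds" : "within",
		    limit);
	return (0);
}

At 16GB the unclamped TSB alone would consume the entire dt16, which
is exactly the situation the clamp added to pmap_bootstrap() below
avoids.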
Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/include/tlb.h     |  2
-rw-r--r--  sys/sparc64/sparc64/machdep.c | 17
-rw-r--r--  sys/sparc64/sparc64/pmap.c    | 12
3 files changed, 29 insertions, 2 deletions
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h
index 06a8296..f0a4a7b 100644
--- a/sys/sparc64/include/tlb.h
+++ b/sys/sparc64/include/tlb.h
@@ -129,6 +129,8 @@ typedef void tlb_flush_user_t(void);
struct pmap;
struct tlb_entry;
+extern int dtlb_slots;
+extern int itlb_slots;
extern int kernel_tlb_slots;
extern struct tlb_entry *kernel_tlbs;
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index 4e1e300..4eb8d9c 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -115,6 +115,8 @@ typedef int ofw_vec_t(void *);
extern vm_offset_t ksym_start, ksym_end;
#endif
+int dtlb_slots;
+int itlb_slots;
struct tlb_entry *kernel_tlbs;
int kernel_tlb_slots;
@@ -276,7 +278,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
tick_stop();
/*
- * Set up Open Firmware entry points
+ * Set up Open Firmware entry points.
*/
ofw_tba = rdpr(tba);
ofw_vec = (u_long)vec;
@@ -380,6 +382,19 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
end = (vm_offset_t)_end;
}
+ /*
+ * Determine the TLB slot maxima, which are expected to be
+ * equal across all CPUs.
+ * NB: for Cheetah-class CPUs, these properties only refer
+ * to the t16s.
+ */
+ if (OF_getprop(pc->pc_node, "#dtlb-entries", &dtlb_slots,
+ sizeof(dtlb_slots)) == -1)
+ panic("sparc64_init: cannot determine number of dTLB slots");
+ if (OF_getprop(pc->pc_node, "#itlb-entries", &itlb_slots,
+ sizeof(itlb_slots)) == -1)
+ panic("sparc64_init: cannot determine number of iTLB slots");
+
cache_init(pc);
cache_enable();
uma_set_align(pc->pc_cache.dc_linesize - 1);
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index c083bbb..08c9ac2 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -334,13 +334,23 @@ pmap_bootstrap(vm_offset_t ekva)
/*
* Calculate the size of kernel virtual memory, and the size and mask
- * for the kernel TSB.
+ * for the kernel TSB based on the physical memory size but limited
+ * by letting the kernel TSB take up no more than half of the dTLB
+ * slots available for locked entries.
*/
virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
+ virtsz = MIN(virtsz,
+ (dtlb_slots / 2 * PAGE_SIZE_4M) << (PAGE_SHIFT - TTE_SHIFT));
vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
+ if (kernel_tlb_slots + PCPU_PAGES + tsb_kernel_size / PAGE_SIZE_4M +
+ 1 /* PROM page */ + 1 /* spare */ > dtlb_slots)
+ panic("pmap_bootstrap: insufficient dTLB entries");
+ if (kernel_tlb_slots + 1 /* PROM page */ + 1 /* spare */ > itlb_slots)
+ panic("pmap_bootstrap: insufficient iTLB entries");
+
/*
* Allocate the kernel TSB and lock it in the TLB.
*/
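
To make the arithmetic of the new clamp and sanity checks concrete,
here is a minimal userland sketch of the same calculation. PAGE_SHIFT
= 13 (8KB pages) is the sparc64 value; TTE_SHIFT = 5 is an assumption
chosen so that one 4MB TSB page maps 1GB, matching the
one-entry-per-GB ratio in the commit message; the values used for
dtlb_slots, kernel_tlb_slots and PCPU_PAGES are made-up examples, not
anything read from the headers.

/*
 * Sketch of the KVA clamp and the locked-slot budget for an example
 * machine with 32GB of RAM and a 16-entry lockable dTLB (Cheetah dt16).
 * Assumes an LP64 environment, like sparc64 itself.
 */
#include <stdio.h>

#define	PAGE_SHIFT	13		/* 8KB base pages */
#define	TTE_SHIFT	5		/* assumed: one 4MB TSB page per GB */
#define	PAGE_SIZE_4M	(4UL * 1024 * 1024)
#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	unsigned long physsz = 32UL << 30;	/* example: 32GB of RAM */
	int dtlb_slots = 16;			/* Cheetah dt16 */
	int kernel_tlb_slots = 2;		/* example: kernel text/data */
	int pcpu_pages = 1;			/* example stand-in for PCPU_PAGES */
	unsigned long virtsz, tsb_kernel_size, tsb_slots;

	/* Clamp KVA so the TSB needs at most half of the lockable slots. */
	virtsz = physsz;	/* physsz is already suitably rounded here */
	virtsz = MIN(virtsz,
	    (dtlb_slots / 2 * PAGE_SIZE_4M) << (PAGE_SHIFT - TTE_SHIFT));
	tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
	tsb_slots = tsb_kernel_size / PAGE_SIZE_4M;

	printf("KVA clamped to %lu GB, kernel TSB %lu MB (%lu locked 4MB "
	    "entries)\n", virtsz >> 30, tsb_kernel_size >> 20, tsb_slots);

	/* Mirror the dTLB sanity check added to pmap_bootstrap(). */
	printf("locked dTLB demand: %lu of %d slots\n",
	    kernel_tlb_slots + pcpu_pages + tsb_slots +
	    1 /* PROM page */ + 1 /* spare */, dtlb_slots);
	return (0);
}

With these example numbers the clamp limits KVA to 8GB, so the kernel
TSB occupies 8 locked entries and the total demand of 13 stays below
the 16 available slots, which is the condition the new panic checks
enforce.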