summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjmallett <jmallett@FreeBSD.org>2010-04-18 22:32:07 +0000
committerjmallett <jmallett@FreeBSD.org>2010-04-18 22:32:07 +0000
commit4f9a815abecac01cf0fb2df739683c5cfb01c867 (patch)
treeda5df1cb44611f0607de0cedd81f3d7d10512466 /sys
parent2c6900a254f8747c55019ef3c5fff96917c31b48 (diff)
downloadFreeBSD-src-4f9a815abecac01cf0fb2df739683c5cfb01c867.zip
FreeBSD-src-4f9a815abecac01cf0fb2df739683c5cfb01c867.tar.gz
o) Add a VM find-space option, VMFS_TLB_ALIGNED_SPACE, which searches the
   address space for an address as aligned by the new pmap_align_tlb()
   function, which is for constraints imposed by the TLB. [1]
o) Add a kmem_alloc_nofault_space() function, which acts like
   kmem_alloc_nofault() but allows the caller to specify which find-space
   option to use. [1]
o) Use kmem_alloc_nofault_space() with VMFS_TLB_ALIGNED_SPACE to allocate
   the kernel stack address on MIPS. [1]
o) Make pmap_align_tlb() on MIPS align addresses so that they do not start
   on an odd boundary within the TLB, so that they are suitable for
   insertion as wired entries and do not have to share a TLB entry with
   another mapping, assuming they are appropriately-sized.
o) Eliminate md_realstack now that the kstack will be appropriately-aligned
   on MIPS.
o) Increase the number of guard pages to 2 so that we retain the proper
   alignment of the kstack address.

Reviewed by:	[1] alc
X-MFC-after:	Making sure alc has not come up with a better interface.
Diffstat (limited to 'sys')
-rw-r--r--sys/mips/include/param.h7
-rw-r--r--sys/mips/include/proc.h3
-rw-r--r--sys/mips/mips/exception.S4
-rw-r--r--sys/mips/mips/genassym.c2
-rw-r--r--sys/mips/mips/machdep.c5
-rw-r--r--sys/mips/mips/pmap.c15
-rw-r--r--sys/mips/mips/swtch.S2
-rw-r--r--sys/mips/mips/vm_machdep.c26
-rw-r--r--sys/vm/pmap.h3
-rw-r--r--sys/vm/vm_extern.h1
-rw-r--r--sys/vm/vm_glue.c9
-rw-r--r--sys/vm/vm_kern.c29
-rw-r--r--sys/vm/vm_map.c13
-rw-r--r--sys/vm/vm_map.h3
14 files changed, 88 insertions, 34 deletions
diff --git a/sys/mips/include/param.h b/sys/mips/include/param.h
index 0edb8b9..06cdeac 100644
--- a/sys/mips/include/param.h
+++ b/sys/mips/include/param.h
@@ -113,12 +113,9 @@
/*
* The kernel stack needs to be aligned on a (PAGE_SIZE * 2) boundary.
- *
- * Although we allocate 3 pages for the kernel stack we end up using
- * only the 2 pages that are aligned on a (PAGE_SIZE * 2) boundary.
*/
-#define KSTACK_PAGES 3 /* kernel stack*/
-#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
+#define KSTACK_PAGES 2 /* kernel stack*/
+#define KSTACK_GUARD_PAGES 2 /* pages of kstack guard; 0 disables */
#define UPAGES 2
diff --git a/sys/mips/include/proc.h b/sys/mips/include/proc.h
index 99dab78..0491b11 100644
--- a/sys/mips/include/proc.h
+++ b/sys/mips/include/proc.h
@@ -44,7 +44,7 @@
*/
struct mdthread {
int md_flags; /* machine-dependent flags */
- int md_upte[KSTACK_PAGES - 1]; /* ptes for mapping u pcb */
+ int md_upte[KSTACK_PAGES]; /* ptes for mapping u pcb */
int md_ss_addr; /* single step address for ptrace */
int md_ss_instr; /* single step instruction for ptrace */
register_t md_saved_intr;
@@ -53,7 +53,6 @@ struct mdthread {
int md_pc_ctrl; /* performance counter control */
int md_pc_count; /* performance counter */
int md_pc_spill; /* performance counter spill */
- vm_offset_t md_realstack;
void *md_tls;
};
diff --git a/sys/mips/mips/exception.S b/sys/mips/mips/exception.S
index 8c32b58..aa775ea 100644
--- a/sys/mips/mips/exception.S
+++ b/sys/mips/mips/exception.S
@@ -928,7 +928,7 @@ tlb_insert_random:
*/
GET_CPU_PCPU(k1)
lw k0, PC_CURTHREAD(k1)
- lw k0, TD_REALKSTACK(k0)
+ lw k0, TD_KSTACK(k0)
sltu k0, k0, sp
bnez k0, _C_LABEL(MipsKernGenException)
nop
@@ -975,7 +975,7 @@ tlb_insert_random:
*/
GET_CPU_PCPU(k1)
lw k0, PC_CURTHREAD(k1)
- sw zero, TD_REALKSTACK(k0)
+ sw zero, TD_KSTACK(k0)
move a1, a0
PANIC("kernel stack overflow - trapframe at %p")
diff --git a/sys/mips/mips/genassym.c b/sys/mips/mips/genassym.c
index 6cec2b7..b10a5e2 100644
--- a/sys/mips/mips/genassym.c
+++ b/sys/mips/mips/genassym.c
@@ -65,7 +65,7 @@ __FBSDID("$FreeBSD$");
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_UPTE, offsetof(struct thread, td_md.md_upte));
-ASSYM(TD_REALKSTACK, offsetof(struct thread, td_md.md_realstack));
+ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
diff --git a/sys/mips/mips/machdep.c b/sys/mips/mips/machdep.c
index 6ca4ec9..b3f36e5 100644
--- a/sys/mips/mips/machdep.c
+++ b/sys/mips/mips/machdep.c
@@ -298,14 +298,13 @@ mips_proc0_init(void)
(long)kstack0));
thread0.td_kstack = kstack0;
thread0.td_kstack_pages = KSTACK_PAGES;
- thread0.td_md.md_realstack = roundup2(thread0.td_kstack, PAGE_SIZE * 2);
/*
* Do not use cpu_thread_alloc to initialize these fields
* thread0 is the only thread that has kstack located in KSEG0
* while cpu_thread_alloc handles kstack allocated in KSEG2.
*/
- thread0.td_pcb = (struct pcb *)(thread0.td_md.md_realstack +
- (thread0.td_kstack_pages - 1) * PAGE_SIZE) - 1;
+ thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
+ thread0.td_kstack_pages * PAGE_SIZE) - 1;
thread0.td_frame = &thread0.td_pcb->pcb_regs;
/* Steal memory for the dynamic per-cpu area. */
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 6bb38d4..e3b7daf 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2813,6 +2813,21 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
*addr = ((*addr + SEGOFSET) & ~SEGOFSET) + superpage_offset;
}
+/*
+ * Increase the starting virtual address of the given mapping so
+ * that it is aligned to not be the second page in a TLB entry.
+ * This routine assumes that the length is appropriately-sized so
+ * that the allocation does not share a TLB entry at all if required.
+ */
+void
+pmap_align_tlb(vm_offset_t *addr)
+{
+ if ((*addr & PAGE_SIZE) == 0)
+ return;
+ *addr += PAGE_SIZE;
+ return;
+}
+
int pmap_pid_dump(int pid);
int
diff --git a/sys/mips/mips/swtch.S b/sys/mips/mips/swtch.S
index 4fc3cbd..2919d21 100644
--- a/sys/mips/mips/swtch.S
+++ b/sys/mips/mips/swtch.S
@@ -339,7 +339,7 @@ blocked_loop:
sw a1, PC_CURTHREAD(a3)
lw a2, TD_PCB(a1)
sw a2, PC_CURPCB(a3)
- lw v0, TD_REALKSTACK(a1)
+ lw v0, TD_KSTACK(a1)
li s0, MIPS_KSEG2_START # If Uarea addr is below kseg2,
bltu v0, s0, sw2 # no need to insert in TLB.
lw a1, TD_UPTE+0(s7) # t0 = first u. pte
diff --git a/sys/mips/mips/vm_machdep.c b/sys/mips/mips/vm_machdep.c
index cb8a8d9..d385fdd 100644
--- a/sys/mips/mips/vm_machdep.c
+++ b/sys/mips/mips/vm_machdep.c
@@ -217,13 +217,9 @@ cpu_thread_swapin(struct thread *td)
* part of the thread struct so cpu_switch() can quickly map in
* the pcb struct and kernel stack.
*/
- if (!(pte = pmap_segmap(kernel_pmap, td->td_md.md_realstack)))
- panic("cpu_thread_swapin: invalid segmap");
- pte += ((vm_offset_t)td->td_md.md_realstack >> PAGE_SHIFT) & (NPTEPG - 1);
-
- for (i = 0; i < KSTACK_PAGES - 1; i++) {
+ for (i = 0; i < KSTACK_PAGES; i++) {
+ pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
td->td_md.md_upte[i] = *pte & ~(PTE_RO|PTE_WIRED);
- pte++;
}
}
@@ -238,22 +234,14 @@ cpu_thread_alloc(struct thread *td)
pt_entry_t *pte;
int i;
- if (td->td_kstack & (1 << PAGE_SHIFT))
- td->td_md.md_realstack = td->td_kstack + PAGE_SIZE;
- else
- td->td_md.md_realstack = td->td_kstack;
-
- td->td_pcb = (struct pcb *)(td->td_md.md_realstack +
- (td->td_kstack_pages - 1) * PAGE_SIZE) - 1;
+ KASSERT((td->td_kstack & (1 << PAGE_SHIFT)) == 0, ("kernel stack must be aligned."));
+ td->td_pcb = (struct pcb *)(td->td_kstack +
+ td->td_kstack_pages * PAGE_SIZE) - 1;
td->td_frame = &td->td_pcb->pcb_regs;
- if (!(pte = pmap_segmap(kernel_pmap, td->td_md.md_realstack)))
- panic("cpu_thread_alloc: invalid segmap");
- pte += ((vm_offset_t)td->td_md.md_realstack >> PAGE_SHIFT) & (NPTEPG - 1);
-
- for (i = 0; i < KSTACK_PAGES - 1; i++) {
+ for (i = 0; i < KSTACK_PAGES; i++) {
+ pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
td->td_md.md_upte[i] = *pte & ~(PTE_RO|PTE_WIRED);
- pte++;
}
}
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 02fda07..51a3bbe 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -98,6 +98,9 @@ extern vm_offset_t kernel_vm_end;
void pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
vm_size_t);
+#if defined(__mips__)
+void pmap_align_tlb(vm_offset_t *);
+#endif
void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 83c468e..ca6d49c 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -47,6 +47,7 @@ vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
unsigned long boundary, vm_memattr_t memattr);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
+vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);
vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index c080ca0..bcdfd34 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -373,8 +373,17 @@ vm_thread_new(struct thread *td, int pages)
/*
* Get a kernel virtual address for this thread's kstack.
*/
+#if defined(__mips__)
+ /*
+ * We need to align the kstack's mapped address to fit within
+ * a single TLB entry.
+ */
+ ks = kmem_alloc_nofault_space(kernel_map,
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
+#else
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+#endif
if (ks == 0) {
printf("vm_thread_new: kstack allocation failed\n");
vm_object_deallocate(ksobj);
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 9006572..739d289 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -119,6 +119,35 @@ kmem_alloc_nofault(map, size)
}
/*
+ * kmem_alloc_nofault_space:
+ *
+ * Allocate a virtual address range with no underlying object and
+ * no initial mapping to physical memory within the specified
+ * address space. Any mapping from this range to physical memory
+ * must be explicitly created prior to its use, typically with
+ * pmap_qenter(). Any attempt to create a mapping on demand
+ * through vm_fault() will result in a panic.
+ */
+vm_offset_t
+kmem_alloc_nofault_space(map, size, find_space)
+ vm_map_t map;
+ vm_size_t size;
+ int find_space;
+{
+ vm_offset_t addr;
+ int result;
+
+ size = round_page(size);
+ addr = vm_map_min(map);
+ result = vm_map_find(map, NULL, 0, &addr, size, find_space,
+ VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+ if (result != KERN_SUCCESS) {
+ return (0);
+ }
+ return (addr);
+}
+
+/*
* Allocate wired-down memory in the kernel's address map
* or a submap.
*/
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index fe0c0f5..1d22fa6 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1394,9 +1394,20 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
- if (find_space == VMFS_ALIGNED_SPACE)
+ switch (find_space) {
+ case VMFS_ALIGNED_SPACE:
pmap_align_superpage(object, offset, addr,
length);
+ break;
+#ifdef VMFS_TLB_ALIGNED_SPACE
+ case VMFS_TLB_ALIGNED_SPACE:
+ pmap_align_tlb(addr);
+ break;
+#endif
+ default:
+ break;
+ }
+
start = *addr;
}
result = vm_map_insert(map, object, offset, start, start +
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 5454ce6..d5c5b51 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -326,6 +326,9 @@ long vmspace_wired_count(struct vmspace *vmspace);
#define VMFS_NO_SPACE 0 /* don't find; use the given range */
#define VMFS_ANY_SPACE 1 /* find a range with any alignment */
#define VMFS_ALIGNED_SPACE 2 /* find a superpage-aligned range */
+#if defined(__mips__)
+#define VMFS_TLB_ALIGNED_SPACE 3 /* find a TLB entry aligned range */
+#endif
/*
* vm_map_wire and vm_map_unwire option flags
OpenPOWER on IntegriCloud