summaryrefslogtreecommitdiffstats
path: root/sys/alpha/alpha/pmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/alpha/alpha/pmap.c')
-rw-r--r--  sys/alpha/alpha/pmap.c  71
1 files changed, 61 insertions, 10 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 72db2bf..df7f181 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -898,12 +898,16 @@ retry:
return m;
}
+/*
+ * Hard upper bound, in pages, on any per-thread kernel stack.
+ * pmap_new_thread() clamps its 'pages' argument to this value.
+ * The #ifndef guard lets a kernel config override the default of 32.
+ */
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack for a new thread.
* This routine directly affects the fork perf for a process and thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_object_t ksobj;
@@ -911,15 +915,21 @@ pmap_new_thread(struct thread *td)
vm_page_t m;
pt_entry_t *ptek, oldpte;
+ /* Bounds check */
+ if (pages <= 1)
+ pages = KSTACK_PAGES;
+ else if (pages > KSTACK_MAX_PAGES)
+ pages = KSTACK_MAX_PAGES;
+
/*
* allocate object for the kstack
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
#ifdef KSTACK_GUARD
/* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
@@ -935,17 +945,23 @@ pmap_new_thread(struct thread *td)
ptek++;
#else
/* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
ptek = vtopte(ks);
#endif
/*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
+ /*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
@@ -976,15 +992,17 @@ pmap_dispose_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pt_entry_t *ptek;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
ptek = vtopte(ks);
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@@ -1002,14 +1020,43 @@ pmap_dispose_thread(td)
* address map.
*/
#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
- kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
+ kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
+ * Set up a variable sized alternate kstack.
+ *
+ * Saves the thread's current kstack (VM object, base address, page
+ * count) into the td_altkstack* fields, then allocates a fresh kstack
+ * of 'pages' pages through pmap_new_thread().  The saved stack is
+ * restored later by pmap_dispose_altkstack().
+ */
+void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ /* shuffle the original stack */
+ td->td_altkstack_obj = td->td_kstack_obj;
+ td->td_altkstack = td->td_kstack;
+ td->td_altkstack_pages = td->td_kstack_pages;
+
+ pmap_new_thread(td, pages);
+}
+
+/*
+ * Tear down the alternate kstack installed by pmap_new_altkstack().
+ *
+ * Frees the currently-installed (alternate) kstack via
+ * pmap_dispose_thread(), then moves the saved original kstack fields
+ * back from td_altkstack* into td_kstack* and clears the td_altkstack*
+ * fields so a stale pointer cannot be reused.
+ * (K&R-style parameter declaration, matching this file's convention.)
+ */
+void
+pmap_dispose_altkstack(td)
+ struct thread *td;
+{
+ pmap_dispose_thread(td);
+
+ /* restore the original kstack */
+ td->td_kstack = td->td_altkstack;
+ td->td_kstack_obj = td->td_altkstack_obj;
+ td->td_kstack_pages = td->td_altkstack_pages;
+ td->td_altkstack = 0;
+ td->td_altkstack_obj = NULL;
+ td->td_altkstack_pages = 0;
+}
+
+/*
* Allow the kernel stack for a thread to be prejudicially paged out.
*/
void
@@ -1017,6 +1064,7 @@ pmap_swapout_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
@@ -1026,9 +1074,10 @@ pmap_swapout_thread(td)
*/
alpha_fpstate_save(td, 1);
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@@ -1048,13 +1097,15 @@ pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {
OpenPOWER on IntegriCloud