summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorscottl <scottl@FreeBSD.org>2002-10-02 07:44:29 +0000
committerscottl <scottl@FreeBSD.org>2002-10-02 07:44:29 +0000
commit3a150bca9cc1b9ca767ca73d95fd54081237210b (patch)
tree0a9cc28f7570a42ae4e5df4cfc55dee42c36d9d8 /sys
parent734ef490b8cfe3acc5f45425a63fac5e7c5173a3 (diff)
downloadFreeBSD-src-3a150bca9cc1b9ca767ca73d95fd54081237210b.zip
FreeBSD-src-3a150bca9cc1b9ca767ca73d95fd54081237210b.tar.gz
Some kernel threads try to do significant work, and the default KSTACK_PAGES
doesn't give them enough stack to do much before blowing away the pcb. This adds MI and MD code to allow the allocation of an alternate kstack whose size can be specified when calling kthread_create. Passing the value 0 prevents the alternate kstack from being created. Note that the ia64 MD code is missing for now, and PowerPC was only partially written due to the pmap.c being incomplete there. Though this patch does not modify anything to make use of the alternate kstack, acpi and usb are good candidates. Reviewed by: jake, peter, jhb
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/alpha/pmap.c71
-rw-r--r--sys/amd64/amd64/pmap.c84
-rw-r--r--sys/conf/NOTES2
-rw-r--r--sys/conf/options1
-rw-r--r--sys/dev/aac/aac.c2
-rw-r--r--sys/dev/acpica/Osd/OsdSchedule.c2
-rw-r--r--sys/dev/acpica/acpi_thermal.c2
-rw-r--r--sys/dev/ata/ata-raid.c2
-rw-r--r--sys/dev/isp/isp_freebsd.c2
-rw-r--r--sys/dev/md/md.c2
-rw-r--r--sys/dev/pccbb/pccbb.c2
-rw-r--r--sys/dev/pcic/i82365.c2
-rw-r--r--sys/dev/random/randomdev.c2
-rw-r--r--sys/dev/usb/usb_port.h4
-rw-r--r--sys/i386/i386/pmap.c84
-rw-r--r--sys/i386/linux/linux_machdep.c2
-rw-r--r--sys/kern/init_main.c2
-rw-r--r--sys/kern/kern_fork.c13
-rw-r--r--sys/kern/kern_idle.c4
-rw-r--r--sys/kern/kern_intr.c2
-rw-r--r--sys/kern/kern_kse.c2
-rw-r--r--sys/kern/kern_kthread.c6
-rw-r--r--sys/kern/kern_ktrace.c2
-rw-r--r--sys/kern/kern_proc.c9
-rw-r--r--sys/kern/kern_thread.c2
-rw-r--r--sys/kern/vfs_aio.c2
-rw-r--r--sys/netsmb/smb_iod.c2
-rw-r--r--sys/nfsclient/nfs_nfsiod.c2
-rw-r--r--sys/powerpc/aim/mmu_oea.c30
-rw-r--r--sys/powerpc/powerpc/mmu_oea.c30
-rw-r--r--sys/powerpc/powerpc/pmap.c30
-rw-r--r--sys/sparc64/sparc64/pmap.c78
-rw-r--r--sys/sys/kthread.h2
-rw-r--r--sys/sys/proc.h6
-rw-r--r--sys/vm/pmap.h4
35 files changed, 394 insertions, 100 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 72db2bf..df7f181 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -898,12 +898,16 @@ retry:
return m;
}
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack for a new thread.
* This routine directly affects the fork perf for a process and thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_object_t ksobj;
@@ -911,15 +915,21 @@ pmap_new_thread(struct thread *td)
vm_page_t m;
pt_entry_t *ptek, oldpte;
+ /* Bounds check */
+ if (pages <= 1)
+ pages = KSTACK_PAGES;
+ else if (pages > KSTACK_MAX_PAGES)
+ pages = KSTACK_MAX_PAGES;
+
/*
* allocate object for the kstack
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
#ifdef KSTACK_GUARD
/* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
@@ -935,17 +945,23 @@ pmap_new_thread(struct thread *td)
ptek++;
#else
/* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
ptek = vtopte(ks);
#endif
/*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
+ /*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
@@ -976,15 +992,17 @@ pmap_dispose_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pt_entry_t *ptek;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
ptek = vtopte(ks);
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@@ -1002,14 +1020,43 @@ pmap_dispose_thread(td)
* address map.
*/
#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
- kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
+ kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
+ * Set up a variable sized alternate kstack.
+ */
+void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ /* shuffle the original stack */
+ td->td_altkstack_obj = td->td_kstack_obj;
+ td->td_altkstack = td->td_kstack;
+ td->td_altkstack_pages = td->td_kstack_pages;
+
+ pmap_new_thread(td, pages);
+}
+
+void
+pmap_dispose_altkstack(td)
+ struct thread *td;
+{
+ pmap_dispose_thread(td);
+
+ /* restore the original kstack */
+ td->td_kstack = td->td_altkstack;
+ td->td_kstack_obj = td->td_altkstack_obj;
+ td->td_kstack_pages = td->td_altkstack_pages;
+ td->td_altkstack = 0;
+ td->td_altkstack_obj = NULL;
+ td->td_altkstack_pages = 0;
+}
+
+/*
* Allow the kernel stack for a thread to be prejudicially paged out.
*/
void
@@ -1017,6 +1064,7 @@ pmap_swapout_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
@@ -1026,9 +1074,10 @@ pmap_swapout_thread(td)
*/
alpha_fpstate_save(td, 1);
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@@ -1048,13 +1097,15 @@ pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 0c9a2c7..b68ff09 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -934,29 +934,39 @@ retry:
return m;
}
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack (including pcb for i386) for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
int i;
- vm_page_t ma[KSTACK_PAGES];
+ vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_page_t m;
vm_offset_t ks;
+ /* Bounds check */
+ if (pages <= 1)
+ pages = KSTACK_PAGES;
+ else if (pages > KSTACK_MAX_PAGES)
+ pages = KSTACK_MAX_PAGES;
+
/*
* allocate object for the kstack
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/* get a kernel virtual address for the kstack for this thread */
#ifdef KSTACK_GUARD
- ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (*vtopte(ks) != 0)
@@ -965,16 +975,22 @@ pmap_new_thread(struct thread *td)
td->td_kstack = ks;
#else
/* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
#endif
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
@@ -986,7 +1002,7 @@ pmap_new_thread(struct thread *td)
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
- pmap_qenter(ks, ma, KSTACK_PAGES);
+ pmap_qenter(ks, ma, pages);
}
/*
@@ -998,14 +1014,16 @@ pmap_dispose_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- pmap_qremove(ks, KSTACK_PAGES);
- for (i = 0; i < KSTACK_PAGES; i++) {
+ pmap_qremove(ks, pages);
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@@ -1020,14 +1038,44 @@ pmap_dispose_thread(td)
* address map.
*/
#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
- kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
+ kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
+ * Set up a variable sized alternate kstack. Though it may look MI, it may
+ * need to be different on certain arches like ia64.
+ */
+void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ /* shuffle the original stack */
+ td->td_altkstack_obj = td->td_kstack_obj;
+ td->td_altkstack = td->td_kstack;
+ td->td_altkstack_pages = td->td_kstack_pages;
+
+ pmap_new_thread(td, pages);
+}
+
+void
+pmap_dispose_altkstack(td)
+ struct thread *td;
+{
+ pmap_dispose_thread(td);
+
+ /* restore the original kstack */
+ td->td_kstack = td->td_altkstack;
+ td->td_kstack_obj = td->td_altkstack_obj;
+ td->td_kstack_pages = td->td_altkstack_pages;
+ td->td_altkstack = 0;
+ td->td_altkstack_obj = NULL;
+ td->td_altkstack_pages = 0;
+}
+
+/*
* Allow the Kernel stack for a thread to be prejudicially paged out.
*/
void
@@ -1035,14 +1083,16 @@ pmap_swapout_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- pmap_qremove(ks, KSTACK_PAGES);
- for (i = 0; i < KSTACK_PAGES; i++) {
+ pmap_qremove(ks, pages);
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@@ -1061,14 +1111,16 @@ pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
- vm_page_t ma[KSTACK_PAGES];
+ int pages;
+ vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
@@ -1083,7 +1135,7 @@ pmap_swapin_thread(td)
vm_page_wakeup(m);
vm_page_unlock_queues();
}
- pmap_qenter(ks, ma, KSTACK_PAGES);
+ pmap_qenter(ks, ma, pages);
}
/***************************************************
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index c625246..df321f6 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -2253,6 +2253,8 @@ options SHOW_BUSYBUFS # List buffers that prevent root unmount
options SLIP_IFF_OPTS
options VFS_BIO_DEBUG # VFS buffer I/O debugging
+options KSTACK_MAX_PAGES=32 # Maximum pages to give the kernel stack
+
# Yet more undocumented options for linting.
options AAC_DEBUG
options ACD_DEBUG
diff --git a/sys/conf/options b/sys/conf/options
index 079d77b..20aa5af 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -89,6 +89,7 @@ GDBSPEED opt_ddb.h
GEOM
HW_WDOG
KSTACK_PAGES
+KSTACK_MAX_PAGES
KTRACE
KTRACE_REQUEST_POOL opt_ktrace.h
LIBICONV
diff --git a/sys/dev/aac/aac.c b/sys/dev/aac/aac.c
index 0ad1d83..2e1dbf1 100644
--- a/sys/dev/aac/aac.c
+++ b/sys/dev/aac/aac.c
@@ -312,7 +312,7 @@ aac_attach(struct aac_softc *sc)
/* Create the AIF thread */
#if __FreeBSD_version > 500005
if (kthread_create((void(*)(void *))aac_host_command, sc,
- &sc->aifthread, 0, "aac%daif", unit))
+ &sc->aifthread, 0, 0, "aac%daif", unit))
#else
if (kthread_create((void(*)(void *))aac_host_command, sc,
&sc->aifthread, "aac%daif", unit))
diff --git a/sys/dev/acpica/Osd/OsdSchedule.c b/sys/dev/acpica/Osd/OsdSchedule.c
index d8aca62..f402a02 100644
--- a/sys/dev/acpica/Osd/OsdSchedule.c
+++ b/sys/dev/acpica/Osd/OsdSchedule.c
@@ -148,7 +148,7 @@ acpi_task_thread_init(void)
for (i = 0; i < ACPI_MAX_THREADS; i++) {
err = kthread_create(acpi_task_thread, NULL, &acpi_kthread_proc,
- 0, "acpi_task%d", i);
+ 0, 0, "acpi_task%d", i);
if (err != 0) {
printf("%s: kthread_create failed(%d)\n", __func__, err);
break;
diff --git a/sys/dev/acpica/acpi_thermal.c b/sys/dev/acpica/acpi_thermal.c
index d781272..74fc962 100644
--- a/sys/dev/acpica/acpi_thermal.c
+++ b/sys/dev/acpica/acpi_thermal.c
@@ -263,7 +263,7 @@ acpi_tz_attach(device_t dev)
*/
if (acpi_tz_proc == NULL) {
error = kthread_create(acpi_tz_thread, NULL, &acpi_tz_proc,
- RFHIGHPID, "acpi_thermal");
+ RFHIGHPID, 0, "acpi_thermal");
if (error != 0) {
device_printf(sc->tz_dev, "could not create thread - %d", error);
goto out;
diff --git a/sys/dev/ata/ata-raid.c b/sys/dev/ata/ata-raid.c
index f1afcc7..2c3fb19 100644
--- a/sys/dev/ata/ata-raid.c
+++ b/sys/dev/ata/ata-raid.c
@@ -463,7 +463,7 @@ ata_raid_rebuild(int array)
return ENXIO;
if (rdp->flags & AR_F_REBUILDING)
return EBUSY;
- return kthread_create(ar_rebuild, rdp, &rdp->pid, RFNOWAIT,
+ return kthread_create(ar_rebuild, rdp, &rdp->pid, RFNOWAIT, 0,
"rebuilding ar%d", array);
}
diff --git a/sys/dev/isp/isp_freebsd.c b/sys/dev/isp/isp_freebsd.c
index af93d20..c80c32c 100644
--- a/sys/dev/isp/isp_freebsd.c
+++ b/sys/dev/isp/isp_freebsd.c
@@ -148,7 +148,7 @@ isp_attach(struct ispsoftc *isp)
/* XXX: LOCK VIOLATION */
cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
- RFHIGHPID, "%s: fc_thrd",
+ RFHIGHPID, 0, "%s: fc_thrd",
device_get_nameunit(isp->isp_dev))) {
xpt_bus_deregister(cam_sim_path(sim));
cam_sim_free(sim, TRUE);
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index ae1953d..7836ced 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -646,7 +646,7 @@ mdnew(int unit)
sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
sc->unit = unit;
sprintf(sc->name, "md%d", unit);
- error = kthread_create(md_kthread, sc, &sc->procp, 0, "%s", sc->name);
+ error = kthread_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
if (error) {
free(sc, M_MD);
return (NULL);
diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c
index 8d26665..cf3ff52 100644
--- a/sys/dev/pccbb/pccbb.c
+++ b/sys/dev/pccbb/pccbb.c
@@ -680,7 +680,7 @@ cbb_attach(device_t brdev)
cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT));
/* Start the thread */
- if (kthread_create(cbb_event_thread, sc, &sc->event_thread, 0,
+ if (kthread_create(cbb_event_thread, sc, &sc->event_thread, 0, 0,
"%s%d", device_get_name(sc->dev), device_get_unit(sc->dev))) {
device_printf (sc->dev, "unable to create event thread.\n");
panic ("cbb_create_event_thread");
diff --git a/sys/dev/pcic/i82365.c b/sys/dev/pcic/i82365.c
index 02ca122..387927b 100644
--- a/sys/dev/pcic/i82365.c
+++ b/sys/dev/pcic/i82365.c
@@ -487,7 +487,7 @@ pcic_create_event_thread(void *arg)
}
if (kthread_create(pcic_event_thread, h, &h->event_thread,
- 0, "%s,%s", device_get_name(PCIC_H2SOFTC(h)->dev), cs)) {
+ 0, 0, "%s,%s", device_get_name(PCIC_H2SOFTC(h)->dev), cs)) {
device_printf(PCIC_H2SOFTC(h)->dev,
"cannot create event thread for sock 0x%02x\n", h->sock);
panic("pcic_create_event_thread");
diff --git a/sys/dev/random/randomdev.c b/sys/dev/random/randomdev.c
index e018428..d965cbc 100644
--- a/sys/dev/random/randomdev.c
+++ b/sys/dev/random/randomdev.c
@@ -282,7 +282,7 @@ random_modevent(module_t mod __unused, int type, void *data __unused)
/* Start the hash/reseed thread */
error = kthread_create(random_kthread, NULL,
- &random_kthread_proc, RFHIGHPID, "random");
+ &random_kthread_proc, RFHIGHPID, 0, "random");
if (error != 0)
return error;
diff --git a/sys/dev/usb/usb_port.h b/sys/dev/usb/usb_port.h
index cbf90fd..1894b9a 100644
--- a/sys/dev/usb/usb_port.h
+++ b/sys/dev/usb/usb_port.h
@@ -365,9 +365,9 @@ typedef struct thread *usb_proc_ptr;
#define memcpy(d, s, l) bcopy((s),(d),(l))
#define memset(d, v, l) bzero((d),(l))
#define usb_kthread_create1(f, s, p, a0, a1) \
- kthread_create((f), (s), (p), RFHIGHPID, (a0), (a1))
+ kthread_create((f), (s), (p), RFHIGHPID, 0, (a0), (a1))
#define usb_kthread_create2(f, s, p, a0) \
- kthread_create((f), (s), (p), RFHIGHPID, (a0))
+ kthread_create((f), (s), (p), RFHIGHPID, 0, (a0))
#define usb_kthread_create kthread_create
#define config_pending_incr()
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 0c9a2c7..b68ff09 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -934,29 +934,39 @@ retry:
return m;
}
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack (including pcb for i386) for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
int i;
- vm_page_t ma[KSTACK_PAGES];
+ vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_page_t m;
vm_offset_t ks;
+ /* Bounds check */
+ if (pages <= 1)
+ pages = KSTACK_PAGES;
+ else if (pages > KSTACK_MAX_PAGES)
+ pages = KSTACK_MAX_PAGES;
+
/*
* allocate object for the kstack
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/* get a kernel virtual address for the kstack for this thread */
#ifdef KSTACK_GUARD
- ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (*vtopte(ks) != 0)
@@ -965,16 +975,22 @@ pmap_new_thread(struct thread *td)
td->td_kstack = ks;
#else
/* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
+ ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
#endif
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
@@ -986,7 +1002,7 @@ pmap_new_thread(struct thread *td)
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
- pmap_qenter(ks, ma, KSTACK_PAGES);
+ pmap_qenter(ks, ma, pages);
}
/*
@@ -998,14 +1014,16 @@ pmap_dispose_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- pmap_qremove(ks, KSTACK_PAGES);
- for (i = 0; i < KSTACK_PAGES; i++) {
+ pmap_qremove(ks, pages);
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@@ -1020,14 +1038,44 @@ pmap_dispose_thread(td)
* address map.
*/
#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
+ kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
- kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
+ kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
+ * Set up a variable sized alternate kstack. Though it may look MI, it may
+ * need to be different on certain arches like ia64.
+ */
+void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ /* shuffle the original stack */
+ td->td_altkstack_obj = td->td_kstack_obj;
+ td->td_altkstack = td->td_kstack;
+ td->td_altkstack_pages = td->td_kstack_pages;
+
+ pmap_new_thread(td, pages);
+}
+
+void
+pmap_dispose_altkstack(td)
+ struct thread *td;
+{
+ pmap_dispose_thread(td);
+
+ /* restore the original kstack */
+ td->td_kstack = td->td_altkstack;
+ td->td_kstack_obj = td->td_altkstack_obj;
+ td->td_kstack_pages = td->td_altkstack_pages;
+ td->td_altkstack = 0;
+ td->td_altkstack_obj = NULL;
+ td->td_altkstack_pages = 0;
+}
+
+/*
* Allow the Kernel stack for a thread to be prejudicially paged out.
*/
void
@@ -1035,14 +1083,16 @@ pmap_swapout_thread(td)
struct thread *td;
{
int i;
+ int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- pmap_qremove(ks, KSTACK_PAGES);
- for (i = 0; i < KSTACK_PAGES; i++) {
+ pmap_qremove(ks, pages);
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@@ -1061,14 +1111,16 @@ pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
- vm_page_t ma[KSTACK_PAGES];
+ int pages;
+ vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
@@ -1083,7 +1135,7 @@ pmap_swapin_thread(td)
vm_page_wakeup(m);
vm_page_unlock_queues();
}
- pmap_qenter(ks, ma, KSTACK_PAGES);
+ pmap_qenter(ks, ma, pages);
}
/***************************************************
diff --git a/sys/i386/linux/linux_machdep.c b/sys/i386/linux/linux_machdep.c
index 56952cb..c468d89 100644
--- a/sys/i386/linux/linux_machdep.c
+++ b/sys/i386/linux/linux_machdep.c
@@ -342,7 +342,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
ff |= RFFDG;
mtx_lock(&Giant);
- error = fork1(td, ff, &p2);
+ error = fork1(td, ff, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2->p_pid;
td->td_retval[1] = 0;
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 49bb739..66b58d2 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -685,7 +685,7 @@ create_init(const void *udata __unused)
struct ucred *newcred, *oldcred;
int error;
- error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, &initproc);
+ error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, 0, &initproc);
if (error)
panic("cannot fork init: %d\n", error);
/* divorce init's credentials from the kernel's */
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index da7ca7d..9fbf602 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -118,7 +118,7 @@ fork(td, uap)
struct proc *p2;
mtx_lock(&Giant);
- error = fork1(td, RFFDG | RFPROC, &p2);
+ error = fork1(td, RFFDG | RFPROC, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2->p_pid;
td->td_retval[1] = 0;
@@ -140,7 +140,7 @@ vfork(td, uap)
struct proc *p2;
mtx_lock(&Giant);
- error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
+ error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2->p_pid;
td->td_retval[1] = 0;
@@ -164,7 +164,7 @@ rfork(td, uap)
if ((uap->flags & RFKERNELONLY) != 0)
return (EINVAL);
mtx_lock(&Giant);
- error = fork1(td, uap->flags, &p2);
+ error = fork1(td, uap->flags, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2 ? p2->p_pid : 0;
td->td_retval[1] = 0;
@@ -215,9 +215,10 @@ SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
int
-fork1(td, flags, procp)
+fork1(td, flags, pages, procp)
struct thread *td; /* parent proc */
int flags;
+ int pages;
struct proc **procp; /* child proc */
{
struct proc *p2, *pptr;
@@ -471,6 +472,10 @@ again:
kg2 = FIRST_KSEGRP_IN_PROC(p2);
ke2 = FIRST_KSE_IN_KSEGRP(kg2);
+ /* Allocate and switch to an alternate kstack if specified */
+ if (pages != 0)
+ pmap_new_altkstack(td2, pages);
+
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
bzero(&p2->p_startzero,
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index b0f4fda..bf8e922 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -46,7 +46,7 @@ idle_setup(void *dummy)
#ifdef SMP
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
error = kthread_create(idle_proc, NULL, &p,
- RFSTOPPED | RFHIGHPID, "idle: cpu%d", pc->pc_cpuid);
+ RFSTOPPED | RFHIGHPID, 0, "idle: cpu%d", pc->pc_cpuid);
pc->pc_idlethread = FIRST_THREAD_IN_PROC(p);
if (pc->pc_curthread == NULL) {
pc->pc_curthread = pc->pc_idlethread;
@@ -54,7 +54,7 @@ idle_setup(void *dummy)
}
#else
error = kthread_create(idle_proc, NULL, &p,
- RFSTOPPED | RFHIGHPID, "idle");
+ RFSTOPPED | RFHIGHPID, 0, "idle");
PCPU_SET(idlethread, FIRST_THREAD_IN_PROC(p));
#endif
if (error)
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 61f6579..2d7b160 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -188,7 +188,7 @@ ithread_create(struct ithd **ithread, int vector, int flags,
va_end(ap);
error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
- "%s", ithd->it_name);
+ 0, "%s", ithd->it_name);
if (error) {
mtx_destroy(&ithd->it_lock);
free(ithd, M_ITHREAD);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 907eba7..7123616 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -142,7 +142,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
mtx_lock(&Giant);
- pmap_new_thread(td);
+ pmap_new_thread(td, 0);
mtx_unlock(&Giant);
cpu_thread_setup(td);
}
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index 08ef71f..e785d0b 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -55,7 +55,7 @@ kproc_start(udata)
int error;
error = kthread_create((void (*)(void *))kp->func, NULL,
- kp->global_procpp, 0, "%s", kp->arg0);
+ kp->global_procpp, 0, 0, "%s", kp->arg0);
if (error)
panic("kproc_start: %s: error %d", kp->arg0, error);
}
@@ -72,7 +72,7 @@ kproc_start(udata)
*/
int
kthread_create(void (*func)(void *), void *arg,
- struct proc **newpp, int flags, const char *fmt, ...)
+ struct proc **newpp, int flags, int pages, const char *fmt, ...)
{
int error;
va_list ap;
@@ -83,7 +83,7 @@ kthread_create(void (*func)(void *), void *arg,
panic("kthread_create called too soon");
error = fork1(&thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags,
- &p2);
+ pages, &p2);
if (error)
return error;
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index 0ad2df3..bb4642c 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -134,7 +134,7 @@ ktrace_init(void *dummy)
req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}
- kthread_create(ktr_loop, NULL, NULL, RFHIGHPID, "ktrace");
+ kthread_create(ktr_loop, NULL, NULL, RFHIGHPID, 0, "ktrace");
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 633c66e..9f20562 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -164,6 +164,15 @@ proc_dtor(void *mem, int size, void *arg)
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
ke = FIRST_KSE_IN_KSEGRP(kg);
KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
+
+ /* Dispose of an alternate kstack, if it exists.
+ * XXX What if there are more than one thread in the proc?
+ * The first thread in the proc is special and not
+ * freed, so you gotta do this here.
+ */
+ if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
+ pmap_dispose_altkstack(td);
+
/*
* We want to make sure we know the initial linkages.
* so for now tear them down and remake them.
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 907eba7..7123616 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -142,7 +142,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
mtx_lock(&Giant);
- pmap_new_thread(td);
+ pmap_new_thread(td, 0);
mtx_unlock(&Giant);
cpu_thread_setup(td);
}
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index b3a2670..d4230cb 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1001,7 +1001,7 @@ aio_newproc()
int error;
struct proc *p;
- error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, "aiod%d",
+ error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, 0, "aiod%d",
num_aio_procs);
if (error)
return error;
diff --git a/sys/netsmb/smb_iod.c b/sys/netsmb/smb_iod.c
index 38c8184..7edbc1b 100644
--- a/sys/netsmb/smb_iod.c
+++ b/sys/netsmb/smb_iod.c
@@ -676,7 +676,7 @@ smb_iod_create(struct smb_vc *vcp)
smb_sl_init(&iod->iod_evlock, "90evl");
STAILQ_INIT(&iod->iod_evlist);
error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
- RFNOWAIT, "smbiod%d", iod->iod_id);
+ RFNOWAIT, 0, "smbiod%d", iod->iod_id);
if (error) {
SMBERROR("can't start smbiod: %d", error);
free(iod, M_SMBIOD);
diff --git a/sys/nfsclient/nfs_nfsiod.c b/sys/nfsclient/nfs_nfsiod.c
index 4a3d0f7..8762b16 100644
--- a/sys/nfsclient/nfs_nfsiod.c
+++ b/sys/nfsclient/nfs_nfsiod.c
@@ -170,7 +170,7 @@ nfs_nfsiodnew(void)
if (newiod == -1)
return (-1);
error = kthread_create(nfssvc_iod, nfs_asyncdaemon + i, NULL, RFHIGHPID,
- "nfsiod %d", newiod);
+ 0, "nfsiod %d", newiod);
if (error)
return (-1);
nfs_numasync++;
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index bd15689..8802cfb 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1532,13 +1532,17 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
@@ -1548,21 +1552,27 @@ pmap_new_thread(struct thread *td)
/*
* Allocate object for the kstack.
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
- (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@@ -1587,6 +1597,18 @@ pmap_dispose_thread(struct thread *td)
}
void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ TODO;
+}
+
+void
+pmap_dispose_altkstack(struct thread *td)
+{
+ TODO;
+}
+
+void
pmap_swapin_thread(struct thread *td)
{
TODO;
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index bd15689..8802cfb 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -1532,13 +1532,17 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
@@ -1548,21 +1552,27 @@ pmap_new_thread(struct thread *td)
/*
* Allocate object for the kstack.
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
- (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@@ -1587,6 +1597,18 @@ pmap_dispose_thread(struct thread *td)
}
void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ TODO;
+}
+
+void
+pmap_dispose_altkstack(struct thread *td)
+{
+ TODO;
+}
+
+void
pmap_swapin_thread(struct thread *td)
{
TODO;
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index bd15689..8802cfb 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -1532,13 +1532,17 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
@@ -1548,21 +1552,27 @@ pmap_new_thread(struct thread *td)
/*
* Allocate object for the kstack.
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
- (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@@ -1587,6 +1597,18 @@ pmap_dispose_thread(struct thread *td)
}
void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ TODO;
+}
+
+void
+pmap_dispose_altkstack(struct thread *td)
+{
+ TODO;
+}
+
+void
pmap_swapin_thread(struct thread *td)
{
TODO;
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 4485a55..60c264d 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -951,31 +951,41 @@ pmap_qremove(vm_offset_t sva, int count)
tlb_range_demap(kernel_pmap, sva, sva + (count * PAGE_SIZE) - 1);
}
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
-pmap_new_thread(struct thread *td)
+pmap_new_thread(struct thread *td, int pages)
{
- vm_page_t ma[KSTACK_PAGES];
+ vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
u_int i;
+ /* Bounds check */
+ if (pages <= 1)
+ pages = KSTACK_PAGES;
+ else if (pages > KSTACK_MAX_PAGES)
+ pages = KSTACK_MAX_PAGES;
+
/*
* Allocate object for the kstack,
*/
- ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
- (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (KSTACK_GUARD_PAGES != 0) {
@@ -984,7 +994,13 @@ pmap_new_thread(struct thread *td)
}
td->td_kstack = ks;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+
+ for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@@ -1000,7 +1016,7 @@ pmap_new_thread(struct thread *td)
/*
* Enter the page into the kernel address space.
*/
- pmap_qenter(ks, ma, KSTACK_PAGES);
+ pmap_qenter(ks, ma, pages);
}
/*
@@ -1014,10 +1030,12 @@ pmap_dispose_thread(struct thread *td)
vm_offset_t ks;
vm_page_t m;
int i;
+ int pages;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages ; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@@ -1027,13 +1045,41 @@ pmap_dispose_thread(struct thread *td)
vm_page_free(m);
vm_page_unlock_queues();
}
- pmap_qremove(ks, KSTACK_PAGES);
+ pmap_qremove(ks, pages);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
- (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
vm_object_deallocate(ksobj);
}
/*
+ * Set up a variable sized alternate kstack.
+ */
+void
+pmap_new_altkstack(struct thread *td, int pages)
+{
+ /* shuffle the original stack */
+ td->td_altkstack_obj = td->td_kstack_obj;
+ td->td_altkstack = td->td_kstack;
+ td->td_altkstack_pages = td->td_kstack_pages;
+
+ pmap_new_thread(td, pages);
+}
+
+void
+pmap_dispose_altkstack(struct thread *td)
+{
+ pmap_dispose_thread(td);
+
+ /* restore the original kstack */
+ td->td_kstack = td->td_altkstack;
+ td->td_kstack_obj = td->td_altkstack_obj;
+ td->td_kstack_pages = td->td_altkstack_pages;
+ td->td_altkstack = 0;
+ td->td_altkstack_obj = NULL;
+ td->td_altkstack_pages = 0;
+}
+
+/*
* Allow the kernel stack for a thread to be prejudicially paged out.
*/
void
@@ -1043,10 +1089,12 @@ pmap_swapout_thread(struct thread *td)
vm_offset_t ks;
vm_page_t m;
int i;
+ int pages;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = (vm_offset_t)td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@@ -1055,7 +1103,7 @@ pmap_swapout_thread(struct thread *td)
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
- pmap_qremove(ks, KSTACK_PAGES);
+ pmap_qremove(ks, pages);
}
/*
@@ -1064,16 +1112,18 @@ pmap_swapout_thread(struct thread *td)
void
pmap_swapin_thread(struct thread *td)
{
- vm_page_t ma[KSTACK_PAGES];
+ vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int rv;
int i;
+ int pages;
+ pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
- for (i = 0; i < KSTACK_PAGES; i++) {
+ for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
@@ -1088,7 +1138,7 @@ pmap_swapin_thread(struct thread *td)
vm_page_wakeup(m);
vm_page_unlock_queues();
}
- pmap_qenter(ks, ma, KSTACK_PAGES);
+ pmap_qenter(ks, ma, pages);
}
/*
diff --git a/sys/sys/kthread.h b/sys/sys/kthread.h
index 548c410..5f41cfc 100644
--- a/sys/sys/kthread.h
+++ b/sys/sys/kthread.h
@@ -45,7 +45,7 @@ struct kproc_desc {
void kproc_shutdown(void *, int);
void kproc_start(const void *);
int kthread_create(void (*)(void *), void *, struct proc **,
- int flags, const char *, ...) __printflike(5, 6);
+ int flags, int pages, const char *, ...) __printflike(6, 7);
void kthread_exit(int) __dead2;
int kthread_resume(struct proc *); /* XXXKSE */
int kthread_suspend(struct proc *, int); /* XXXKSE */
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 74ca25d..9f4bf95 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -314,6 +314,10 @@ struct thread {
struct trapframe *td_frame; /* (k) */
struct vm_object *td_kstack_obj;/* (a) Kstack object. */
vm_offset_t td_kstack; /* Kernel VA of kstack. */
+ int td_kstack_pages; /* Size of the kstack */
+ struct vm_object *td_altkstack_obj;/* (a) Alternate kstack object. */
+ vm_offset_t td_altkstack; /* Kernel VA of alternate kstack. */
+ int td_altkstack_pages; /* Size of the alternate kstack */
};
/* flags kept in td_flags */
#define TDF_UNBOUND 0x000001 /* May give away the kse, uses the kg runq. */
@@ -844,7 +848,7 @@ int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *ses
int enterthispgrp(struct proc *p, struct pgrp *pgrp);
void faultin(struct proc *p);
void fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
-int fork1(struct thread *, int, struct proc **);
+int fork1(struct thread *, int, int, struct proc **);
void fork_exit(void (*)(void *, struct trapframe *), void *,
struct trapframe *);
void fork_return(struct thread *, struct trapframe *);
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 2ea797e..69e7122 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -128,8 +128,10 @@ void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
-void pmap_new_thread(struct thread *td);
+void pmap_new_thread(struct thread *td, int pages);
void pmap_dispose_thread(struct thread *td);
+void pmap_new_altkstack(struct thread *td, int pages);
+void pmap_dispose_altkstack(struct thread *td);
void pmap_swapout_thread(struct thread *td);
void pmap_swapin_thread(struct thread *td);
void pmap_activate(struct thread *td);