summary | refs | log | tree | commit | diff | stats
path: root/sys/kern
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2007-06-12 19:49:39 +0000
committerjeff <jeff@FreeBSD.org>2007-06-12 19:49:39 +0000
commit26422aea29504a29d366848358773f507dc368f4 (patch)
treeee37a2f65e950e27f5043353c0358f93c2e7a439 /sys/kern
parentbed92e37b990e7d96eecf2017d1eb53cfcf0eb4b (diff)
downloadFreeBSD-src-26422aea29504a29d366848358773f507dc368f4.zip
FreeBSD-src-26422aea29504a29d366848358773f507dc368f4.tar.gz
- Garbage collect unused concurrency functions.
- Remove unused kse fields from struct proc.
- Group remaining fields and #ifdef KSE them.
- Move some kern_kse.c only prototypes out of proc and into kern_kse.

Discussed with: Julian
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_kse.c     10
-rw-r--r--  sys/kern/kern_thread.c  16
2 files changed, 10 insertions(+), 16 deletions(-)
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index d42bc6b..105e4db 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -65,6 +65,13 @@ TAILQ_HEAD(, kse_upcall) zombie_upcalls =
static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);
+static struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
+static struct kse_upcall *upcall_alloc(void);
+static void upcall_free(struct kse_upcall *ku);
+static void upcall_link(struct kse_upcall *ku, struct proc *p);
+static void upcall_unlink(struct kse_upcall *ku);
+static void upcall_stash(struct kse_upcall *ke);
+
struct mtx kse_lock;
MTX_SYSINIT(kse_lock, &kse_lock, "kse lock", MTX_SPIN);
@@ -138,6 +145,7 @@ kse_unlink(struct thread *td)
mtx_lock_spin(&kse_lock);
thread_unlink(td);
mtx_unlock_spin(&kse_lock);
+ upcall_remove(td);
}
#endif
@@ -1293,7 +1301,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
max_threads_hits++;
PROC_LOCK(p);
PROC_SLOCK(p);
- p->p_maxthrwaits++;
while (p->p_numthreads > max_threads_per_proc) {
if (p->p_numupcalls >= max_threads_per_proc)
break;
@@ -1306,7 +1313,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
PROC_SLOCK(p);
}
}
- p->p_maxthrwaits--;
PROC_SUNLOCK(p);
PROC_UNLOCK(p);
}
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index abbac4a..91870d4 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -219,7 +219,9 @@ proc_linkup(struct proc *p, struct thread *td)
{
TAILQ_INIT(&p->p_threads); /* all threads in proc */
+#ifdef KSE
TAILQ_INIT(&p->p_upcalls); /* upcall list */
+#endif
sigqueue_init(&p->p_sigqueue, p);
p->p_ksi = ksiginfo_alloc(1);
if (p->p_ksi != NULL) {
@@ -439,19 +441,6 @@ thread_exit(void)
}
}
-#ifdef KSE
- /*
- * Because each upcall structure has an owner thread,
- * owner thread exits only when process is in exiting
- * state, so upcall to userland is no longer needed,
- * deleting upcall structure is safe here.
- * So when all threads in a group is exited, all upcalls
- * in the group should be automatically freed.
- * XXXKSE This is a KSE thing and should be exported
- * there somehow.
- */
- upcall_remove(td);
-#endif
atomic_add_int(&td->td_proc->p_exitthreads, 1);
PCPU_SET(deadthread, td);
} else {
@@ -569,7 +558,6 @@ thread_unthread(struct thread *td)
thread_zombie(td->td_standin);
td->td_standin = NULL;
}
- sched_set_concurrency(p, 1);
#else
p->p_flag &= ~P_HADTHREADS;
#endif
OpenPOWER on IntegriCloud