author    jeff <jeff@FreeBSD.org>	2007-06-12 19:49:39 +0000
committer jeff <jeff@FreeBSD.org>	2007-06-12 19:49:39 +0000
commit    26422aea29504a29d366848358773f507dc368f4 (patch)
tree      ee37a2f65e950e27f5043353c0358f93c2e7a439
parent    bed92e37b990e7d96eecf2017d1eb53cfcf0eb4b (diff)
- Garbage collect unused concurrency functions.
- Remove unused kse fields from struct proc.
- Group remaining fields and #ifdef KSE them.
- Move some kern_kse.c only prototypes out of proc and into kern_kse.

Discussed with:	Julian
 sys/kern/kern_kse.c    | 10
 sys/kern/kern_thread.c | 16
 sys/sys/proc.h         | 20
 sys/sys/sched.h        |  3
 4 files changed, 20 insertions(+), 29 deletions(-)
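
The diffs below reduce to two mechanical moves: prototypes that only kern_kse.c calls are dropped from sys/sys/proc.h and redeclared static at the top of kern_kse.c, and fields that only the KSE machinery uses are grouped under #ifdef KSE. As a rough, hypothetical sketch of that pattern (struct widget, the widget_*() names, and WIDGET_OPT are invented for illustration and are not FreeBSD identifiers):

/*
 * Minimal sketch, not taken from the FreeBSD tree: a helper that only one
 * .c file calls loses its prototype in the shared header and becomes static
 * in that file, and struct fields that exist only for an optional subsystem
 * are fenced with that subsystem's #ifdef.
 */

/* widget.h: the shared header after trimming */
struct widget {
	int	w_refs;			/* always present */
#ifdef WIDGET_OPT
	int	w_opt_state;		/* only built when WIDGET_OPT is defined */
#endif
};

void	widget_attach(struct widget *w);	/* still public; other files call it */
/* widget_scrub() was removed from here; only widget.c uses it */

/* widget.c: the lone caller keeps a file-local declaration instead */
static void widget_scrub(struct widget *w);

void
widget_attach(struct widget *w)
{

	w->w_refs = 1;
	widget_scrub(w);
}

static void
widget_scrub(struct widget *w)
{

#ifdef WIDGET_OPT
	w->w_opt_state = 0;	/* optional field is only touched under the ifdef */
#endif
	(void)w;
}

Shrinking the header this way keeps the KSE-only helpers out of every translation unit that includes proc.h, which is why the prototypes reappear as static declarations in the kern_kse.c hunk below.
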
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index d42bc6b..105e4db 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -65,6 +65,13 @@ TAILQ_HEAD(, kse_upcall) zombie_upcalls =
static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);
+static struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
+static struct kse_upcall *upcall_alloc(void);
+static void upcall_free(struct kse_upcall *ku);
+static void upcall_link(struct kse_upcall *ku, struct proc *p);
+static void upcall_unlink(struct kse_upcall *ku);
+static void upcall_stash(struct kse_upcall *ke);
+
struct mtx kse_lock;
MTX_SYSINIT(kse_lock, &kse_lock, "kse lock", MTX_SPIN);
@@ -138,6 +145,7 @@ kse_unlink(struct thread *td)
mtx_lock_spin(&kse_lock);
thread_unlink(td);
mtx_unlock_spin(&kse_lock);
+ upcall_remove(td);
}
#endif
@@ -1293,7 +1301,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
max_threads_hits++;
PROC_LOCK(p);
PROC_SLOCK(p);
- p->p_maxthrwaits++;
while (p->p_numthreads > max_threads_per_proc) {
if (p->p_numupcalls >= max_threads_per_proc)
break;
@@ -1306,7 +1313,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
PROC_SLOCK(p);
}
}
- p->p_maxthrwaits--;
PROC_SUNLOCK(p);
PROC_UNLOCK(p);
}
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index abbac4a..91870d4 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -219,7 +219,9 @@ proc_linkup(struct proc *p, struct thread *td)
{
TAILQ_INIT(&p->p_threads); /* all threads in proc */
+#ifdef KSE
TAILQ_INIT(&p->p_upcalls); /* upcall list */
+#endif
sigqueue_init(&p->p_sigqueue, p);
p->p_ksi = ksiginfo_alloc(1);
if (p->p_ksi != NULL) {
@@ -439,19 +441,6 @@ thread_exit(void)
}
}
-#ifdef KSE
- /*
- * Because each upcall structure has an owner thread,
- * owner thread exits only when process is in exiting
- * state, so upcall to userland is no longer needed,
- * deleting upcall structure is safe here.
- * So when all threads in a group is exited, all upcalls
- * in the group should be automatically freed.
- * XXXKSE This is a KSE thing and should be exported
- * there somehow.
- */
- upcall_remove(td);
-#endif
atomic_add_int(&td->td_proc->p_exitthreads, 1);
PCPU_SET(deadthread, td);
} else {
@@ -569,7 +558,6 @@ thread_unthread(struct thread *td)
thread_zombie(td->td_standin);
td->td_standin = NULL;
}
- sched_set_concurrency(p, 1);
#else
p->p_flag &= ~P_HADTHREADS;
#endif
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index e9a0917..18b15ff 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -239,8 +239,10 @@ struct thread {
int td_pinned; /* (k) Temporary cpu pin count. */
struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
struct ucred *td_ucred; /* (k) Reference to credentials. */
+#ifdef KSE
struct thread *td_standin; /* (k + a) Use this for an upcall. */
struct kse_upcall *td_upcall; /* (k + t) Upcall structure. */
+#endif
u_int td_estcpu; /* (t) estimated cpu utilization */
u_int td_slptime; /* (t) How long completely blocked. */
struct rusage td_ru; /* (t) rusage information */
@@ -435,6 +437,7 @@ do { \
#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN
+#ifdef KSE
/*
* An upcall is used when returning to userland. If a thread does not have
* an upcall on return to userland the thread exports its context and exits.
@@ -452,6 +455,7 @@ struct kse_upcall {
#define KUF_DOUPCALL 0x00001 /* Do upcall now; don't wait. */
#define KUF_EXITING 0x00002 /* Upcall structure is exiting. */
+#endif
/*
* XXX: Does this belong in resource.h or resourcevar.h instead?
@@ -489,7 +493,6 @@ struct proc {
struct plimit *p_limit; /* (c) Process limits. */
struct callout p_limco; /* (c) Limit callout handle */
struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */
- TAILQ_HEAD(, kse_upcall) p_upcalls; /* (j) All upcalls in the proc. */
/*
* The following don't make too much sense.
@@ -523,7 +526,6 @@ struct proc {
struct rusage_ext p_rux; /* (cj) Internal resource usage. */
struct rusage_ext p_crux; /* (c) Internal child resource usage. */
int p_profthreads; /* (c) Num threads in addupc_task. */
- int p_maxthrwaits; /* (c) Max threads num waiters */
volatile int p_exitthreads; /* (j) Number of threads exiting */
int p_traceflag; /* (o) Kernel trace points. */
struct vnode *p_tracevp; /* (c + o) Trace to vnode. */
@@ -546,12 +548,14 @@ struct proc {
int p_boundary_count;/* (c) Num threads at user boundary */
int p_pendingcnt; /* how many signals are pending */
struct itimers *p_itimers; /* (c) POSIX interval timers. */
-/* from ksegrp */
+#ifdef KSE
+ TAILQ_HEAD(, kse_upcall) p_upcalls; /* (j) All upcalls in the proc. */
int p_numupcalls; /* (j) Num upcalls. */
int p_upsleeps; /* (c) Num threads in kse_release(). */
struct kse_thr_mailbox *p_completed; /* (c) Completed thread mboxes. */
int p_nextupcall; /* (n) Next upcall time. */
int p_upquantum; /* (n) Quantum to schedule an upcall. */
+#endif
/* End area that is zeroed on creation. */
#define p_endzero p_magic
@@ -870,9 +874,12 @@ void cpu_fork(struct thread *, struct proc *, struct thread *, int);
void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
/* New in KSE. */
+#ifdef KSE
void kse_unlink(struct thread *);
void kse_GC(void);
void kseinit(void);
+void upcall_remove(struct thread *td);
+#endif
void cpu_set_upcall(struct thread *td, struct thread *td0);
void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *, stack_t *);
int cpu_set_user_tls(struct thread *, void *tls_base);
@@ -888,7 +895,6 @@ int thread_export_context(struct thread *td, int willexit);
void thread_free(struct thread *td);
void thread_link(struct thread *td, struct proc *p);
void thread_reap(void);
-struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
void thread_signal_add(struct thread *td, ksiginfo_t *);
int thread_single(int how);
void thread_single_end(void);
@@ -912,12 +918,6 @@ void thread_user_enter(struct thread *td);
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
void thr_exit1(void);
-struct kse_upcall *upcall_alloc(void);
-void upcall_free(struct kse_upcall *ku);
-void upcall_link(struct kse_upcall *ku, struct proc *p);
-void upcall_unlink(struct kse_upcall *ku);
-void upcall_remove(struct thread *td);
-void upcall_stash(struct kse_upcall *ke);
#endif /* _KERNEL */
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 0dcf369..5f225e1 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -173,9 +173,6 @@ extern long switch_needresched;
/* temporarily here */
void schedinit(void);
-void sched_init_concurrency(struct proc *p);
-void sched_set_concurrency(struct proc *p, int cuncurrency);
-void sched_schedinit(void);
void sched_newproc(struct proc *p, struct thread *td);
void sched_newthread(struct thread *td);
#endif /* _KERNEL */