author:    davidxu <davidxu@FreeBSD.org>  2003-03-19 05:49:38 +0000
committer: davidxu <davidxu@FreeBSD.org>  2003-03-19 05:49:38 +0000
commit:    8e88e8da057497b66069752a6391f435c706b441 (patch)
tree:      8bb70eacef4e1a4571ae518b0ff0001588c0db1b /sys
parent:    6564ae81018f9caddd72fe4b8f51c200cad0bd78 (diff)
download:  FreeBSD-src-8e88e8da057497b66069752a6391f435c706b441.zip
           FreeBSD-src-8e88e8da057497b66069752a6391f435c706b441.tar.gz
Adjust code for userland preemptive scheduling. Userland can set a quantum in
kse_mailbox to schedule an upcall; this is useful for userland timeout
routines, for example pthread_cond_timedwait(). Also extract the upcall
scheduling code from kse_reassign() into a new function, thread_switchout().

Reviewed by: julian
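To illustrate the new interface, here is a minimal userland sketch of a UTS
requesting a periodic upcall through km_quantum before calling kse_create().
Only km_quantum and the kse_create() prototype come from this commit; the
other mailbox fields used (km_version, km_func, km_stack), the 20 ms value,
and the uts_entry()/start_uts() names are assumptions based on the KSE API
of this era, not part of the patch.

/*
 * Hypothetical sketch: a userland thread scheduler (UTS) asking the
 * kernel for a periodic upcall via the new km_quantum field.
 */
#include <sys/types.h>
#include <sys/kse.h>
#include <stdlib.h>
#include <string.h>

#define UTS_STACK_SIZE	(64 * 1024)

static struct kse_mailbox uts_mbox;

/* Entry point the kernel upcalls into; a real UTS never returns from it. */
static void
uts_entry(struct kse_mailbox *km)
{
	/*
	 * A real UTS would pick the next user thread to run here.  With
	 * km_quantum set, this function is also entered when the quantum
	 * expires, so timeouts such as the one behind
	 * pthread_cond_timedwait() can be checked on every upcall.
	 */
	for (;;)
		kse_release(NULL);	/* illustrative idle behavior */
}

int
start_uts(void)
{
	memset(&uts_mbox, 0, sizeof(uts_mbox));
	uts_mbox.km_version = KSE_VERSION;
	uts_mbox.km_func = uts_entry;
	uts_mbox.km_stack.ss_sp = malloc(UTS_STACK_SIZE);
	uts_mbox.km_stack.ss_size = UTS_STACK_SIZE;
	uts_mbox.km_quantum = 20000;	/* request an upcall about every 20 ms */
	return (kse_create(&uts_mbox, 0));
}

Once the quantum elapses, the kernel sets KUF_DOUPCALL and forces an upcall
on the next return to userland, so the UTS regains control even when no user
thread has blocked in the kernel.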
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_kse.c    | 69
-rw-r--r--  sys/kern/kern_switch.c | 28
-rw-r--r--  sys/kern/kern_synch.c  |  3
-rw-r--r--  sys/kern/kern_thread.c | 69
-rw-r--r--  sys/sys/kse.h          |  3
-rw-r--r--  sys/sys/proc.h         |  5
6 files changed, 99 insertions(+), 78 deletions(-)
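The kse_create() hunks below convert the mailbox quantum into hardclock
ticks with max(1, mbx.km_quantum/tick), where tick is the number of
microseconds per clock tick (1000000/hz). A self-contained sketch of that
arithmetic, assuming hz = 100 for illustration (in the kernel, hz and tick
are globals set at boot):

/*
 * Sketch of the quantum-to-ticks conversion performed in kse_create().
 * Assumes hz = 100, i.e. tick = 10000 microseconds.
 */
#include <stdio.h>

#define max(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	int hz = 100;			/* hardclock frequency, assumed */
	int tick = 1000000 / hz;	/* microseconds per tick: 10000 */
	int km_quantum = 20000;		/* 20 ms requested by the UTS */

	/* max(1, ...) guarantees at least one full tick between upcalls. */
	int kg_upquantum = max(1, km_quantum / tick);

	printf("%d ticks between upcalls (%d ms)\n",
	    kg_upquantum, kg_upquantum * tick / 1000);	/* 2 ticks, 20 ms */
	return (0);
}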
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 81b0ed5..9456646 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -696,6 +696,8 @@ kse_create(struct thread *td, struct kse_create_args *uap)
return (EPROCLIM);
}
upcall_link(newku, newkg);
+ if (mbx.km_quantum)
+ newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
/*
* Each upcall structure has an owner thread, find which
@@ -1002,11 +1004,6 @@ thread_export_context(struct thread *td)
if (suword(addr, temp))
goto bad;
- addr = (caddr_t)(&td->td_mailbox->tm_slices);
- temp = fuword(addr) - td->td_usticks;
- if (suword(addr, temp))
- goto bad;
-
/* Get address in latest mbox of list pointer */
addr = (void *)(&td->td_mailbox->tm_next);
/*
@@ -1118,9 +1115,9 @@ thread_update_usr_ticks(struct thread *td, int user)
struct proc *p = td->td_proc;
struct kse_thr_mailbox *tmbx;
struct kse_upcall *ku;
+ struct ksegrp *kg;
caddr_t addr;
uint uticks;
- int slices;
if ((ku = td->td_upcall) == NULL)
return (-1);
@@ -1144,22 +1141,12 @@ thread_update_usr_ticks(struct thread *td, int user)
PROC_UNLOCK(p);
return (-2);
}
- addr = (caddr_t)&tmbx->tm_slices;
- slices = (int)fuword(addr);
- if (slices > 0) {
- slices -= (int)uticks;
- if (suword(addr, slices)) {
- PROC_LOCK(p);
- psignal(p, SIGSEGV);
- PROC_UNLOCK(p);
- return (-2);
- }
- if (slices <= 0) {
- mtx_lock_spin(&sched_lock);
- td->td_upcall->ku_flags |= KUF_DOUPCALL;
- mtx_unlock_spin(&sched_lock);
- }
- }
+ }
+ kg = td->td_ksegrp;
+ if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
+ mtx_lock_spin(&sched_lock);
+ td->td_upcall->ku_flags |= KUF_DOUPCALL;
+ mtx_unlock_spin(&sched_lock);
}
return (0);
}
@@ -1496,6 +1483,38 @@ thread_signal_upcall(struct thread *td)
return;
}
+void
+thread_switchout(struct thread *td)
+{
+ struct kse_upcall *ku;
+
+ mtx_assert(&sched_lock, MA_OWNED);
+
+ /*
+ * If the outgoing thread is in threaded group and has never
+ * scheduled an upcall, decide whether this is a short
+ * or long term event and thus whether or not to schedule
+ * an upcall.
+ * If it is a short term event, just suspend it in
+ * a way that takes its KSE with it.
+ * Select the events for which we want to schedule upcalls.
+ * For now it's just sleep.
+ * XXXKSE eventually almost any inhibition could do.
+ */
+ if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
+ /*
+ * Release ownership of upcall, and schedule an upcall
+ * thread, this new upcall thread becomes the owner of
+ * the upcall structure.
+ */
+ ku = td->td_upcall;
+ ku->ku_owner = NULL;
+ td->td_upcall = NULL;
+ td->td_flags &= ~TDF_CAN_UNBIND;
+ thread_schedule_upcall(td, ku);
+ }
+}
+
/*
* Setup done on the thread when it enters the kernel.
* XXXKSE Presently only for syscalls but eventually all kernel entries.
@@ -1606,10 +1625,11 @@ thread_userret(struct thread *td, struct trapframe *frame)
ku = td->td_upcall;
if ((p->p_flag & PS_NEEDSIGCHK) == 0 &&
(kg->kg_completed == NULL) &&
- (ku->ku_flags & KUF_DOUPCALL) == 0) {
+ (ku->ku_flags & KUF_DOUPCALL) == 0 &&
+ (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
thread_update_usr_ticks(td, 0);
nanotime(&ts);
- error = copyout(&ts,
+ error = copyout(&ts,
(caddr_t)&ku->ku_mailbox->km_timeofday,
sizeof(ts));
td->td_mailbox = 0;
@@ -1679,6 +1699,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
}
if (td->td_flags & TDF_UPCALLING) {
+ kg->kg_nextupcall = ticks+kg->kg_upquantum;
ku = td->td_upcall;
/*
* There is no more work to do and we are going to ride
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 1cd69bf..8c39243 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -168,40 +168,14 @@ kse_reassign(struct kse *ke)
struct ksegrp *kg;
struct thread *td;
struct thread *original;
- struct kse_upcall *ku;
mtx_assert(&sched_lock, MA_OWNED);
original = ke->ke_thread;
KASSERT(original == NULL || TD_IS_INHIBITED(original),
("reassigning KSE with runnable thread"));
kg = ke->ke_ksegrp;
- if (original) {
- /*
- * If the outgoing thread is in threaded group and has never
- * scheduled an upcall, decide whether this is a short
- * or long term event and thus whether or not to schedule
- * an upcall.
- * If it is a short term event, just suspend it in
- * a way that takes its KSE with it.
- * Select the events for which we want to schedule upcalls.
- * For now it's just sleep.
- * XXXKSE eventually almost any inhibition could do.
- */
- if (TD_CAN_UNBIND(original) && (original->td_standin) &&
- TD_ON_SLEEPQ(original)) {
- /*
- * Release ownership of upcall, and schedule an upcall
- * thread, this new upcall thread becomes the owner of
- * the upcall structure.
- */
- ku = original->td_upcall;
- ku->ku_owner = NULL;
- original->td_upcall = NULL;
- original->td_flags &= ~TDF_CAN_UNBIND;
- thread_schedule_upcall(original, ku);
- }
+ if (original)
original->td_kse = NULL;
- }
/*
* Find the first unassigned thread
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 3c0d5fa..0c479c7 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -501,8 +501,9 @@ mi_switch(void)
PCPU_SET(switchtime, new_switchtime);
CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
-
sched_nest = sched_lock.mtx_recurse;
+ if (td->td_proc->p_flag & P_THREADED)
+ thread_switchout(td);
sched_switchout(td);
cpu_switch(); /* SHAZAM!!*/
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 81b0ed5..9456646 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -696,6 +696,8 @@ kse_create(struct thread *td, struct kse_create_args *uap)
return (EPROCLIM);
}
upcall_link(newku, newkg);
+ if (mbx.km_quantum)
+ newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
/*
* Each upcall structure has an owner thread, find which
@@ -1002,11 +1004,6 @@ thread_export_context(struct thread *td)
if (suword(addr, temp))
goto bad;
- addr = (caddr_t)(&td->td_mailbox->tm_slices);
- temp = fuword(addr) - td->td_usticks;
- if (suword(addr, temp))
- goto bad;
-
/* Get address in latest mbox of list pointer */
addr = (void *)(&td->td_mailbox->tm_next);
/*
@@ -1118,9 +1115,9 @@ thread_update_usr_ticks(struct thread *td, int user)
struct proc *p = td->td_proc;
struct kse_thr_mailbox *tmbx;
struct kse_upcall *ku;
+ struct ksegrp *kg;
caddr_t addr;
uint uticks;
- int slices;
if ((ku = td->td_upcall) == NULL)
return (-1);
@@ -1144,22 +1141,12 @@ thread_update_usr_ticks(struct thread *td, int user)
PROC_UNLOCK(p);
return (-2);
}
- addr = (caddr_t)&tmbx->tm_slices;
- slices = (int)fuword(addr);
- if (slices > 0) {
- slices -= (int)uticks;
- if (suword(addr, slices)) {
- PROC_LOCK(p);
- psignal(p, SIGSEGV);
- PROC_UNLOCK(p);
- return (-2);
- }
- if (slices <= 0) {
- mtx_lock_spin(&sched_lock);
- td->td_upcall->ku_flags |= KUF_DOUPCALL;
- mtx_unlock_spin(&sched_lock);
- }
- }
+ }
+ kg = td->td_ksegrp;
+ if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
+ mtx_lock_spin(&sched_lock);
+ td->td_upcall->ku_flags |= KUF_DOUPCALL;
+ mtx_unlock_spin(&sched_lock);
}
return (0);
}
@@ -1496,6 +1483,38 @@ thread_signal_upcall(struct thread *td)
return;
}
+void
+thread_switchout(struct thread *td)
+{
+ struct kse_upcall *ku;
+
+ mtx_assert(&sched_lock, MA_OWNED);
+
+ /*
+ * If the outgoing thread is in threaded group and has never
+ * scheduled an upcall, decide whether this is a short
+ * or long term event and thus whether or not to schedule
+ * an upcall.
+ * If it is a short term event, just suspend it in
+ * a way that takes its KSE with it.
+ * Select the events for which we want to schedule upcalls.
+ * For now it's just sleep.
+ * XXXKSE eventually almost any inhibition could do.
+ */
+ if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
+ /*
+ * Release ownership of upcall, and schedule an upcall
+ * thread, this new upcall thread becomes the owner of
+ * the upcall structure.
+ */
+ ku = td->td_upcall;
+ ku->ku_owner = NULL;
+ td->td_upcall = NULL;
+ td->td_flags &= ~TDF_CAN_UNBIND;
+ thread_schedule_upcall(td, ku);
+ }
+}
+
/*
* Setup done on the thread when it enters the kernel.
* XXXKSE Presently only for syscalls but eventually all kernel entries.
@@ -1606,10 +1625,11 @@ thread_userret(struct thread *td, struct trapframe *frame)
ku = td->td_upcall;
if ((p->p_flag & PS_NEEDSIGCHK) == 0 &&
(kg->kg_completed == NULL) &&
- (ku->ku_flags & KUF_DOUPCALL) == 0) {
+ (ku->ku_flags & KUF_DOUPCALL) == 0 &&
+ (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
thread_update_usr_ticks(td, 0);
nanotime(&ts);
- error = copyout(&ts,
+ error = copyout(&ts,
(caddr_t)&ku->ku_mailbox->km_timeofday,
sizeof(ts));
td->td_mailbox = 0;
@@ -1679,6 +1699,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
}
if (td->td_flags & TDF_UPCALLING) {
+ kg->kg_nextupcall = ticks+kg->kg_upquantum;
ku = td->td_upcall;
/*
* There is no more work to do and we are going to ride
diff --git a/sys/sys/kse.h b/sys/sys/kse.h
index 1549796..8e96525 100644
--- a/sys/sys/kse.h
+++ b/sys/sys/kse.h
@@ -59,7 +59,6 @@ struct kse_thr_mailbox {
void *tm_udata; /* For use by the UTS */
unsigned int tm_uticks;
unsigned int tm_sticks;
- int tm_slices;
int tm_spare[8];
};
@@ -79,10 +78,12 @@ struct kse_mailbox {
stack_t km_stack; /* UTS context */
void *km_udata; /* For use by the UTS */
struct timespec km_timeofday; /* Time of day */
+ int km_quantum; /* Upcall quantum in microseconds */
int km_spare[8];
};
#define KSE_VER_0 0
+#define KSE_VERSION KSE_VER_0
#ifndef _KERNEL
int kse_create(struct kse_mailbox *, int);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 4404796..ceef3ad 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -486,6 +486,8 @@ struct ksegrp {
int kg_numupcalls; /* (j) Num upcalls */
int kg_upsleeps; /* (c) Num threads in kse_release() */
struct kse_thr_mailbox *kg_completed; /* (c) completed thread mboxes */
+ int kg_nextupcall; /* next upcall time */
+ int kg_upquantum; /* quantum to schedule an upcall */
#define kg_endzero kg_pri_class
#define kg_startcopy kg_endzero
@@ -946,7 +948,8 @@ void upcall_unlink(struct kse_upcall *ku);
void upcall_remove(struct thread *td);
void upcall_stash(struct kse_upcall *ke);
void thread_sanity_check(struct thread *td, char *);
-void thread_stopped(struct proc *);
+void thread_stopped(struct proc *p);
+void thread_switchout(struct thread *td);
#endif /* _KERNEL */
#endif /* !_SYS_PROC_H_ */