-rw-r--r--  sys/kern/kern_cpuset.c  | 89
-rw-r--r--  sys/kern/kern_intr.c    | 16
-rw-r--r--  sys/sys/cpuset.h        |  1
3 files changed, 92 insertions, 14 deletions
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
index 42c95c9..aca26ef 100644
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -106,7 +106,7 @@ static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
-static struct cpuset *cpuset_zero;
+static struct cpuset *cpuset_zero, *cpuset_default;
/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD,
@@ -716,6 +716,89 @@ out:
}
/*
+ * Apply new cpumask to the ithread.
+ */
+int
+cpuset_setithread(lwpid_t id, u_char cpu)
+{
+ struct cpuset *nset, *rset;
+ struct cpuset *parent, *old_set;
+ struct thread *td;
+ struct proc *p;
+ cpusetid_t cs_id;
+ cpuset_t mask;
+ int error;
+
+ nset = uma_zalloc(cpuset_zone, M_WAITOK);
+ rset = uma_zalloc(cpuset_zone, M_WAITOK);
+
+ CPU_ZERO(&mask);
+ if (cpu == NOCPU)
+ CPU_COPY(cpuset_root, &mask);
+ else
+ CPU_SET(cpu, &mask);
+
+ error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
+ if (((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID) || error != 0)
+ goto out;
+
+ thread_lock(td);
+ old_set = td->td_cpuset;
+
+ if (cpu == NOCPU) {
+ /*
+ * Roll back to the default set.  We're not using cpuset_shadow()
+ * here because the CPU_SUBSET() check may fail if the default
+ * set does not contain all CPUs.
+ */
+ error = _cpuset_create(nset, cpuset_default, &mask,
+ CPUSET_INVALID);
+
+ goto applyset;
+ }
+
+ if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
+ old_set->cs_parent->cs_id == 1)) {
+ /* Default mask; we need to use a new root set. */
+ error = _cpuset_create(rset, cpuset_zero,
+ &cpuset_zero->cs_mask, cs_id);
+ if (error != 0) {
+ PROC_UNLOCK(p);
+ goto out;
+ }
+ rset->cs_flags |= CPU_SET_ROOT;
+ parent = rset;
+ rset = NULL;
+ cs_id = CPUSET_INVALID;
+ } else {
+ /* Assume the existing set was already allocated by a previous call. */
+ parent = td->td_cpuset;
+ old_set = NULL;
+ }
+
+ error = cpuset_shadow(parent, nset, &mask);
+applyset:
+ if (error == 0) {
+ td->td_cpuset = nset;
+ sched_affinity(td);
+ nset = NULL;
+ }
+ thread_unlock(td);
+ PROC_UNLOCK(p);
+ if (old_set != NULL)
+ cpuset_rel(old_set);
+out:
+ if (nset != NULL)
+ uma_zfree(cpuset_zone, nset);
+ if (rset != NULL)
+ uma_zfree(cpuset_zone, rset);
+ if (cs_id != CPUSET_INVALID)
+ free_unr(cpuset_unr, cs_id);
+ return (error);
+}
+
+
+/*
* Creates the cpuset for thread0. We make two sets:
*
* 0 - The root set which should represent all valid processors in the
@@ -735,6 +818,7 @@ cpuset_thread0(void)
cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, 0);
mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
+
/*
* Create the root system set for the whole machine. Doesn't use
* cpuset_create() due to NULL parent.
@@ -747,12 +831,15 @@ cpuset_thread0(void)
set->cs_flags = CPU_SET_ROOT;
cpuset_zero = set;
cpuset_root = &set->cs_mask;
+
/*
* Now derive a default, modifiable set from that to give out.
*/
set = uma_zalloc(cpuset_zone, M_WAITOK);
error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
KASSERT(error == 0, ("Error creating default set: %d\n", error));
+ cpuset_default = set;
+
/*
* Initialize the unit allocator. 0 and 1 are allocated above.
*/
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index e4f1c82..5555d68 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -295,7 +295,6 @@ intr_event_create(struct intr_event **event, void *source, int flags, int irq,
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
- cpuset_t mask;
lwpid_t id;
int error;
@@ -316,14 +315,9 @@ intr_event_bind(struct intr_event *ie, u_char cpu)
*/
mtx_lock(&ie->ie_lock);
if (ie->ie_thread != NULL) {
- CPU_ZERO(&mask);
- if (cpu == NOCPU)
- CPU_COPY(cpuset_root, &mask);
- else
- CPU_SET(cpu, &mask);
id = ie->ie_thread->it_thread->td_tid;
mtx_unlock(&ie->ie_lock);
- error = cpuset_setthread(id, &mask);
+ error = cpuset_setithread(id, cpu);
if (error)
return (error);
} else
@@ -332,14 +326,10 @@ intr_event_bind(struct intr_event *ie, u_char cpu)
if (error) {
mtx_lock(&ie->ie_lock);
if (ie->ie_thread != NULL) {
- CPU_ZERO(&mask);
- if (ie->ie_cpu == NOCPU)
- CPU_COPY(cpuset_root, &mask);
- else
- CPU_SET(ie->ie_cpu, &mask);
+ cpu = ie->ie_cpu;
id = ie->ie_thread->it_thread->td_tid;
mtx_unlock(&ie->ie_lock);
- (void)cpuset_setthread(id, &mask);
+ (void)cpuset_setithread(id, cpu);
} else
mtx_unlock(&ie->ie_lock);
return (error);
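
A hypothetical driver-side fragment, for illustration only: drivers normally reach intr_event_bind() through bus_bind_intr(9), so after this change a binding request ends up in cpuset_setithread(). The device name, softc layout, and resource field below are invented; this is a sketch of the call path under those assumptions, not code from the tree.

#include <sys/param.h>
#include <sys/bus.h>

struct foo_softc {
	struct resource	*foo_irq_res;	/* from bus_alloc_resource_any() */
};

/* Sketch: bind the interrupt's ithread to 'cpu'; names are hypothetical. */
static int
foo_bind_irq(device_t dev, struct foo_softc *sc, int cpu)
{

	/*
	 * Passing NOCPU instead of a CPU id undoes the binding, which is
	 * the case the new cpuset_setithread() handles via cpuset_default.
	 */
	return (bus_bind_intr(dev, sc->foo_irq_res, cpu));
}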
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index e1ee37d..e1ad341 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -118,6 +118,7 @@ struct cpuset *cpuset_thread0(void);
struct cpuset *cpuset_ref(struct cpuset *);
void cpuset_rel(struct cpuset *);
int cpuset_setthread(lwpid_t id, cpuset_t *);
+int cpuset_setithread(lwpid_t id, u_char cpu);
int cpuset_create_root(struct prison *, struct cpuset **);
int cpuset_setproc_update_set(struct proc *, struct cpuset *);
char *cpusetobj_strprint(char *, const cpuset_t *);