summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_cpuset.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern/kern_cpuset.c')
-rw-r--r--sys/kern/kern_cpuset.c89
1 file changed, 88 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
index 42c95c9..aca26ef 100644
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -106,7 +106,7 @@ static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
-static struct cpuset *cpuset_zero;
+static struct cpuset *cpuset_zero, *cpuset_default;
/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD,
@@ -716,6 +716,89 @@ out:
}
/*
+ * Apply new cpumask to the ithread.
+ */
+int
+cpuset_setithread(lwpid_t id, u_char cpu)
+{
+ struct cpuset *nset, *rset;
+ struct cpuset *parent, *old_set;
+ struct thread *td;
+ struct proc *p;
+ cpusetid_t cs_id;
+ cpuset_t mask;
+ int error;
+
+ nset = uma_zalloc(cpuset_zone, M_WAITOK);
+ rset = uma_zalloc(cpuset_zone, M_WAITOK);
+
+ CPU_ZERO(&mask);
+ if (cpu == NOCPU)
+ CPU_COPY(cpuset_root, &mask);
+ else
+ CPU_SET(cpu, &mask);
+
+ error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
+ if (((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID) || error != 0)
+ goto out;
+
+ thread_lock(td);
+ old_set = td->td_cpuset;
+
+ if (cpu == NOCPU) {
+ /*
+ * roll back to default set. We're not using cpuset_shadow()
+ * here because we can fail CPU_SUBSET() check. This can happen
+ * if default set does not contain all CPUs.
+ */
+ error = _cpuset_create(nset, cpuset_default, &mask,
+ CPUSET_INVALID);
+
+ goto applyset;
+ }
+
+ if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
+ old_set->cs_parent->cs_id == 1)) {
+ /* Default mask, we need to use new root set */
+ error = _cpuset_create(rset, cpuset_zero,
+ &cpuset_zero->cs_mask, cs_id);
+ if (error != 0) {
+ PROC_UNLOCK(p);
+ goto out;
+ }
+ rset->cs_flags |= CPU_SET_ROOT;
+ parent = rset;
+ rset = NULL;
+ cs_id = CPUSET_INVALID;
+ } else {
+ /* Assume existing set was already allocated by previous call */
+ parent = td->td_cpuset;
+ old_set = NULL;
+ }
+
+ error = cpuset_shadow(parent, nset, &mask);
+applyset:
+ if (error == 0) {
+ td->td_cpuset = nset;
+ sched_affinity(td);
+ nset = NULL;
+ }
+ thread_unlock(td);
+ PROC_UNLOCK(p);
+ if (old_set != NULL)
+ cpuset_rel(old_set);
+out:
+ if (nset != NULL)
+ uma_zfree(cpuset_zone, nset);
+ if (rset != NULL)
+ uma_zfree(cpuset_zone, rset);
+ if (cs_id != CPUSET_INVALID)
+ free_unr(cpuset_unr, cs_id);
+ return (error);
+}
+
+
+/*
* Creates the cpuset for thread0. We make two sets:
*
* 0 - The root set which should represent all valid processors in the
@@ -735,6 +818,7 @@ cpuset_thread0(void)
cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, 0);
mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
+
/*
* Create the root system set for the whole machine. Doesn't use
* cpuset_create() due to NULL parent.
@@ -747,12 +831,15 @@ cpuset_thread0(void)
set->cs_flags = CPU_SET_ROOT;
cpuset_zero = set;
cpuset_root = &set->cs_mask;
+
/*
* Now derive a default, modifiable set from that to give out.
*/
set = uma_zalloc(cpuset_zone, M_WAITOK);
error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
KASSERT(error == 0, ("Error creating default set: %d\n", error));
+ cpuset_default = set;
+
/*
* Initialize the unit allocator. 0 and 1 are allocated above.
*/
OpenPOWER on IntegriCloud