path: root/sys/kern
author      attilio <attilio@FreeBSD.org>    2011-05-05 14:39:14 +0000
committer   attilio <attilio@FreeBSD.org>    2011-05-05 14:39:14 +0000
commit      fe4de567b50f7ca317b16f69b7b3a7de693025af (patch)
tree        e5d54bcefbf1fe0c4c6804bdc5c4852b1b64518a /sys/kern
parent      d3d3db9bac709a7fa4319bf5e8c8fb4e05918772 (diff)
download    FreeBSD-src-fe4de567b50f7ca317b16f69b7b3a7de693025af.zip
            FreeBSD-src-fe4de567b50f7ca317b16f69b7b3a7de693025af.tar.gz
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).

Right now, cpumask_t is an int, 32 bits on all our supported
architectures. cpuset_t, on the other side, is implemented as an array
of longs, and easily extendible by definition.

The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN

while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.

Some technical notes:
- This commit may be considered an ABI nop for all the architectures
  different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, need to be
  accessed avoiding migration, because the size of cpuset_t should be
  considered unknown
- size of cpuset_t objects is different between kernel and userland
  (this is primarily done in order to leave some more space in userland
  to cope with KBI extensions). If you need to access kernel cpuset_t
  from userland, please refer to the examples in this patch on how to do
  that correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now

The patch has been tested by sbruno and Nicholas Esborn on an Opteron
machine with 4 x 12-core CPU packages. More testing on big SMP machines
is expected to come soon.
pluknet tested the patch with his 8-way systems on both amd64 and i386.

Tested by:	pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by:	jeff, jhb, sbruno
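The conversion follows one recurring idiom throughout the patch: integer
bit arithmetic on cpumask_t becomes a CPU_*() macro call on cpuset_t,
whose internal word layout callers must treat as opaque. A minimal
before/after sketch of that idiom, assuming a kernel context
(illustrative only, not part of the commit):

    #include <sys/param.h>
    #include <sys/cpuset.h>

    extern cpuset_t hlt_cpus_mask;	/* any converted mask works here */

    static int
    cpu_is_halted(int cpu)
    {

    	/* Before: return ((hlt_cpus_mask & (1 << cpu)) != 0); */
    	/* After: the set may span several words, so use the macro API. */
    	return (CPU_ISSET(cpu, &hlt_cpus_mask));
    }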
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/kern_cpuset.c   48
-rw-r--r--   sys/kern/kern_pmc.c       6
-rw-r--r--   sys/kern/kern_rmlock.c   15
-rw-r--r--   sys/kern/sched_4bsd.c    66
-rw-r--r--   sys/kern/sched_ule.c      9
-rw-r--r--   sys/kern/subr_kdb.c       3
-rw-r--r--   sys/kern/subr_pcpu.c      2
-rw-r--r--   sys/kern/subr_smp.c      87
8 files changed, 159 insertions, 77 deletions
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
index bf9eac7..9ed19d4 100644
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -617,6 +617,49 @@ out:
}
/*
+ * Calculate the ffs() of the cpuset.
+ */
+int
+cpusetobj_ffs(const cpuset_t *set)
+{
+ size_t i;
+ int cbit;
+
+ cbit = 0;
+ for (i = 0; i < _NCPUWORDS; i++) {
+ if (set->__bits[i] != 0) {
+ cbit = ffsl(set->__bits[i]);
+ cbit += i * _NCPUBITS;
+ break;
+ }
+ }
+ return (cbit);
+}
+
+/*
+ * Return a string representing a valid layout for a cpuset_t object.
+ * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
+ */
+char *
+cpusetobj_strprint(char *buf, const cpuset_t *set)
+{
+ char *tbuf;
+ size_t i, bytesp, bufsiz;
+
+ tbuf = buf;
+ bytesp = 0;
+ bufsiz = CPUSETBUFSIZ;
+
+ for (i = 0; i < (_NCPUWORDS - 1); i++) {
+ bytesp = snprintf(tbuf, bufsiz, "%lx, ", set->__bits[i]);
+ bufsiz -= bytesp;
+ tbuf += bytesp;
+ }
+ snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
+ return (buf);
+}
+
+/*
* Apply an anonymous mask to a single thread.
*/
int
@@ -754,11 +797,10 @@ cpuset_init(void *arg)
{
cpuset_t mask;
- CPU_ZERO(&mask);
#ifdef SMP
- mask.__bits[0] = all_cpus;
+ mask = all_cpus;
#else
- mask.__bits[0] = 1;
+ CPU_SETOF(0, &mask);
#endif
if (cpuset_modify(cpuset_zero, &mask))
panic("Can't set initial cpuset mask.\n");
diff --git a/sys/kern/kern_pmc.c b/sys/kern/kern_pmc.c
index 7532378..8d9c7c0 100644
--- a/sys/kern/kern_pmc.c
+++ b/sys/kern/kern_pmc.c
@@ -55,7 +55,7 @@ int (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
int (*pmc_intr)(int cpu, struct trapframe *tf) = NULL;
/* Bitmask of CPUs requiring servicing at hardclock time */
-volatile cpumask_t pmc_cpumask;
+volatile cpuset_t pmc_cpumask;
/*
* A global count of SS mode PMCs. When non-zero, this means that
@@ -112,7 +112,7 @@ pmc_cpu_is_active(int cpu)
{
#ifdef SMP
return (pmc_cpu_is_present(cpu) &&
- (hlt_cpus_mask & (1 << cpu)) == 0);
+ !CPU_ISSET(cpu, &hlt_cpus_mask));
#else
return (1);
#endif
@@ -139,7 +139,7 @@ int
pmc_cpu_is_primary(int cpu)
{
#ifdef SMP
- return ((logical_cpus_mask & (1 << cpu)) == 0);
+ return (!CPU_ISSET(cpu, &logical_cpus_mask));
#else
return (1);
#endif
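Hunks like the two above rely on CPU_ISSET() hiding the array-of-longs
layout. As a rough sketch of what the membership test reduces to on this
branch (helper name hypothetical; the real macro comes from the bitset
machinery behind <sys/cpuset.h>):

    static __inline int
    cpu_isset_sketch(int cpu, const volatile cpuset_t *set)
    {

    	return ((set->__bits[cpu / _NCPUBITS] &
    	    (1L << (cpu % _NCPUBITS))) != 0);
    }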
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 7f2b4e7..3214e1b 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -263,7 +263,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
pc = pcpu_find(curcpu);
/* Check if we just need to do a proper critical_exit. */
- if (!(pc->pc_cpumask & rm->rm_writecpus)) {
+ if (!CPU_OVERLAP(&pc->pc_cpumask, &rm->rm_writecpus)) {
critical_exit();
return (1);
}
@@ -325,7 +325,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
critical_enter();
pc = pcpu_find(curcpu);
- rm->rm_writecpus &= ~pc->pc_cpumask;
+ CPU_NAND(&rm->rm_writecpus, &pc->pc_cpumask);
rm_tracker_add(pc, tracker);
sched_pin();
critical_exit();
@@ -366,7 +366,8 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
* Fast path to combine two common conditions into a single
* conditional jump.
*/
- if (0 == (td->td_owepreempt | (rm->rm_writecpus & pc->pc_cpumask)))
+ if (0 == (td->td_owepreempt |
+ CPU_OVERLAP(&rm->rm_writecpus, &pc->pc_cpumask)))
return (1);
/* We do not have a read token and need to acquire one. */
@@ -429,17 +430,17 @@ _rm_wlock(struct rmlock *rm)
{
struct rm_priotracker *prio;
struct turnstile *ts;
- cpumask_t readcpus;
+ cpuset_t readcpus;
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
mtx_lock(&rm->rm_lock_mtx);
- if (rm->rm_writecpus != all_cpus) {
+ if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
/* Get all read tokens back */
-
- readcpus = all_cpus & (all_cpus & ~rm->rm_writecpus);
+ readcpus = all_cpus;
+ CPU_NAND(&readcpus, &rm->rm_writecpus);
rm->rm_writecpus = all_cpus;
/*
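The _rm_wlock() rewrite above depends on CPU_NAND(&dst, &src) behaving
as dst &= ~src, which is why the old single expression
"all_cpus & ~rm->rm_writecpus" becomes a copy followed by a clear. A
word-wise sketch of the operation (helper name hypothetical):

    static __inline void
    cpu_nand_sketch(cpuset_t *dst, const cpuset_t *src)
    {
    	size_t i;

    	for (i = 0; i < _NCPUWORDS; i++)
    		dst->__bits[i] &= ~src->__bits[i];
    }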
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fef9e25..2fad27c 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -156,7 +156,7 @@ static struct runq runq;
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];
-static cpumask_t idle_cpus_mask;
+static cpuset_t idle_cpus_mask;
#endif
struct pcpuidlestat {
@@ -951,7 +951,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td->td_flags & TDF_IDLETD) {
TD_SET_CAN_RUN(td);
#ifdef SMP
- idle_cpus_mask &= ~PCPU_GET(cpumask);
+ /* Spinlock held here, assume no migration. */
+ CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
#endif
} else {
if (TD_IS_RUNNING(td)) {
@@ -1025,7 +1026,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
#ifdef SMP
if (td->td_flags & TDF_IDLETD)
- idle_cpus_mask |= PCPU_GET(cpumask);
+ CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
#endif
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
@@ -1054,7 +1055,8 @@ static int
forward_wakeup(int cpunum)
{
struct pcpu *pc;
- cpumask_t dontuse, id, map, map2, me;
+ cpuset_t dontuse, id, map, map2, me;
+ int iscpuset;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1071,32 +1073,38 @@ forward_wakeup(int cpunum)
/*
* Check the idle mask we received against what we calculated
* before in the old version.
+ *
+ * Also note that sched_lock is held now, thus no migration is
+ * expected.
*/
me = PCPU_GET(cpumask);
/* Don't bother if we should be doing it ourself. */
- if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
+ if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
+ (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
return (0);
- dontuse = me | stopped_cpus | hlt_cpus_mask;
- map2 = 0;
+ dontuse = me;
+ CPU_OR(&dontuse, &stopped_cpus);
+ CPU_OR(&dontuse, &hlt_cpus_mask);
+ CPU_ZERO(&map2);
if (forward_wakeup_use_loop) {
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
- if ((id & dontuse) == 0 &&
+ if (!CPU_OVERLAP(&id, &dontuse) &&
pc->pc_curthread == pc->pc_idlethread) {
- map2 |= id;
+ CPU_OR(&map2, &id);
}
}
}
if (forward_wakeup_use_mask) {
- map = 0;
- map = idle_cpus_mask & ~dontuse;
+ map = idle_cpus_mask;
+ CPU_NAND(&map, &dontuse);
/* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
- if (map != map2) {
+ if (CPU_CMP(&map, &map2)) {
printf("map != map2, loop method preferred\n");
map = map2;
}
@@ -1108,18 +1116,22 @@ forward_wakeup(int cpunum)
/* If we only allow a specific CPU, then mask off all the others. */
if (cpunum != NOCPU) {
KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
- map &= (1 << cpunum);
+ iscpuset = CPU_ISSET(cpunum, &map);
+ if (iscpuset == 0)
+ CPU_ZERO(&map);
+ else
+ CPU_SETOF(cpunum, &map);
}
- if (map) {
+ if (!CPU_EMPTY(&map)) {
forward_wakeups_delivered++;
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
- if ((map & id) == 0)
+ if (!CPU_OVERLAP(&map, &id))
continue;
if (cpu_idle_wakeup(pc->pc_cpuid))
- map &= ~id;
+ CPU_NAND(&map, &id);
}
- if (map)
+ if (!CPU_EMPTY(&map))
ipi_selected(map, IPI_AST);
return (1);
}
@@ -1135,7 +1147,7 @@ kick_other_cpu(int pri, int cpuid)
int cpri;
pcpu = pcpu_find(cpuid);
- if (idle_cpus_mask & pcpu->pc_cpumask) {
+ if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
forward_wakeups_delivered++;
if (!cpu_idle_wakeup(cpuid))
ipi_cpu(cpuid, IPI_AST);
@@ -1193,6 +1205,7 @@ void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
+ cpuset_t idle, me, tidlemsk;
struct td_sched *ts;
int forwarded = 0;
int cpu;
@@ -1262,11 +1275,20 @@ sched_add(struct thread *td, int flags)
kick_other_cpu(td->td_priority, cpu);
} else {
if (!single_cpu) {
- cpumask_t me = PCPU_GET(cpumask);
- cpumask_t idle = idle_cpus_mask & me;
- if (!idle && ((flags & SRQ_INTR) == 0) &&
- (idle_cpus_mask & ~(hlt_cpus_mask | me)))
+ /*
+ * Thread spinlock is held here, assume no
+ * migration is possible.
+ */
+ me = PCPU_GET(cpumask);
+ idle = idle_cpus_mask;
+ tidlemsk = idle;
+ CPU_AND(&idle, &me);
+ CPU_OR(&me, &hlt_cpus_mask);
+ CPU_NAND(&tidlemsk, &me);
+
+ if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+ !CPU_EMPTY(&tidlemsk))
forwarded = forward_wakeup(cpu);
}
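The sched_add() hunk above expands what used to be two one-line integer
expressions; read on its own, the new sequence computes the same values.
A sketch with each step mapped back to the old cpumask_t form:

    me = PCPU_GET(cpumask);	/* thread lock held: no migration */
    idle = idle_cpus_mask;
    tidlemsk = idle;
    CPU_AND(&idle, &me);	/* old: idle = idle_cpus_mask & me */
    CPU_OR(&me, &hlt_cpus_mask);	/* reuse 'me' as scratch */
    CPU_NAND(&tidlemsk, &me);	/* old: idle_cpus_mask & ~(hlt_cpus_mask | me) */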
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index ac18e77..05267f3 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -564,7 +564,7 @@ struct cpu_search {
#define CPUSET_FOREACH(cpu, mask) \
for ((cpu) = 0; (cpu) <= mp_maxid; (cpu)++) \
- if ((mask) & 1 << (cpu))
+ if (CPU_ISSET(cpu, &mask))
static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
struct cpu_search *high, const int match);
@@ -2650,15 +2650,16 @@ static int
sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
int indent)
{
+ char cpusetbuf[CPUSETBUFSIZ];
int i, first;
sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
"", 1 + indent / 2, cg->cg_level);
- sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%x\">", indent, "",
- cg->cg_count, cg->cg_mask);
+ sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
+ cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
first = TRUE;
for (i = 0; i < MAXCPU; i++) {
- if ((cg->cg_mask & (1 << i)) != 0) {
+ if (CPU_ISSET(i, &cg->cg_mask)) {
if (!first)
sbuf_printf(sb, ", ");
else
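For callers, CPUSET_FOREACH() keeps its old shape now that the body
tests CPU_ISSET(); for example, counting the members of a group mask
(illustrative only):

    int cpu, count;

    count = 0;
    CPUSET_FOREACH(cpu, cg->cg_mask)
    	count++;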
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 342c5ca..bb78c00 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -413,7 +413,8 @@ kdb_thr_ctx(struct thread *thr)
#if defined(SMP) && defined(KDB_STOPPEDPCB)
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
- if (pc->pc_curthread == thr && (stopped_cpus & pc->pc_cpumask))
+ if (pc->pc_curthread == thr &&
+ CPU_OVERLAP(&stopped_cpus, &pc->pc_cpumask))
return (KDB_STOPPEDPCB(pc));
}
#endif
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index de5cafc..9201bb7 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -87,7 +87,7 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
KASSERT(cpuid >= 0 && cpuid < MAXCPU,
("pcpu_init: invalid cpuid %d", cpuid));
pcpu->pc_cpuid = cpuid;
- pcpu->pc_cpumask = 1 << cpuid;
+ CPU_SETOF(cpuid, &pcpu->pc_cpumask);
cpuid_to_pcpu[cpuid] = pcpu;
SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
cpu_pcpu_init(pcpu, cpuid, size);
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index aba6f0e..ae061f3 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -53,15 +53,15 @@ __FBSDID("$FreeBSD$");
#include "opt_sched.h"
#ifdef SMP
-volatile cpumask_t stopped_cpus;
-volatile cpumask_t started_cpus;
-cpumask_t hlt_cpus_mask;
-cpumask_t logical_cpus_mask;
+volatile cpuset_t stopped_cpus;
+volatile cpuset_t started_cpus;
+cpuset_t hlt_cpus_mask;
+cpuset_t logical_cpus_mask;
void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
-cpumask_t all_cpus;
+cpuset_t all_cpus;
int mp_ncpus;
/* export this for libkvm consumers. */
@@ -199,8 +199,11 @@ forward_signal(struct thread *td)
*
*/
static int
-generic_stop_cpus(cpumask_t map, u_int type)
+generic_stop_cpus(cpuset_t map, u_int type)
{
+#ifdef KTR
+ char cpusetbuf[CPUSETBUFSIZ];
+#endif
static volatile u_int stopping_cpu = NOCPU;
int i;
@@ -215,7 +218,8 @@ generic_stop_cpus(cpumask_t map, u_int type)
if (!smp_started)
return (0);
- CTR2(KTR_SMP, "stop_cpus(%x) with %u type", map, type);
+ CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
+ cpusetobj_strprint(cpusetbuf, &map), type);
if (stopping_cpu != PCPU_GET(cpuid))
while (atomic_cmpset_int(&stopping_cpu, NOCPU,
@@ -227,7 +231,7 @@ generic_stop_cpus(cpumask_t map, u_int type)
ipi_selected(map, type);
i = 0;
- while ((stopped_cpus & map) != map) {
+ while (!CPU_SUBSET(&stopped_cpus, &map)) {
/* spin */
cpu_spinwait();
i++;
@@ -244,14 +248,14 @@ generic_stop_cpus(cpumask_t map, u_int type)
}
int
-stop_cpus(cpumask_t map)
+stop_cpus(cpuset_t map)
{
return (generic_stop_cpus(map, IPI_STOP));
}
int
-stop_cpus_hard(cpumask_t map)
+stop_cpus_hard(cpuset_t map)
{
return (generic_stop_cpus(map, IPI_STOP_HARD));
@@ -259,7 +263,7 @@ stop_cpus_hard(cpumask_t map)
#if defined(__amd64__)
int
-suspend_cpus(cpumask_t map)
+suspend_cpus(cpuset_t map)
{
return (generic_stop_cpus(map, IPI_SUSPEND));
@@ -280,19 +284,22 @@ suspend_cpus(cpumask_t map)
* 1: ok
*/
int
-restart_cpus(cpumask_t map)
+restart_cpus(cpuset_t map)
{
+#ifdef KTR
+ char cpusetbuf[CPUSETBUFSIZ];
+#endif
if (!smp_started)
return 0;
- CTR1(KTR_SMP, "restart_cpus(%x)", map);
+ CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
/* signal other cpus to restart */
- atomic_store_rel_int(&started_cpus, map);
+ CPU_COPY_STORE_REL(&map, &started_cpus);
/* wait for each to clear its bit */
- while ((stopped_cpus & map) != 0)
+ while (CPU_OVERLAP(&stopped_cpus, &map))
cpu_spinwait();
return 1;
@@ -348,13 +355,13 @@ smp_rendezvous_action(void)
}
void
-smp_rendezvous_cpus(cpumask_t map,
+smp_rendezvous_cpus(cpuset_t map,
void (* setup_func)(void *),
void (* action_func)(void *),
void (* teardown_func)(void *),
void *arg)
{
- int i, ncpus = 0;
+ int curcpumap, i, ncpus = 0;
if (!smp_started) {
if (setup_func != NULL)
@@ -367,11 +374,11 @@ smp_rendezvous_cpus(cpumask_t map,
}
CPU_FOREACH(i) {
- if (((1 << i) & map) != 0)
+ if (CPU_ISSET(i, &map))
ncpus++;
}
if (ncpus == 0)
- panic("ncpus is 0 with map=0x%x", map);
+ panic("ncpus is 0 with non-zero map");
/* obtain rendezvous lock */
mtx_lock_spin(&smp_ipi_mtx);
@@ -387,10 +394,12 @@ smp_rendezvous_cpus(cpumask_t map,
atomic_store_rel_int(&smp_rv_waiters[0], 0);
/* signal other processors, which will enter the IPI with interrupts off */
- ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);
+ curcpumap = CPU_ISSET(curcpu, &map);
+ CPU_CLR(curcpu, &map);
+ ipi_selected(map, IPI_RENDEZVOUS);
/* Check if the current CPU is in the map */
- if ((map & (1 << curcpu)) != 0)
+ if (curcpumap != 0)
smp_rendezvous_action();
if (teardown_func == smp_no_rendevous_barrier)
@@ -415,6 +424,7 @@ static struct cpu_group group[MAXCPU];
struct cpu_group *
smp_topo(void)
{
+ char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
struct cpu_group *top;
/*
@@ -461,9 +471,10 @@ smp_topo(void)
if (top->cg_count != mp_ncpus)
panic("Built bad topology at %p. CPU count %d != %d",
top, top->cg_count, mp_ncpus);
- if (top->cg_mask != all_cpus)
- panic("Built bad topology at %p. CPU mask 0x%X != 0x%X",
- top, top->cg_mask, all_cpus);
+ if (CPU_CMP(&top->cg_mask, &all_cpus))
+ panic("Built bad topology at %p. CPU mask (%s) != (%s)",
+ top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
+ cpusetobj_strprint(cpusetbuf2, &all_cpus));
return (top);
}
@@ -488,11 +499,13 @@ static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
int count, int flags, int start)
{
- cpumask_t mask;
+ char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
+ cpuset_t mask;
int i;
- for (mask = 0, i = 0; i < count; i++, start++)
- mask |= (1 << start);
+ CPU_ZERO(&mask);
+ for (i = 0; i < count; i++, start++)
+ CPU_SET(start, &mask);
child->cg_parent = parent;
child->cg_child = NULL;
child->cg_children = 0;
@@ -502,10 +515,12 @@ smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
child->cg_mask = mask;
parent->cg_children++;
for (; parent != NULL; parent = parent->cg_parent) {
- if ((parent->cg_mask & child->cg_mask) != 0)
- panic("Duplicate children in %p. mask 0x%X child 0x%X",
- parent, parent->cg_mask, child->cg_mask);
- parent->cg_mask |= child->cg_mask;
+ if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
+ panic("Duplicate children in %p. mask (%s) child (%s)",
+ parent,
+ cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
+ cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
+ CPU_OR(&parent->cg_mask, &child->cg_mask);
parent->cg_count += child->cg_count;
}
@@ -565,20 +580,20 @@ struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
struct cpu_group *cg;
- cpumask_t mask;
+ cpuset_t mask;
int children;
int i;
- mask = (1 << cpu);
+ CPU_SETOF(cpu, &mask);
cg = top;
for (;;) {
- if ((cg->cg_mask & mask) == 0)
+ if (!CPU_OVERLAP(&cg->cg_mask, &mask))
return (NULL);
if (cg->cg_children == 0)
return (cg);
children = cg->cg_children;
for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
- if ((cg->cg_mask & mask) != 0)
+ if (CPU_OVERLAP(&cg->cg_mask, &mask))
break;
}
return (NULL);
@@ -586,7 +601,7 @@ smp_topo_find(struct cpu_group *top, int cpu)
#else /* !SMP */
void
-smp_rendezvous_cpus(cpumask_t map,
+smp_rendezvous_cpus(cpuset_t map,
void (*setup_func)(void *),
void (*action_func)(void *),
void (*teardown_func)(void *),
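With the new signature the map is still passed by value, only as a
cpuset_t. A hypothetical caller that runs a handler on every online CPU
and waits on the default barrier (sketch, not from the commit):

    static void
    do_work(void *arg)
    {

    	/* Runs on each CPU in the map, with interrupts disabled. */
    }

    static void
    kick_all_cpus(void *arg)
    {

    	smp_rendezvous_cpus(all_cpus, NULL, do_work, NULL, arg);
    }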