Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_cpuset.c		| 88
-rw-r--r--	sys/kern/kern_ktr.c		| 61
-rw-r--r--	sys/kern/kern_pmc.c		| 6
-rw-r--r--	sys/kern/kern_rmlock.c		| 15
-rw-r--r--	sys/kern/kern_shutdown.c	| 30
-rw-r--r--	sys/kern/ksched.c		| 2
-rw-r--r--	sys/kern/sched_4bsd.c		| 66
-rw-r--r--	sys/kern/sched_ule.c		| 9
-rw-r--r--	sys/kern/subr_kdb.c		| 3
-rw-r--r--	sys/kern/subr_pcpu.c		| 2
-rw-r--r--	sys/kern/subr_prf.c		| 3
-rw-r--r--	sys/kern/subr_rman.c		| 1
-rw-r--r--	sys/kern/subr_smp.c		| 87
-rw-r--r--	sys/kern/uipc_syscalls.c	| 4

14 files changed, 272 insertions(+), 105 deletions(-)
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
index 6489ffb..e1f2801 100644
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/cpuset.h>
 #include <sys/sx.h>
 #include <sys/queue.h>
+#include <sys/libkern.h>
 #include <sys/limits.h>
 #include <sys/bus.h>
 #include <sys/interrupt.h>
@@ -617,6 +618,86 @@ out:
 }
 
 /*
+ * Calculate the ffs() of the cpuset.
+ */
+int
+cpusetobj_ffs(const cpuset_t *set)
+{
+	size_t i;
+	int cbit;
+
+	cbit = 0;
+	for (i = 0; i < _NCPUWORDS; i++) {
+		if (set->__bits[i] != 0) {
+			cbit = ffsl(set->__bits[i]);
+			cbit += i * _NCPUBITS;
+			break;
+		}
+	}
+	return (cbit);
+}
+
+/*
+ * Return a string representing a valid layout for a cpuset_t object.
+ * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
+ */
+char *
+cpusetobj_strprint(char *buf, const cpuset_t *set)
+{
+	char *tbuf;
+	size_t i, bytesp, bufsiz;
+
+	tbuf = buf;
+	bytesp = 0;
+	bufsiz = CPUSETBUFSIZ;
+
+	for (i = _NCPUWORDS - 1; i > 0; i--) {
+		bytesp = snprintf(tbuf, bufsiz, "%lx, ", set->__bits[i]);
+		bufsiz -= bytesp;
+		tbuf += bytesp;
+	}
+	snprintf(tbuf, bufsiz, "%lx", set->__bits[0]);
+	return (buf);
+}
+
+/*
+ * Build a valid cpuset_t object from a string representation.
+ * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
+ */
+int
+cpusetobj_strscan(cpuset_t *set, const char *buf)
+{
+	u_int nwords;
+	int i, ret;
+
+	if (strlen(buf) > CPUSETBUFSIZ - 1)
+		return (-1);
+
+	/* Allow to pass a shorter version of the mask when necessary. */
+	nwords = 1;
+	for (i = 0; buf[i] != '\0'; i++)
+		if (buf[i] == ',')
+			nwords++;
+	if (nwords > _NCPUWORDS)
+		return (-1);
+
+	CPU_ZERO(set);
+	for (i = nwords - 1; i > 0; i--) {
+		ret = sscanf(buf, "%lx, ", &set->__bits[i]);
+		if (ret == 0 || ret == -1)
+			return (-1);
+		buf = strstr(buf, " ");
+		if (buf == NULL)
+			return (-1);
+		buf++;
+	}
+	ret = sscanf(buf, "%lx", &set->__bits[0]);
+	if (ret == 0 || ret == -1)
+		return (-1);
+	return (0);
+}
+
+/*
  * Apply an anonymous mask to a single thread.
  */
 int
@@ -754,12 +835,7 @@ cpuset_init(void *arg)
 {
	cpuset_t mask;
 
-	CPU_ZERO(&mask);
-#ifdef SMP
-	mask.__bits[0] = all_cpus;
-#else
-	mask.__bits[0] = 1;
-#endif
+	mask = all_cpus;
 	if (cpuset_modify(cpuset_zero, &mask))
 		panic("Can't set initial cpuset mask.\n");
 	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
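The three helpers added above define the string form of a cpuset_t: comma-separated hex words, most-significant word first, with a shorter string filling only the low words. The userland sketch below mirrors that round trip; _NWORDS, MASKBUFSIZ and the mask_* names are stand-ins for the kernel's _NCPUWORDS, CPUSETBUFSIZ and cpusetobj_* helpers, so this is illustrative rather than the kernel code itself.

/*
 * Userland sketch of the cpusetobj_strprint()/cpusetobj_strscan()
 * format.  All names are stand-ins for the kernel identifiers.
 */
#include <stdio.h>
#include <string.h>

#define	_NWORDS		4
#define	MASKBUFSIZ	((2 + sizeof(long) * 2) * _NWORDS)

static char *
mask_print(char *buf, const unsigned long *bits)
{
	char *tbuf = buf;
	size_t bufsiz = MASKBUFSIZ;
	int i, bytesp;

	/* Most-significant word first, separated by ", ". */
	for (i = _NWORDS - 1; i > 0; i--) {
		bytesp = snprintf(tbuf, bufsiz, "%lx, ", bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", bits[0]);
	return (buf);
}

static int
mask_scan(unsigned long *bits, const char *buf)
{
	unsigned int nwords;
	int i;

	/* A shorter string only fills the low words. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NWORDS)
		return (-1);
	memset(bits, 0, sizeof(unsigned long) * _NWORDS);
	for (i = nwords - 1; i > 0; i--) {
		if (sscanf(buf, "%lx, ", &bits[i]) != 1)
			return (-1);
		buf = strstr(buf, " ");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	return (sscanf(buf, "%lx", &bits[0]) == 1 ? 0 : -1);
}

int
main(void)
{
	unsigned long in[_NWORDS] = { 0x5, 0x0, 0x0, 0x1 }, out[_NWORDS];
	char buf[MASKBUFSIZ];

	printf("%s\n", mask_print(buf, in));	/* prints "1, 0, 0, 5" */
	return (mask_scan(out, buf) == 0 &&
	    memcmp(in, out, sizeof(in)) == 0 ? 0 : 1);
}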
diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c
index 2e5e06f..eff3d5b 100644
--- a/sys/kern/kern_ktr.c
+++ b/sys/kern/kern_ktr.c
@@ -40,8 +40,10 @@ __FBSDID("$FreeBSD$");
 #include "opt_alq.h"
 
 #include <sys/param.h>
+#include <sys/queue.h>
 #include <sys/alq.h>
 #include <sys/cons.h>
+#include <sys/cpuset.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/libkern.h>
@@ -68,10 +70,6 @@ __FBSDID("$FreeBSD$");
 #define	KTR_MASK	(0)
 #endif
 
-#ifndef KTR_CPUMASK
-#define	KTR_CPUMASK	(~0)
-#endif
-
 #ifndef KTR_TIME
 #define	KTR_TIME	get_cyclecount()
 #endif
@@ -84,11 +82,6 @@ FEATURE(ktr, "Kernel support for KTR kernel tracing facility");
 
 SYSCTL_NODE(_debug, OID_AUTO, ktr, CTLFLAG_RD, 0, "KTR options");
 
-int	ktr_cpumask = KTR_CPUMASK;
-TUNABLE_INT("debug.ktr.cpumask", &ktr_cpumask);
-SYSCTL_INT(_debug_ktr, OID_AUTO, cpumask, CTLFLAG_RW,
-    &ktr_cpumask, 0, "Bitmask of CPUs on which KTR logging is enabled");
-
 int	ktr_mask = KTR_MASK;
 TUNABLE_INT("debug.ktr.mask", &ktr_mask);
 SYSCTL_INT(_debug_ktr, OID_AUTO, mask, CTLFLAG_RW,
@@ -106,6 +99,54 @@ int	ktr_version = KTR_VERSION;
 SYSCTL_INT(_debug_ktr, OID_AUTO, version, CTLFLAG_RD,
     &ktr_version, 0, "Version of the KTR interface");
 
+cpuset_t ktr_cpumask;
+static char ktr_cpumask_str[CPUSETBUFSIZ];
+TUNABLE_STR("debug.ktr.cpumask", ktr_cpumask_str, sizeof(ktr_cpumask_str));
+
+static void
+ktr_cpumask_initializer(void *dummy __unused)
+{
+
+	CPU_FILL(&ktr_cpumask);
+#ifdef KTR_CPUMASK
+	if (cpusetobj_strscan(&ktr_cpumask, KTR_CPUMASK) == -1)
+		CPU_FILL(&ktr_cpumask);
+#endif
+
+	/*
+	 * TUNABLE_STR() runs with SI_ORDER_MIDDLE priority, thus it must be
+	 * already set, if necessary.
+	 */
+	if (ktr_cpumask_str[0] != '\0' &&
+	    cpusetobj_strscan(&ktr_cpumask, ktr_cpumask_str) == -1)
+		CPU_FILL(&ktr_cpumask);
+}
+SYSINIT(ktr_cpumask_initializer, SI_SUB_TUNABLES, SI_ORDER_ANY,
+    ktr_cpumask_initializer, NULL);
+
+static int
+sysctl_debug_ktr_cpumask(SYSCTL_HANDLER_ARGS)
+{
+	char lktr_cpumask_str[CPUSETBUFSIZ];
+	cpuset_t imask;
+	int error;
+
+	cpusetobj_strprint(lktr_cpumask_str, &ktr_cpumask);
+	error = sysctl_handle_string(oidp, lktr_cpumask_str,
+	    sizeof(lktr_cpumask_str), req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	if (cpusetobj_strscan(&imask, lktr_cpumask_str) == -1)
+		return (EINVAL);
+	CPU_COPY(&imask, &ktr_cpumask);
+
+	return (error);
+}
+SYSCTL_PROC(_debug_ktr, OID_AUTO, cpumask,
+    CTLFLAG_RW | CTLFLAG_MPSAFE | CTLTYPE_STRING, NULL, 0,
+    sysctl_debug_ktr_cpumask, "S",
+    "Bitmask of CPUs on which KTR logging is enabled");
+
 volatile int	ktr_idx = 0;
 struct	ktr_entry ktr_buf[KTR_ENTRIES];
 
@@ -213,7 +254,7 @@ ktr_tracepoint(u_int mask, const char *file, int line, const char *format,
 	if ((ktr_mask & mask) == 0)
 		return;
 	cpu = KTR_CPU;
-	if (((1 << cpu) & ktr_cpumask) == 0)
+	if (!CPU_ISSET(cpu, &ktr_cpumask))
 		return;
#if defined(KTR_VERBOSE) || defined(KTR_ALQ)
 	td = curthread;
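With this change debug.ktr.cpumask becomes a string: the loader tunable is parsed at SI_SUB_TUNABLES time and the sysctl round-trips through cpusetobj_strprint()/cpusetobj_strscan(). A FreeBSD-specific userland sketch of driving it via sysctlbyname(3) follows; the written value "1" (CPU 0 only) is just an example and error handling is minimal.

/*
 * Sketch of reading and writing the new string-typed
 * debug.ktr.cpumask sysctl from userland (FreeBSD only).
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[128];
	size_t len = sizeof(buf);

	/* Read the current mask as a string, e.g. "0, 0, 0, f". */
	if (sysctlbyname("debug.ktr.cpumask", buf, &len, NULL, 0) == 0)
		printf("KTR enabled on: %s\n", buf);

	/* Restrict KTR logging to CPU 0 (example value). */
	if (sysctlbyname("debug.ktr.cpumask", NULL, NULL, "1",
	    strlen("1") + 1) != 0)
		perror("debug.ktr.cpumask");
	return (0);
}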
diff --git a/sys/kern/kern_pmc.c b/sys/kern/kern_pmc.c
index 7532378..8d9c7c0 100644
--- a/sys/kern/kern_pmc.c
+++ b/sys/kern/kern_pmc.c
@@ -55,7 +55,7 @@ int (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
 int (*pmc_intr)(int cpu, struct trapframe *tf) = NULL;
 
 /* Bitmask of CPUs requiring servicing at hardclock time */
-volatile cpumask_t pmc_cpumask;
+volatile cpuset_t pmc_cpumask;
 
 /*
  * A global count of SS mode PMCs.  When non-zero, this means that
@@ -112,7 +112,7 @@ pmc_cpu_is_active(int cpu)
 {
 #ifdef	SMP
 	return (pmc_cpu_is_present(cpu) &&
-	    (hlt_cpus_mask & (1 << cpu)) == 0);
+	    !CPU_ISSET(cpu, &hlt_cpus_mask));
 #else
 	return (1);
 #endif
@@ -139,7 +139,7 @@ int
 pmc_cpu_is_primary(int cpu)
 {
 #ifdef	SMP
-	return ((logical_cpus_mask & (1 << cpu)) == 0);
+	return (!CPU_ISSET(cpu, &logical_cpus_mask));
 #else
 	return (1);
 #endif
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 7f2b4e7..3214e1b 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -263,7 +263,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	pc = pcpu_find(curcpu);
 
 	/* Check if we just need to do a proper critical_exit. */
-	if (!(pc->pc_cpumask & rm->rm_writecpus)) {
+	if (!CPU_OVERLAP(&pc->pc_cpumask, &rm->rm_writecpus)) {
 		critical_exit();
 		return (1);
 	}
@@ -325,7 +325,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	critical_enter();
 	pc = pcpu_find(curcpu);
-	rm->rm_writecpus &= ~pc->pc_cpumask;
+	CPU_NAND(&rm->rm_writecpus, &pc->pc_cpumask);
 	rm_tracker_add(pc, tracker);
 	sched_pin();
 	critical_exit();
@@ -366,7 +366,8 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * Fast path to combine two common conditions into a single
 	 * conditional jump.
 	 */
-	if (0 == (td->td_owepreempt | (rm->rm_writecpus & pc->pc_cpumask)))
+	if (0 == (td->td_owepreempt |
+	    CPU_OVERLAP(&rm->rm_writecpus, &pc->pc_cpumask)))
 		return (1);
 
 	/* We do not have a read token and need to acquire one. */
@@ -429,17 +430,17 @@ _rm_wlock(struct rmlock *rm)
 {
 	struct rm_priotracker *prio;
 	struct turnstile *ts;
-	cpumask_t readcpus;
+	cpuset_t readcpus;
 
 	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
 		sx_xlock(&rm->rm_lock_sx);
 	else
 		mtx_lock(&rm->rm_lock_mtx);
 
-	if (rm->rm_writecpus != all_cpus) {
+	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
 		/* Get all read tokens back */
-
-		readcpus = all_cpus & (all_cpus & ~rm->rm_writecpus);
+		readcpus = all_cpus;
+		CPU_NAND(&readcpus, &rm->rm_writecpus);
 		rm->rm_writecpus = all_cpus;
 
 		/*
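kern_pmc.c and kern_rmlock.c show the recurring substitutions of the conversion: mask & (1 << cpu) becomes CPU_ISSET()/CPU_OVERLAP(), and mask &= ~other becomes CPU_NAND(). The macros operate word by word over the set; the sketch below gives rough userland equivalents (mask_t and the mask_* names are stand-ins, not the <sys/cpuset.h> definitions).

/*
 * Rough userland equivalents of two cpuset_t macros used throughout
 * this diff.  Stand-in names; illustrative only.
 */
#include <assert.h>

#define	_NWORDS	4

typedef struct {
	unsigned long __bits[_NWORDS];
} mask_t;

static int
mask_overlap(const mask_t *a, const mask_t *b)	/* CPU_OVERLAP() */
{
	int i;

	for (i = 0; i < _NWORDS; i++)
		if ((a->__bits[i] & b->__bits[i]) != 0)
			return (1);
	return (0);
}

static void
mask_nand(mask_t *dst, const mask_t *src)	/* CPU_NAND(): dst &= ~src */
{
	int i;

	for (i = 0; i < _NWORDS; i++)
		dst->__bits[i] &= ~src->__bits[i];
}

int
main(void)
{
	mask_t a = { { 0x5, 0, 0, 0 } }, b = { { 0x1, 0, 0, 0 } };

	assert(mask_overlap(&a, &b));
	mask_nand(&a, &b);		/* a is now 0x4 */
	assert(a.__bits[0] == 0x4 && !mask_overlap(&a, &b));
	return (0);
}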
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 001da3d..da041fa 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -233,30 +233,32 @@ print_uptime(void)
 	printf("%lds\n", (long)ts.tv_sec);
 }
 
-static void
-doadump(void)
+int
+doadump(boolean_t textdump)
 {
+	boolean_t coredump;
 
-	/*
-	 * Sometimes people have to call this from the kernel debugger.
-	 * (if 'panic' can not dump)
-	 * Give them a clue as to why they can't dump.
-	 */
-	if (dumper.dumper == NULL) {
-		printf("Cannot dump. Device not defined or unavailable.\n");
-		return;
-	}
+	if (dumping)
+		return (EBUSY);
+	if (dumper.dumper == NULL)
+		return (ENXIO);
 
 	savectx(&dumppcb);
 	dumptid = curthread->td_tid;
 	dumping++;
+
+	coredump = TRUE;
 #ifdef DDB
-	if (textdump_pending)
+	if (textdump && textdump_pending) {
+		coredump = FALSE;
 		textdump_dumpsys(&dumper);
-	else
+	}
 #endif
+	if (coredump)
 		dumpsys(&dumper);
+
 	dumping--;
+	return (0);
 }
 
 static int
@@ -425,7 +427,7 @@ kern_reboot(int howto)
 	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);
 
 	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping)
-		doadump();
+		doadump(TRUE);
 
 	/* Now that we're going to really halt the system... */
 	EVENTHANDLER_INVOKE(shutdown_final, howto);
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index 7ee56d5..799b60d 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -206,7 +206,7 @@ ksched_setscheduler(struct ksched *ksched,
 		if (param->sched_priority >= 0 &&
 		    param->sched_priority <= (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE)) {
 			rtp.type = RTP_PRIO_NORMAL;
-			rtp.prio = p4prio_to_rtpprio(param->sched_priority);
+			rtp.prio = p4prio_to_tsprio(param->sched_priority);
 			rtp_to_pri(&rtp, td);
 		} else
 			e = EINVAL;
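doadump() now reports failure through an errno value (EBUSY while a dump is already in progress, ENXIO with no dump device configured) instead of printing a message, and takes a flag selecting whether a pending textdump is honored. A userland mock of that contract, with every name below a stand-in for the kernel machinery:

/*
 * Userland mock of the new doadump() contract; illustrative only.
 */
#include <errno.h>
#include <stdio.h>

typedef int boolean_t;
#define	TRUE	1

static int dumping;		/* dump in progress */
static int have_dumper;		/* stands in for dumper.dumper != NULL */

static int
doadump_mock(boolean_t textdump)
{

	if (dumping)
		return (EBUSY);
	if (!have_dumper)
		return (ENXIO);
	dumping++;
	/* kernel code would run textdump_dumpsys() or dumpsys() here */
	(void)textdump;
	dumping--;
	return (0);
}

int
main(void)
{

	printf("no device: %d (ENXIO is %d)\n", doadump_mock(TRUE), ENXIO);
	have_dumper = 1;
	printf("with device: %d\n", doadump_mock(TRUE));
	return (0);
}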
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 519cae5..592bb80 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -156,7 +156,7 @@ static struct runq runq;
 static struct runq runq_pcpu[MAXCPU];
 long runq_length[MAXCPU];
 
-static cpumask_t idle_cpus_mask;
+static cpuset_t idle_cpus_mask;
 #endif
 
 struct pcpuidlestat {
@@ -951,7 +951,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	if (td->td_flags & TDF_IDLETD) {
 		TD_SET_CAN_RUN(td);
 #ifdef SMP
-		idle_cpus_mask &= ~PCPU_GET(cpumask);
+		/* Spinlock held here, assume no migration. */
+		CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
 #endif
 	} else {
 		if (TD_IS_RUNNING(td)) {
@@ -1025,7 +1026,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 
 #ifdef SMP
 	if (td->td_flags & TDF_IDLETD)
-		idle_cpus_mask |= PCPU_GET(cpumask);
+		CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
 #endif
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
@@ -1054,7 +1055,8 @@ static int
 forward_wakeup(int cpunum)
 {
 	struct pcpu *pc;
-	cpumask_t dontuse, id, map, map2, me;
+	cpuset_t dontuse, id, map, map2, me;
+	int iscpuset;
 
 	mtx_assert(&sched_lock, MA_OWNED);
 
@@ -1071,32 +1073,38 @@ forward_wakeup(int cpunum)
 	/*
 	 * Check the idle mask we received against what we calculated
 	 * before in the old version.
+	 *
+	 * Also note that sched_lock is held now, thus no migration is
+	 * expected.
 	 */
 	me = PCPU_GET(cpumask);
 
 	/* Don't bother if we should be doing it ourself. */
-	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
+	if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
+	    (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
 		return (0);
 
-	dontuse = me | stopped_cpus | hlt_cpus_mask;
-	map2 = 0;
+	dontuse = me;
+	CPU_OR(&dontuse, &stopped_cpus);
+	CPU_OR(&dontuse, &hlt_cpus_mask);
+	CPU_ZERO(&map2);
 	if (forward_wakeup_use_loop) {
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 			id = pc->pc_cpumask;
-			if ((id & dontuse) == 0 &&
+			if (!CPU_OVERLAP(&id, &dontuse) &&
 			    pc->pc_curthread == pc->pc_idlethread) {
-				map2 |= id;
+				CPU_OR(&map2, &id);
 			}
 		}
 	}
 
 	if (forward_wakeup_use_mask) {
-		map = 0;
-		map = idle_cpus_mask & ~dontuse;
+		map = idle_cpus_mask;
+		CPU_NAND(&map, &dontuse);
 
 		/* If they are both on, compare and use loop if different. */
 		if (forward_wakeup_use_loop) {
-			if (map != map2) {
+			if (CPU_CMP(&map, &map2)) {
 				printf("map != map2, loop method preferred\n");
 				map = map2;
 			}
@@ -1108,18 +1116,22 @@ forward_wakeup(int cpunum)
 	/* If we only allow a specific CPU, then mask off all the others. */
 	if (cpunum != NOCPU) {
 		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
-		map &= (1 << cpunum);
+		iscpuset = CPU_ISSET(cpunum, &map);
+		if (iscpuset == 0)
+			CPU_ZERO(&map);
+		else
+			CPU_SETOF(cpunum, &map);
 	}
-	if (map) {
+	if (!CPU_EMPTY(&map)) {
 		forward_wakeups_delivered++;
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 			id = pc->pc_cpumask;
-			if ((map & id) == 0)
+			if (!CPU_OVERLAP(&map, &id))
 				continue;
 			if (cpu_idle_wakeup(pc->pc_cpuid))
-				map &= ~id;
+				CPU_NAND(&map, &id);
 		}
-		if (map)
+		if (!CPU_EMPTY(&map))
 			ipi_selected(map, IPI_AST);
 		return (1);
 	}
@@ -1135,7 +1147,7 @@ kick_other_cpu(int pri, int cpuid)
 	int cpri;
 
 	pcpu = pcpu_find(cpuid);
-	if (idle_cpus_mask & pcpu->pc_cpumask) {
+	if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
 		forward_wakeups_delivered++;
 		if (!cpu_idle_wakeup(cpuid))
 			ipi_cpu(cpuid, IPI_AST);
@@ -1193,6 +1205,7 @@ void
 sched_add(struct thread *td, int flags)
 #ifdef SMP
 {
+	cpuset_t idle, me, tidlemsk;
 	struct td_sched *ts;
 	int forwarded = 0;
 	int cpu;
@@ -1262,11 +1275,20 @@ sched_add(struct thread *td, int flags)
 			kick_other_cpu(td->td_priority, cpu);
 	} else {
 		if (!single_cpu) {
-			cpumask_t me = PCPU_GET(cpumask);
-			cpumask_t idle = idle_cpus_mask & me;
-
-			if (!idle && ((flags & SRQ_INTR) == 0) &&
-			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
+			/*
+			 * Thread spinlock is held here, assume no
+			 * migration is possible.
+			 */
+			me = PCPU_GET(cpumask);
+			idle = idle_cpus_mask;
+			tidlemsk = idle;
+			CPU_AND(&idle, &me);
+			CPU_OR(&me, &hlt_cpus_mask);
+			CPU_NAND(&tidlemsk, &me);
+
+			if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+			    !CPU_EMPTY(&tidlemsk))
 				forwarded = forward_wakeup(cpu);
 		}
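Because cpuset_t has no compound expressions, sched_add() computes the old one-liners stepwise, one macro per operation. On a one-word mask the two forms are equivalent, which the runnable check below demonstrates; plain unsigned longs stand in for cpuset_t and the mask values are arbitrary.

/*
 * Runnable check that the stepwise cpuset_t sequence in sched_add()
 * matches the old single-word expressions.
 */
#include <assert.h>
#include <stdio.h>

int
main(void)
{
	unsigned long idle_cpus_mask = 0x0d, hlt_cpus_mask = 0x02, me = 0x04;
	unsigned long idle, tidlemsk;
	unsigned long old_idle = idle_cpus_mask & me;
	unsigned long old_tidlemsk = idle_cpus_mask & ~(hlt_cpus_mask | me);

	/* New form, one macro per step as in the diff. */
	idle = idle_cpus_mask;		/* idle = idle_cpus_mask */
	tidlemsk = idle;		/* tidlemsk = idle */
	idle &= me;			/* CPU_AND(&idle, &me) */
	me |= hlt_cpus_mask;		/* CPU_OR(&me, &hlt_cpus_mask) */
	tidlemsk &= ~me;		/* CPU_NAND(&tidlemsk, &me) */

	assert(idle == old_idle && tidlemsk == old_tidlemsk);
	printf("idle=%#lx tidlemsk=%#lx\n", idle, tidlemsk);
	return (0);
}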
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index ac18e77..05267f3 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -564,7 +564,7 @@ struct cpu_search {
 
 #define	CPUSET_FOREACH(cpu, mask)				\
 	for ((cpu) = 0; (cpu) <= mp_maxid; (cpu)++)		\
-		if ((mask) & 1 << (cpu))
+		if (CPU_ISSET(cpu, &mask))
 
 static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
     struct cpu_search *high, const int match);
@@ -2650,15 +2650,16 @@ static int
 sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
     int indent)
 {
+	char cpusetbuf[CPUSETBUFSIZ];
 	int i, first;
 
 	sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
 	    "", 1 + indent / 2, cg->cg_level);
-	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%x\">", indent, "",
-	    cg->cg_count, cg->cg_mask);
+	sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
+	    cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
 	first = TRUE;
 	for (i = 0; i < MAXCPU; i++) {
-		if ((cg->cg_mask & (1 << i)) != 0) {
+		if (CPU_ISSET(i, &cg->cg_mask)) {
 			if (!first)
 				sbuf_printf(sb, ", ");
 			else
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 5d68ae2..1d67864 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -413,7 +413,8 @@ kdb_thr_ctx(struct thread *thr)
 
 #if defined(SMP) && defined(KDB_STOPPEDPCB)
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)  {
-		if (pc->pc_curthread == thr && (stopped_cpus & pc->pc_cpumask))
+		if (pc->pc_curthread == thr &&
+		    CPU_OVERLAP(&stopped_cpus, &pc->pc_cpumask))
 			return (KDB_STOPPEDPCB(pc));
 	}
 #endif
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index 5cb4f26..a6b3ae0 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -87,7 +87,7 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
 	KASSERT(cpuid >= 0 && cpuid < MAXCPU,
 	    ("pcpu_init: invalid cpuid %d", cpuid));
 	pcpu->pc_cpuid = cpuid;
-	pcpu->pc_cpumask = 1 << cpuid;
+	CPU_SETOF(cpuid, &pcpu->pc_cpumask);
 	cpuid_to_pcpu[cpuid] = pcpu;
 	STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
 	cpu_pcpu_init(pcpu, cpuid, size);
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index 3334837..48f2dd9 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -163,6 +163,7 @@ uprintf(const char *fmt, ...)
 		goto out;
 	}
 	pca.flags = TOTTY;
+	pca.p_bufr = NULL;
 	va_start(ap, fmt);
 	tty_lock(pca.tty);
 	retval = kvprintf(fmt, putchar, &pca, 10, ap);
@@ -206,6 +207,7 @@ tprintf(struct proc *p, int pri, const char *fmt, ...)
 	pca.pri = pri;
 	pca.tty = tp;
 	pca.flags = flags;
+	pca.p_bufr = NULL;
 	va_start(ap, fmt);
 	if (pca.tty != NULL)
 		tty_lock(pca.tty);
@@ -234,6 +236,7 @@ ttyprintf(struct tty *tp, const char *fmt, ...)
 	va_start(ap, fmt);
 	pca.tty = tp;
 	pca.flags = TOTTY;
+	pca.p_bufr = NULL;
 	retval = kvprintf(fmt, putchar, &pca, 10, ap);
 	va_end(ap);
 	return (retval);
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index 3014b19..abd72c0 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -839,6 +839,7 @@ int_rman_release_resource(struct rman *rm, struct resource_i *r)
 		 * without freeing anything.
 		 */
 		r->r_flags &= ~RF_ALLOCATED;
+		r->r_dev = NULL;
 		return 0;
 	}
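sched_ule's CPUSET_FOREACH() keeps its shape and only swaps the shift-and-mask test for CPU_ISSET(). In userland terms, with a one-word mask and MAXID standing in for mp_maxid:

/*
 * Userland rendering of the CPUSET_FOREACH() pattern; stand-in names.
 */
#include <stdio.h>

#define	MAXID	7
#define	ISSET(cpu, mask)	(((mask) >> (cpu)) & 1UL)	/* CPU_ISSET() */

int
main(void)
{
	unsigned long mask = 0x15;	/* CPUs 0, 2 and 4 */
	int cpu;

	for (cpu = 0; cpu <= MAXID; cpu++)
		if (ISSET(cpu, mask))
			printf("cpu%d\n", cpu);
	return (0);
}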
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 351f096..c38177b 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -53,15 +53,15 @@ __FBSDID("$FreeBSD$");
 #include "opt_sched.h"
 
 #ifdef SMP
-volatile cpumask_t stopped_cpus;
-volatile cpumask_t started_cpus;
-cpumask_t hlt_cpus_mask;
-cpumask_t logical_cpus_mask;
+volatile cpuset_t stopped_cpus;
+volatile cpuset_t started_cpus;
+cpuset_t hlt_cpus_mask;
+cpuset_t logical_cpus_mask;
 
 void (*cpustop_restartfunc)(void);
 #endif
 /* This is used in modules that need to work in both SMP and UP. */
-cpumask_t all_cpus;
+cpuset_t all_cpus;
 
 int mp_ncpus;
 /* export this for libkvm consumers. */
@@ -200,8 +200,11 @@ forward_signal(struct thread *td)
  *
 */
 static int
-generic_stop_cpus(cpumask_t map, u_int type)
+generic_stop_cpus(cpuset_t map, u_int type)
 {
+#ifdef KTR
+	char cpusetbuf[CPUSETBUFSIZ];
+#endif
 	static volatile u_int stopping_cpu = NOCPU;
 	int i;
 
@@ -216,7 +219,8 @@ generic_stop_cpus(cpumask_t map, u_int type)
 	if (!smp_started)
 		return (0);
 
-	CTR2(KTR_SMP, "stop_cpus(%x) with %u type", map, type);
+	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
+	    cpusetobj_strprint(cpusetbuf, &map), type);
 
 	if (stopping_cpu != PCPU_GET(cpuid))
 		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
@@ -228,7 +232,7 @@ generic_stop_cpus(cpumask_t map, u_int type)
 	ipi_selected(map, type);
 
 	i = 0;
-	while ((stopped_cpus & map) != map) {
+	while (!CPU_SUBSET(&stopped_cpus, &map)) {
 		/* spin */
 		cpu_spinwait();
 		i++;
@@ -245,14 +249,14 @@ generic_stop_cpus(cpumask_t map, u_int type)
 }
 
 int
-stop_cpus(cpumask_t map)
+stop_cpus(cpuset_t map)
 {
 
 	return (generic_stop_cpus(map, IPI_STOP));
 }
 
 int
-stop_cpus_hard(cpumask_t map)
+stop_cpus_hard(cpuset_t map)
 {
 
 	return (generic_stop_cpus(map, IPI_STOP_HARD));
@@ -260,7 +264,7 @@ stop_cpus_hard(cpumask_t map)
 
 #if defined(__amd64__)
 int
-suspend_cpus(cpumask_t map)
+suspend_cpus(cpuset_t map)
 {
 
 	return (generic_stop_cpus(map, IPI_SUSPEND));
@@ -281,19 +285,22 @@ suspend_cpus(cpumask_t map)
 *            1: ok
 */
 int
-restart_cpus(cpumask_t map)
+restart_cpus(cpuset_t map)
 {
+#ifdef KTR
+	char cpusetbuf[CPUSETBUFSIZ];
+#endif
 
 	if (!smp_started)
 		return 0;
 
-	CTR1(KTR_SMP, "restart_cpus(%x)", map);
+	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
 
 	/* signal other cpus to restart */
-	atomic_store_rel_int(&started_cpus, map);
+	CPU_COPY_STORE_REL(&map, &started_cpus);
 
 	/* wait for each to clear its bit */
-	while ((stopped_cpus & map) != 0)
+	while (CPU_OVERLAP(&stopped_cpus, &map))
 		cpu_spinwait();
 
 	return 1;
@@ -409,13 +416,13 @@ smp_rendezvous_action(void)
 }
 
 void
-smp_rendezvous_cpus(cpumask_t map,
+smp_rendezvous_cpus(cpuset_t map,
 	void (* setup_func)(void *),
 	void (* action_func)(void *),
 	void (* teardown_func)(void *),
 	void *arg)
 {
-	int i, ncpus = 0;
+	int curcpumap, i, ncpus = 0;
 
 	if (!smp_started) {
 		if (setup_func != NULL)
@@ -428,11 +435,11 @@ smp_rendezvous_cpus(cpumask_t map,
 	}
 
 	CPU_FOREACH(i) {
-		if (((1 << i) & map) != 0)
+		if (CPU_ISSET(i, &map))
 			ncpus++;
 	}
 	if (ncpus == 0)
-		panic("ncpus is 0 with map=0x%x", map);
+		panic("ncpus is 0 with non-zero map");
 
 	mtx_lock_spin(&smp_ipi_mtx);
 
@@ -452,10 +459,12 @@ smp_rendezvous_cpus(cpumask_t map,
 	 * Signal other processors, which will enter the IPI with
 	 * interrupts off.
 	 */
-	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);
+	curcpumap = CPU_ISSET(curcpu, &map);
+	CPU_CLR(curcpu, &map);
+	ipi_selected(map, IPI_RENDEZVOUS);
 
 	/* Check if the current CPU is in the map */
-	if ((map & (1 << curcpu)) != 0)
+	if (curcpumap != 0)
 		smp_rendezvous_action();
 
 	/*
@@ -484,6 +493,7 @@ static struct cpu_group group[MAXCPU];
 
 struct cpu_group *
 smp_topo(void)
 {
+	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
 	struct cpu_group *top;
 
 	/*
@@ -530,9 +540,10 @@ smp_topo(void)
 	if (top->cg_count != mp_ncpus)
 		panic("Built bad topology at %p.  CPU count %d != %d",
 		    top, top->cg_count, mp_ncpus);
-	if (top->cg_mask != all_cpus)
-		panic("Built bad topology at %p.  CPU mask 0x%X != 0x%X",
-		    top, top->cg_mask, all_cpus);
+	if (CPU_CMP(&top->cg_mask, &all_cpus))
+		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
+		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
+		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
 	return (top);
 }
 
@@ -557,11 +568,13 @@ static int
 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
     int count, int flags, int start)
 {
-	cpumask_t mask;
+	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
+	cpuset_t mask;
 	int i;
 
-	for (mask = 0, i = 0; i < count; i++, start++)
-		mask |= (1 << start);
+	CPU_ZERO(&mask);
+	for (i = 0; i < count; i++, start++)
+		CPU_SET(start, &mask);
 	child->cg_parent = parent;
 	child->cg_child = NULL;
 	child->cg_children = 0;
@@ -571,10 +584,12 @@ smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
 	child->cg_mask = mask;
 	parent->cg_children++;
 	for (; parent != NULL; parent = parent->cg_parent) {
-		if ((parent->cg_mask & child->cg_mask) != 0)
-			panic("Duplicate children in %p.  mask 0x%X child 0x%X",
-			    parent, parent->cg_mask, child->cg_mask);
-		parent->cg_mask |= child->cg_mask;
+		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
+			panic("Duplicate children in %p.  mask (%s) child (%s)",
+			    parent,
+			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
+			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
+		CPU_OR(&parent->cg_mask, &child->cg_mask);
 		parent->cg_count += child->cg_count;
 	}
 
@@ -634,20 +649,20 @@ struct cpu_group *
 smp_topo_find(struct cpu_group *top, int cpu)
 {
 	struct cpu_group *cg;
-	cpumask_t mask;
+	cpuset_t mask;
 	int children;
 	int i;
 
-	mask = (1 << cpu);
+	CPU_SETOF(cpu, &mask);
 	cg = top;
 	for (;;) {
-		if ((cg->cg_mask & mask) == 0)
+		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
 			return (NULL);
 		if (cg->cg_children == 0)
 			return (cg);
 		children = cg->cg_children;
 		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
-			if ((cg->cg_mask & mask) != 0)
+			if (CPU_OVERLAP(&cg->cg_mask, &mask))
 				break;
 	}
 	return (NULL);
@@ -655,7 +670,7 @@ smp_topo_find(struct cpu_group *top, int cpu)
 #else /* !SMP */
 
 void
-smp_rendezvous_cpus(cpumask_t map,
+smp_rendezvous_cpus(cpuset_t map,
 	void (*setup_func)(void *),
 	void (*action_func)(void *),
 	void (*teardown_func)(void *),
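The stop loop in generic_stop_cpus() now waits with CPU_SUBSET(), i.e. until every CPU in map appears in stopped_cpus. A word-wise sketch of that subset test, with stand-in names again:

/*
 * Userland sketch of CPU_SUBSET(big, small): every bit of 'small'
 * must already be set in 'big'.  Stand-in names; illustrative only.
 */
#include <assert.h>

#define	_NWORDS	4

typedef struct {
	unsigned long __bits[_NWORDS];
} mask_t;

static int
mask_subset(const mask_t *big, const mask_t *small)
{
	int i;

	for (i = 0; i < _NWORDS; i++)
		if ((small->__bits[i] & ~big->__bits[i]) != 0)
			return (0);
	return (1);
}

int
main(void)
{
	mask_t stopped = { { 0x3, 0, 0, 0 } }, map = { { 0x7, 0, 0, 0 } };

	assert(!mask_subset(&stopped, &map));	/* CPU 2 not stopped yet */
	stopped.__bits[0] |= 0x4;
	assert(mask_subset(&stopped, &map));	/* all of map stopped */
	return (0);
}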
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index a4bbdba..19aaee0 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -747,6 +747,10 @@ kern_sendit(td, s, mp, flags, control, segflg)
 		return (error);
 	so = (struct socket *)fp->f_data;
 
+#ifdef KTRACE
+	if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT))
+		ktrsockaddr(mp->msg_name);
+#endif
 #ifdef MAC
 	if (mp->msg_name != NULL) {
 		error = mac_socket_check_connect(td->td_ucred, so,
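The uipc_syscalls.c hunk follows the usual ktrace pattern: a cheap KTRPOINT() test gates the record emission, so nothing is paid when tracing is off. A userland mock of that guard ordering, with every name a stand-in for the kernel's ktrace machinery:

/*
 * Userland mock of the KTRPOINT()-style guard added to kern_sendit().
 * All names are stand-ins; the flag value is illustrative.
 */
#include <stdio.h>

#define	TRACE_STRUCT	0x1	/* stands in for KTR_STRUCT */

static unsigned tracepoints = TRACE_STRUCT;	/* per-thread trace mask */

static void
trace_sockaddr(const char *name)	/* stands in for ktrsockaddr() */
{

	printf("sockaddr record: %s\n", name);
}

int
main(void)
{
	const char *msg_name = "example";	/* stands in for mp->msg_name */

	if (msg_name != NULL && (tracepoints & TRACE_STRUCT) != 0)
		trace_sockaddr(msg_name);
	return (0);
}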