author	jhb <jhb@FreeBSD.org>	2010-08-06 15:36:59 +0000
committer	jhb <jhb@FreeBSD.org>	2010-08-06 15:36:59 +0000
commit	19ddbf5c3845e0aaeed327e2be2f168e30013b63 (patch)
tree	90823b92f30822fa8ac56ad8c70614f9aae1017a /sys/kern
parent	57b610d580a3dd85aaac28dae65ce416f30c3f31 (diff)
Add a new ipi_cpu() function to the MI IPI API that can be used to send an
IPI to a specific CPU by its cpuid.  Replace calls to ipi_selected() that
constructed a mask for a single CPU with calls to ipi_cpu() instead.

This will matter more in the future when we transition from cpumask_t to
cpuset_t for CPU masks, in which case building a CPU mask is more expensive.

Submitted by:	peter, sbruno
Reviewed by:	rookie
Obtained from:	Yahoo! (x86)
MFC after:	1 month
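For context, a minimal sketch of the shape of the new entry point relative to
the old one.  This is illustrative only, not the actual FreeBSD
machine-dependent implementation; ipi_send_cpu_md() below is a hypothetical
placeholder for whatever per-architecture delivery path a given port uses.

/*
 * Illustrative sketch only.  ipi_selected(cpumask_t cpus, u_int ipi)
 * delivers an IPI to every CPU set in a mask; ipi_cpu(int cpu, u_int ipi)
 * targets a single CPU by cpuid, so callers no longer build a one-bit
 * cpumask_t just to reach one CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/* Hypothetical MD helper standing in for the real delivery path. */
	ipi_send_cpu_md(cpu, ipi);
}

/* Typical call-site change made by this commit: */
	ipi_selected(1 << cpu, IPI_PREEMPT);	/* before: construct a one-bit mask */
	ipi_cpu(cpu, IPI_PREEMPT);		/* after: pass the cpuid directly */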
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/sched_4bsd.c	8
-rw-r--r--	sys/kern/sched_ule.c	6
-rw-r--r--	sys/kern/subr_smp.c	2
3 files changed, 8 insertions, 8 deletions
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 61366cd..e579198 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1154,7 +1154,7 @@ kick_other_cpu(int pri, int cpuid)
pcpu = pcpu_find(cpuid);
if (idle_cpus_mask & pcpu->pc_cpumask) {
forward_wakeups_delivered++;
- ipi_selected(pcpu->pc_cpumask, IPI_AST);
+ ipi_cpu(cpuid, IPI_AST);
return;
}
@@ -1167,13 +1167,13 @@ kick_other_cpu(int pri, int cpuid)
if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
{
- ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
+ ipi_cpu(cpuid, IPI_PREEMPT);
return;
}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
- ipi_selected(pcpu->pc_cpumask, IPI_AST);
+ ipi_cpu(cpuid, IPI_AST);
return;
}
#endif /* SMP */
@@ -1666,7 +1666,7 @@ sched_affinity(struct thread *td)
td->td_flags |= TDF_NEEDRESCHED;
if (td != curthread)
- ipi_selected(1 << cpu, IPI_AST);
+ ipi_cpu(cpu, IPI_AST);
break;
default:
break;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index f469a06b1..e210ebc 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -851,7 +851,7 @@ sched_balance_pair(struct tdq *high, struct tdq *low)
* IPI the target cpu to force it to reschedule with the new
* workload.
*/
- ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
+ ipi_cpu(TDQ_ID(low), IPI_PREEMPT);
}
tdq_unlock_pair(high, low);
return (moved);
@@ -974,7 +974,7 @@ tdq_notify(struct tdq *tdq, struct thread *td)
return;
}
tdq->tdq_ipipending = 1;
- ipi_selected(1 << cpu, IPI_PREEMPT);
+ ipi_cpu(cpu, IPI_PREEMPT);
}
/*
@@ -2411,7 +2411,7 @@ sched_affinity(struct thread *td)
cpu = ts->ts_cpu;
ts->ts_cpu = sched_pickcpu(td, 0);
if (cpu != PCPU_GET(cpuid))
- ipi_selected(1 << cpu, IPI_PREEMPT);
+ ipi_cpu(cpu, IPI_PREEMPT);
#endif
}
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 3e4a2ab..89542f9 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -181,7 +181,7 @@ forward_signal(struct thread *td)
id = td->td_oncpu;
if (id == NOCPU)
return;
- ipi_selected(1 << id, IPI_AST);
+ ipi_cpu(id, IPI_AST);
}
/*