path: root/sys/kern
author     marius <marius@FreeBSD.org>  2011-10-06 11:48:13 +0000
committer  marius <marius@FreeBSD.org>  2011-10-06 11:48:13 +0000
commit     1b1d84970aeb66200a907417b3a575737ae64db2 (patch)
tree       62e61902376a6238af699856b2becd8def1a7f01 /sys/kern
parent     8cb5f6fec8bffb90f27a43c9046239a7b9cf70c5 (diff)
download   FreeBSD-src-1b1d84970aeb66200a907417b3a575737ae64db2.zip
           FreeBSD-src-1b1d84970aeb66200a907417b3a575737ae64db2.tar.gz
- Currently, sched_balance_pair() may cause a CPU to send an IPI_PREEMPT to
  itself, which sparc64 hardware doesn't support. One way to solve this
  would be to directly call sched_preempt() instead of issuing a self-IPI.
  However, quoting jhb@:
  "On the other hand, you can probably just skip the IPI entirely if we are
  going to send it to the current CPU. Presumably, once this routine
  finishes, the current CPU will exit softclock (or will do so "soon") and
  will then pick the next thread to run based on the adjustments made in
  this routine, so there's no need to IPI the CPU running this routine
  anyway. I think this is the better solution. Right now what is probably
  happening on other platforms is as soon as this routine finishes the CPU
  processes its self-IPI and causes mi_switch() which will just switch back
  to the softclock thread it is already running."
- With r226054 and the above change in place, sparc64 is no longer
  incompatible with ULE and vice versa. However, powerpc/E500 still is.

Submitted by:	jhb
Reviewed by:	jeff
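The fix adopted here is the one jhb@ describes: skip the IPI entirely when its target is the CPU that is running sched_balance_pair(). Below is a minimal userland sketch of that pattern only, not the scheduler code itself; cur_cpu() and send_preempt_ipi() are hypothetical stand-ins for the kernel's PCPU_GET(cpuid) and ipi_cpu(cpu, IPI_PREEMPT).

#include <stdio.h>

/* Hypothetical stand-in for PCPU_GET(cpuid): id of the CPU we run on. */
static int
cur_cpu(void)
{
        return (1);
}

/* Hypothetical stand-in for ipi_cpu(cpu, IPI_PREEMPT). */
static void
send_preempt_ipi(int cpu)
{
        printf("IPI_PREEMPT -> CPU %d\n", cpu);
}

/*
 * Notify the CPU that received new work.  A remote CPU must be
 * interrupted; the local CPU will reschedule on its own once the
 * balancing routine returns, so a self-IPI is skipped (sparc64
 * cannot deliver one anyway).
 */
static void
notify_target(int target_cpu)
{
        if (target_cpu != cur_cpu())
                send_preempt_ipi(target_cpu);
}

int
main(void)
{
        notify_target(0);       /* remote CPU: IPI is sent */
        notify_target(1);       /* local CPU: IPI is skipped */
        return (0);
}

In the actual change below, the comparison is additionally bracketed by sched_pin()/sched_unpin(), presumably so the thread cannot migrate to another CPU between reading PCPU_GET(cpuid) and deciding whether to send the IPI.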
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/sched_ule.c   13
1 file changed, 9 insertions, 4 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index f505676..31d3d11 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -76,7 +76,7 @@ dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
 #include <machine/cpu.h>
 #include <machine/smp.h>
 
-#if defined(__sparc64__)
+#if defined(__powerpc__) && defined(E500)
 #error "This architecture is not currently compatible with ULE"
 #endif
 
@@ -839,6 +839,7 @@ sched_balance_pair(struct tdq *high, struct tdq *low)
 	int low_load;
 	int moved;
 	int move;
+	int cpu;
 	int diff;
 	int i;
 
@@ -860,10 +861,14 @@ sched_balance_pair(struct tdq *high, struct tdq *low)
 		for (i = 0; i < move; i++)
 			moved += tdq_move(high, low);
 		/*
-		 * IPI the target cpu to force it to reschedule with the new
-		 * workload.
+		 * In case the target isn't the current cpu IPI it to force a
+		 * reschedule with the new workload.
 		 */
-		ipi_cpu(TDQ_ID(low), IPI_PREEMPT);
+		cpu = TDQ_ID(low);
+		sched_pin();
+		if (cpu != PCPU_GET(cpuid))
+			ipi_cpu(cpu, IPI_PREEMPT);
+		sched_unpin();
 	}
 	tdq_unlock_pair(high, low);
 	return (moved);