-rw-r--r--  lib/libmemstat/memstat.c            |   2
-rw-r--r--  lib/libmemstat/memstat.h            |   6
-rw-r--r--  lib/libmemstat/memstat_internal.h   |   4
-rw-r--r--  lib/libmemstat/memstat_malloc.c     |  13
-rw-r--r--  lib/libmemstat/memstat_uma.c        |   9
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c           |   6
-rw-r--r--  sys/i386/i386/apic_vector.s         |  16
-rw-r--r--  sys/i386/i386/db_trace.c            |   3
-rw-r--r--  sys/i386/i386/mp_machdep.c          |   7
-rw-r--r--  sys/i386/i386/pmap.c                |  93
-rw-r--r--  sys/i386/i386/swtch.s               |   6
-rw-r--r--  sys/i386/include/smp.h              |   4
-rw-r--r--  sys/i386/include/xen/xenvar.h       |   1
-rw-r--r--  sys/i386/xen/mp_machdep.c           |  81
-rw-r--r--  sys/i386/xen/pmap.c                 |  93
-rw-r--r--  sys/kern/sched_4bsd.c               |  40
-rw-r--r--  sys/kern/subr_smp.c                 |   1
-rw-r--r--  sys/sys/cpuset.h                    |   2
-rw-r--r--  sys/sys/smp.h                       |   1
19 files changed, 55 insertions(+), 333 deletions(-)
diff --git a/lib/libmemstat/memstat.c b/lib/libmemstat/memstat.c
index 1a08d3f..b2749e2 100644
--- a/lib/libmemstat/memstat.c
+++ b/lib/libmemstat/memstat.c
@@ -193,7 +193,7 @@ _memstat_mt_reset_stats(struct memory_type *mtp)
mtp->mt_zonefree = 0;
mtp->mt_kegfree = 0;
- for (i = 0; i < MEMSTAT_MAXCPU; i++) {
+ for (i = 0; i < MAXCPU; i++) {
mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
diff --git a/lib/libmemstat/memstat.h b/lib/libmemstat/memstat.h
index e973f1a..fa26944 100644
--- a/lib/libmemstat/memstat.h
+++ b/lib/libmemstat/memstat.h
@@ -30,12 +30,6 @@
#define _MEMSTAT_H_
/*
- * Number of CPU slots in library-internal data structures. This should be
- * at least the value of MAXCPU from param.h.
- */
-#define MEMSTAT_MAXCPU 32
-
-/*
* Amount of caller data to maintain for each caller data slot. Applications
* must not request more than this number of caller save data, or risk
* corrupting internal libmemstat(3) data structures. A compile time check
diff --git a/lib/libmemstat/memstat_internal.h b/lib/libmemstat/memstat_internal.h
index b7fdd71..8881e58 100644
--- a/lib/libmemstat/memstat_internal.h
+++ b/lib/libmemstat/memstat_internal.h
@@ -100,11 +100,11 @@ struct memory_type {
uint64_t mtp_sizemask; /* Per-CPU mt_sizemask. */
void *mtp_caller_pointer[MEMSTAT_MAXCALLER];
uint64_t mtp_caller_uint64[MEMSTAT_MAXCALLER];
- } mt_percpu_alloc[MEMSTAT_MAXCPU];
+ } mt_percpu_alloc[MAXCPU];
struct {
uint64_t mtp_free; /* Per-CPU cache free items. */
- } mt_percpu_cache[MEMSTAT_MAXCPU];
+ } mt_percpu_cache[MAXCPU];
LIST_ENTRY(memory_type) mt_list; /* List of types. */
};
diff --git a/lib/libmemstat/memstat_malloc.c b/lib/libmemstat/memstat_malloc.c
index 28a48c6..a8d14f8 100644
--- a/lib/libmemstat/memstat_malloc.c
+++ b/lib/libmemstat/memstat_malloc.c
@@ -96,7 +96,7 @@ retry:
return (-1);
}
- if (maxcpus > MEMSTAT_MAXCPU) {
+ if (maxcpus > MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
return (-1);
}
@@ -160,7 +160,7 @@ retry:
return (-1);
}
- if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
+ if (mtshp->mtsh_maxcpus > MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
free(buffer);
return (-1);
@@ -295,7 +295,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
void *kmemstatistics;
int hint_dontsearch, j, mp_maxcpus, ret;
char name[MEMTYPE_MAXNAME];
- struct malloc_type_stats mts[MEMSTAT_MAXCPU], *mtsp;
+ struct malloc_type_stats mts[MAXCPU], *mtsp;
struct malloc_type_internal *mtip;
struct malloc_type type, *typep;
kvm_t *kvm;
@@ -322,7 +322,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
return (-1);
}
- if (mp_maxcpus > MEMSTAT_MAXCPU) {
+ if (mp_maxcpus > MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
return (-1);
}
@@ -348,11 +348,6 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
list->mtl_error = ret;
return (-1);
}
-
- /*
- * Since our compile-time value for MAXCPU may differ from the
- * kernel's, we populate our own array.
- */
mtip = type.ks_handle;
ret = kread(kvm, mtip->mti_stats, mts, mp_maxcpus *
sizeof(struct malloc_type_stats), 0);
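
The kvm path above sizes its local mts[] array with the compile-time MAXCPU but only reads the kernel's mp_maxcpus entries, which is why the TOOMANYCPUS guard has to run first. A minimal standalone sketch of that guard-then-read pattern against the public kvm(3) API follows; the function and variable names are illustrative, not libmemstat internals.

#include <sys/param.h>

#include <kvm.h>
#include <stdio.h>

static int
read_percpu_stats(kvm_t *kvm, unsigned long addr, void *buf,
    size_t slot_size, int kernel_maxcpus)
{

        /* Refuse kernels with more CPU slots than we compiled room for. */
        if (kernel_maxcpus > MAXCPU)
                return (-1);            /* MEMSTAT_ERROR_TOOMANYCPUS here */
        if (kvm_read(kvm, addr, buf, kernel_maxcpus * slot_size) < 0) {
                fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kvm));
                return (-1);
        }
        return (0);
}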
diff --git a/lib/libmemstat/memstat_uma.c b/lib/libmemstat/memstat_uma.c
index 4aae61a..10ff8ec 100644
--- a/lib/libmemstat/memstat_uma.c
+++ b/lib/libmemstat/memstat_uma.c
@@ -27,6 +27,7 @@
*/
#include <sys/param.h>
+#include <sys/cpuset.h>
#include <sys/sysctl.h>
#define LIBMEMSTAT /* Cause vm_page.h not to include opt_vmpage.h */
@@ -104,7 +105,7 @@ retry:
return (-1);
}
- if (maxcpus > MEMSTAT_MAXCPU) {
+ if (maxcpus > MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
return (-1);
}
@@ -168,7 +169,7 @@ retry:
return (-1);
}
- if (ushp->ush_maxcpus > MEMSTAT_MAXCPU) {
+ if (ushp->ush_maxcpus > MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
free(buffer);
return (-1);
@@ -313,7 +314,7 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
struct uma_keg *kzp, kz;
int hint_dontsearch, i, mp_maxid, ret;
char name[MEMTYPE_MAXNAME];
- __cpumask_t all_cpus;
+ cpuset_t all_cpus;
kvm_t *kvm;
kvm = (kvm_t *)kvm_handle;
@@ -407,7 +408,7 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
if (kz.uk_flags & UMA_ZFLAG_INTERNAL)
goto skip_percpu;
for (i = 0; i < mp_maxid + 1; i++) {
- if ((all_cpus & (1 << i)) == 0)
+ if (!CPU_ISSET(i, &all_cpus))
continue;
ucp = &ucp_array[i];
mtp->mt_numallocs += ucp->uc_allocs;
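
With all_cpus now a cpuset_t, per-CPU membership is tested with CPU_ISSET() rather than by shifting a scalar mask, so the walk keeps working once mp_maxid passes 31. A standalone sketch of the idiom, assuming only <sys/cpuset.h>; this is not libmemstat code.

#include <sys/param.h>
#include <sys/cpuset.h>

static int
count_online(const cpuset_t *set, int maxid)
{
        int count, i;

        count = 0;
        for (i = 0; i <= maxid; i++) {
                /* Old idiom: (mask & (1 << i)) != 0 -- breaks past bit 31. */
                if (CPU_ISSET(i, set))
                        count++;
        }
        return (count);
}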
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 4fa169c..d6225d8 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -4083,7 +4083,7 @@ pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
done:
/* mark CPU as needing processing */
- atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+ atomic_set_int(&pmc_cpumask, (1 << cpu));
return (error);
}
@@ -4193,7 +4193,7 @@ pmc_process_samples(int cpu)
break;
if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
/* Need a rescan at a later time. */
- atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+ atomic_set_int(&pmc_cpumask, (1 << cpu));
break;
}
@@ -4782,7 +4782,7 @@ pmc_cleanup(void)
PMCDBG(MOD,INI,0, "%s", "cleanup");
/* switch off sampling */
- atomic_store_rel_int(&pmc_cpumask, 0);
+ pmc_cpumask = 0;
pmc_intr = NULL;
sx_xlock(&pmc_sx);
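
The hwpmc changes swap the _rel (store-with-release-barrier) atomics for plain ones when flagging a CPU in pmc_cpumask. A hedged C11 rendering of the same relaxed set-a-bit update; the names are invented for illustration and this is not hwpmc code.

#include <stdatomic.h>

static _Atomic unsigned int cpumask_hint;

static void
mark_cpu_needs_service(int cpu)
{

        /*
         * A relaxed read-modify-write suffices for a "look at this
         * CPU later" hint; nothing written before it needs to become
         * visible to the consumer first.
         */
        atomic_fetch_or_explicit(&cpumask_hint, 1u << cpu,
            memory_order_relaxed);
}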
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index 86e78c4..a78b601 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -357,20 +357,4 @@ IDTVEC(rendezvous)
POP_FRAME
iret
-/*
- * Clean up when we lose out on the lazy context switch optimization.
- * ie: when we are about to release a PTD but a cpu is still borrowing it.
- */
- SUPERALIGN_TEXT
-IDTVEC(lazypmap)
- PUSH_FRAME
- SET_KERNEL_SREGS
- cld
-
- call pmap_lazyfix_action
-
- movl lapic, %eax
- movl $0, LA_EOI(%eax) /* End Of Interrupt to APIC */
- POP_FRAME
- iret
#endif /* SMP */
diff --git a/sys/i386/i386/db_trace.c b/sys/i386/i386/db_trace.c
index 445d9c5..79da4dc 100644
--- a/sys/i386/i386/db_trace.c
+++ b/sys/i386/i386/db_trace.c
@@ -312,8 +312,7 @@ db_nextframe(struct i386_frame **fp, db_addr_t *ip, struct thread *td)
frame_type = TRAP_TIMERINT;
else if (strcmp(name, "Xcpustop") == 0 ||
strcmp(name, "Xrendezvous") == 0 ||
- strcmp(name, "Xipi_intr_bitmap_handler") == 0 ||
- strcmp(name, "Xlazypmap") == 0)
+ strcmp(name, "Xipi_intr_bitmap_handler") == 0)
frame_type = TRAP_INTERRUPT;
}
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 871ccb4..a07b06c 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -165,7 +165,6 @@ u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
-u_long *ipi_lazypmap_counts[MAXCPU];
static u_long *ipi_hardclock_counts[MAXCPU];
#endif
@@ -552,10 +551,6 @@ cpu_mp_start(void)
setidt(IPI_INVLCACHE, IDTVEC(invlcache),
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- /* Install an inter-CPU IPI for lazy pmap release */
- setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
- SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-
/* Install an inter-CPU IPI for all-CPU rendezvous */
setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
@@ -1682,8 +1677,6 @@ mp_ipi_intrcnt(void *dummy)
intrcnt_add(buf, &ipi_ast_counts[i]);
snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
intrcnt_add(buf, &ipi_rendezvous_counts[i]);
- snprintf(buf, sizeof(buf), "cpu%d:lazypmap", i);
- intrcnt_add(buf, &ipi_lazypmap_counts[i]);
snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
intrcnt_add(buf, &ipi_hardclock_counts[i]);
}
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d10bbe5..e1fe137 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1881,98 +1881,6 @@ retry:
* Pmap allocation/deallocation routines.
***************************************************/
-#ifdef SMP
-/*
- * Deal with a SMP shootdown of other users of the pmap that we are
- * trying to dispose of. This can be a bit hairy.
- */
-static cpumask_t *lazymask;
-static u_int lazyptd;
-static volatile u_int lazywait;
-
-void pmap_lazyfix_action(void);
-
-void
-pmap_lazyfix_action(void)
-{
- cpumask_t mymask = PCPU_GET(cpumask);
-
-#ifdef COUNT_IPIS
- (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
-#endif
- if (rcr3() == lazyptd)
- load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- atomic_clear_int(lazymask, mymask);
- atomic_store_rel_int(&lazywait, 1);
-}
-
-static void
-pmap_lazyfix_self(cpumask_t mymask)
-{
-
- if (rcr3() == lazyptd)
- load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- atomic_clear_int(lazymask, mymask);
-}
-
-
-static void
-pmap_lazyfix(pmap_t pmap)
-{
- cpumask_t mymask, mask;
- u_int spins;
-
- while ((mask = pmap->pm_active) != 0) {
- spins = 50000000;
- mask = mask & -mask; /* Find least significant set bit */
- mtx_lock_spin(&smp_ipi_mtx);
-#ifdef PAE
- lazyptd = vtophys(pmap->pm_pdpt);
-#else
- lazyptd = vtophys(pmap->pm_pdir);
-#endif
- mymask = PCPU_GET(cpumask);
- if (mask == mymask) {
- lazymask = &pmap->pm_active;
- pmap_lazyfix_self(mymask);
- } else {
- atomic_store_rel_int((u_int *)&lazymask,
- (u_int)&pmap->pm_active);
- atomic_store_rel_int(&lazywait, 0);
- ipi_selected(mask, IPI_LAZYPMAP);
- while (lazywait == 0) {
- ia32_pause();
- if (--spins == 0)
- break;
- }
- }
- mtx_unlock_spin(&smp_ipi_mtx);
- if (spins == 0)
- printf("pmap_lazyfix: spun for 50000000\n");
- }
-}
-
-#else /* SMP */
-
-/*
- * Cleaning up on uniprocessor is easy. For various reasons, we're
- * unlikely to have to even execute this code, including the fact
- * that the cleanup is deferred until the parent does a wait(2), which
- * means that another userland process has run.
- */
-static void
-pmap_lazyfix(pmap_t pmap)
-{
- u_int cr3;
-
- cr3 = vtophys(pmap->pm_pdir);
- if (cr3 == rcr3()) {
- load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- pmap->pm_active &= ~(PCPU_GET(cpumask));
- }
-}
-#endif /* SMP */
-
/*
* Release any resources held by the given physical map.
* Called when a pmap initialized by pmap_pinit is being released.
@@ -1990,7 +1898,6 @@ pmap_release(pmap_t pmap)
KASSERT(pmap->pm_root == NULL,
("pmap_release: pmap has reserved page table page(s)"));
- pmap_lazyfix(pmap);
mtx_lock_spin(&allpmaps_lock);
LIST_REMOVE(pmap, pm_list);
mtx_unlock_spin(&allpmaps_lock);
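
The deleted pmap_lazyfix() parked the caller in a spin bounded at 50000000 iterations until the IPI handler published lazywait = 1. A standalone C11 sketch of that bounded flag-wait handshake; the kernel used atomic_store_rel_int() and ia32_pause(), so treat this only as an illustration of the retired pattern.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int lazywait;

/* Called from the IPI handler on each remote CPU. */
static void
ack_lazyfix(void)
{
        atomic_store_explicit(&lazywait, 1, memory_order_release);
}

/* Called by the CPU disposing of the pmap, after the IPI is sent. */
static bool
wait_for_ack(unsigned int spins)
{
        while (atomic_load_explicit(&lazywait, memory_order_acquire) == 0) {
                if (--spins == 0)
                        return (false); /* gave up; caller re-checks pm_active */
        }
        return (true);
}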
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index 680b032..6547569 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -174,12 +174,6 @@ ENTRY(cpu_switch)
/* switch address space */
movl PCB_CR3(%edx),%eax
-#ifdef PAE
- cmpl %eax,IdlePDPT /* Kernel address space? */
-#else
- cmpl %eax,IdlePTD /* Kernel address space? */
-#endif
- je sw0
READ_CR3(%ebx) /* The same address space? */
cmpl %ebx,%eax
je sw0
diff --git a/sys/i386/include/smp.h b/sys/i386/include/smp.h
index d364cd9..b512e00 100644
--- a/sys/i386/include/smp.h
+++ b/sys/i386/include/smp.h
@@ -42,7 +42,6 @@ extern u_long *ipi_invlrng_counts[MAXCPU];
extern u_long *ipi_invlpg_counts[MAXCPU];
extern u_long *ipi_invlcache_counts[MAXCPU];
extern u_long *ipi_rendezvous_counts[MAXCPU];
-extern u_long *ipi_lazypmap_counts[MAXCPU];
#endif
/* IPI handlers */
@@ -53,8 +52,7 @@ inthand_t
IDTVEC(invlcache), /* Write back and invalidate cache */
IDTVEC(ipi_intr_bitmap_handler), /* Bitmap based IPIs */
IDTVEC(cpustop), /* CPU stops & waits to be restarted */
- IDTVEC(rendezvous), /* handle CPU rendezvous */
- IDTVEC(lazypmap); /* handle lazy pmap release */
+ IDTVEC(rendezvous); /* handle CPU rendezvous */
/* functions in mp_machdep.c */
void cpu_add(u_int apic_id, char boot_cpu);
diff --git a/sys/i386/include/xen/xenvar.h b/sys/i386/include/xen/xenvar.h
index 4f8c857..365930a 100644
--- a/sys/i386/include/xen/xenvar.h
+++ b/sys/i386/include/xen/xenvar.h
@@ -99,7 +99,6 @@ void xpq_init(void);
(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } xen_cpumask_t;
int xen_create_contiguous_region(vm_page_t pages, int npages);
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c
index 6e0fa23..670d110 100644
--- a/sys/i386/xen/mp_machdep.c
+++ b/sys/i386/xen/mp_machdep.c
@@ -153,7 +153,6 @@ static cpumask_t hyperthreading_cpus_mask;
extern void Xhypervisor_callback(void);
extern void failsafe_callback(void);
-extern void pmap_lazyfix_action(void);
struct cpu_group *
cpu_topo(void)
@@ -340,24 +339,16 @@ iv_invlcache(uintptr_t a, uintptr_t b)
atomic_add_int(&smp_tlb_wait, 1);
}
-static void
-iv_lazypmap(uintptr_t a, uintptr_t b)
-{
- pmap_lazyfix_action();
- atomic_add_int(&smp_tlb_wait, 1);
-}
-
/*
* These start from "IPI offset" APIC_IPI_INTS
*/
-static call_data_func_t *ipi_vectors[6] =
+static call_data_func_t *ipi_vectors[5] =
{
iv_rendezvous,
iv_invltlb,
iv_invlpg,
iv_invlrng,
iv_invlcache,
- iv_lazypmap,
};
/*
@@ -957,6 +948,30 @@ start_ap(int apic_id)
}
/*
+ * Send an IPI to a specific CPU.
+ */
+static void
+ipi_send_cpu(int cpu, u_int ipi)
+{
+ u_int bitmap, old_pending, new_pending;
+
+ if (IPI_IS_BITMAPED(ipi)) {
+ bitmap = 1 << ipi;
+ ipi = IPI_BITMAP_VECTOR;
+ do {
+ old_pending = cpu_ipi_pending[cpu];
+ new_pending = old_pending | bitmap;
+ } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+ old_pending, new_pending));
+ if (!old_pending)
+ ipi_pcpu(cpu, RESCHEDULE_VECTOR);
+ } else {
+ KASSERT(call_data != NULL, ("call_data not set"));
+ ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
+ }
+}
+
+/*
* Flush the TLB on all other CPUs
*/
static void
@@ -1098,14 +1113,6 @@ void
ipi_selected(cpumask_t cpus, u_int ipi)
{
int cpu;
- u_int bitmap = 0;
- u_int old_pending;
- u_int new_pending;
-
- if (IPI_IS_BITMAPED(ipi)) {
- bitmap = 1 << ipi;
- ipi = IPI_BITMAP_VECTOR;
- }
/*
* IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
@@ -1115,23 +1122,11 @@ ipi_selected(cpumask_t cpus, u_int ipi)
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, cpus);
- CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
while ((cpu = ffs(cpus)) != 0) {
cpu--;
cpus &= ~(1 << cpu);
-
- if (bitmap) {
- do {
- old_pending = cpu_ipi_pending[cpu];
- new_pending = old_pending | bitmap;
- } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
- old_pending, new_pending));
- if (!old_pending)
- ipi_pcpu(cpu, RESCHEDULE_VECTOR);
- } else {
- KASSERT(call_data != NULL, ("call_data not set"));
- ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
- }
+ CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+ ipi_send_cpu(cpu, ipi);
}
}
@@ -1141,14 +1136,6 @@ ipi_selected(cpumask_t cpus, u_int ipi)
void
ipi_cpu(int cpu, u_int ipi)
{
- u_int bitmap = 0;
- u_int old_pending;
- u_int new_pending;
-
- if (IPI_IS_BITMAPED(ipi)) {
- bitmap = 1 << ipi;
- ipi = IPI_BITMAP_VECTOR;
- }
/*
* IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
@@ -1159,19 +1146,7 @@ ipi_cpu(int cpu, u_int ipi)
atomic_set_int(&ipi_nmi_pending, 1 << cpu);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
-
- if (bitmap) {
- do {
- old_pending = cpu_ipi_pending[cpu];
- new_pending = old_pending | bitmap;
- } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
- old_pending, new_pending));
- if (!old_pending)
- ipi_pcpu(cpu, RESCHEDULE_VECTOR);
- } else {
- KASSERT(call_data != NULL, ("call_data not set"));
- ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
- }
+ ipi_send_cpu(cpu, ipi);
}
/*
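
The new ipi_send_cpu() helper folds the duplicated bitmap logic from ipi_selected() and ipi_cpu() into one place: a compare-and-swap loop ORs the IPI's bit into cpu_ipi_pending[cpu] so racing senders cannot lose each other's bits, and only the sender that makes the word go non-zero raises RESCHEDULE_VECTOR. A standalone C11 sketch of that lock-free OR-into-word technique (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true when the caller must raise the vector: the word was empty. */
static bool
post_pending_bit(_Atomic unsigned int *pending, unsigned int bit)
{
        unsigned int old;

        old = atomic_load_explicit(pending, memory_order_relaxed);
        /* Retry until our bit merges in without clobbering concurrent ones. */
        while (!atomic_compare_exchange_weak(pending, &old, old | bit))
                continue;
        return (old == 0);
}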
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index eb3c803..cf36f5e 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -1681,98 +1681,6 @@ retry:
* Pmap allocation/deallocation routines.
***************************************************/
-#ifdef SMP
-/*
- * Deal with a SMP shootdown of other users of the pmap that we are
- * trying to dispose of. This can be a bit hairy.
- */
-static cpumask_t *lazymask;
-static u_int lazyptd;
-static volatile u_int lazywait;
-
-void pmap_lazyfix_action(void);
-
-void
-pmap_lazyfix_action(void)
-{
- cpumask_t mymask = PCPU_GET(cpumask);
-
-#ifdef COUNT_IPIS
- (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
-#endif
- if (rcr3() == lazyptd)
- load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- atomic_clear_int(lazymask, mymask);
- atomic_store_rel_int(&lazywait, 1);
-}
-
-static void
-pmap_lazyfix_self(cpumask_t mymask)
-{
-
- if (rcr3() == lazyptd)
- load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- atomic_clear_int(lazymask, mymask);
-}
-
-
-static void
-pmap_lazyfix(pmap_t pmap)
-{
- cpumask_t mymask, mask;
- u_int spins;
-
- while ((mask = pmap->pm_active) != 0) {
- spins = 50000000;
- mask = mask & -mask; /* Find least significant set bit */
- mtx_lock_spin(&smp_ipi_mtx);
-#ifdef PAE
- lazyptd = vtophys(pmap->pm_pdpt);
-#else
- lazyptd = vtophys(pmap->pm_pdir);
-#endif
- mymask = PCPU_GET(cpumask);
- if (mask == mymask) {
- lazymask = &pmap->pm_active;
- pmap_lazyfix_self(mymask);
- } else {
- atomic_store_rel_int((u_int *)&lazymask,
- (u_int)&pmap->pm_active);
- atomic_store_rel_int(&lazywait, 0);
- ipi_selected(mask, IPI_LAZYPMAP);
- while (lazywait == 0) {
- ia32_pause();
- if (--spins == 0)
- break;
- }
- }
- mtx_unlock_spin(&smp_ipi_mtx);
- if (spins == 0)
- printf("pmap_lazyfix: spun for 50000000\n");
- }
-}
-
-#else /* SMP */
-
-/*
- * Cleaning up on uniprocessor is easy. For various reasons, we're
- * unlikely to have to even execute this code, including the fact
- * that the cleanup is deferred until the parent does a wait(2), which
- * means that another userland process has run.
- */
-static void
-pmap_lazyfix(pmap_t pmap)
-{
- u_int cr3;
-
- cr3 = vtophys(pmap->pm_pdir);
- if (cr3 == rcr3()) {
- load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- pmap->pm_active &= ~(PCPU_GET(cpumask));
- }
-}
-#endif /* SMP */
-
/*
* Release any resources held by the given physical map.
* Called when a pmap initialized by pmap_pinit is being released.
@@ -1798,7 +1706,6 @@ pmap_release(pmap_t pmap)
mtx_lock(&createdelete_lock);
#endif
- pmap_lazyfix(pmap);
mtx_lock_spin(&allpmaps_lock);
LIST_REMOVE(pmap, pm_list);
mtx_unlock_spin(&allpmaps_lock);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 9424f73..fef9e25 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -155,6 +155,8 @@ static struct runq runq;
*/
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];
+
+static cpumask_t idle_cpus_mask;
#endif
struct pcpuidlestat {
@@ -233,16 +235,6 @@ SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
&forward_wakeup_use_loop, 0,
"Use a loop to find idle cpus");
-static int forward_wakeup_use_single = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
- &forward_wakeup_use_single, 0,
- "Only signal one idle cpu");
-
-static int forward_wakeup_use_htt = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
- &forward_wakeup_use_htt, 0,
- "account for htt");
-
#endif
#if 0
static int sched_followon = 0;
@@ -1062,7 +1054,7 @@ static int
forward_wakeup(int cpunum)
{
struct pcpu *pc;
- cpumask_t dontuse, id, map, map2, map3, me;
+ cpumask_t dontuse, id, map, map2, me;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1087,13 +1079,13 @@ forward_wakeup(int cpunum)
return (0);
dontuse = me | stopped_cpus | hlt_cpus_mask;
- map3 = 0;
+ map2 = 0;
if (forward_wakeup_use_loop) {
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
if ((id & dontuse) == 0 &&
pc->pc_curthread == pc->pc_idlethread) {
- map3 |= id;
+ map2 |= id;
}
}
}
@@ -1104,33 +1096,19 @@ forward_wakeup(int cpunum)
/* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
- if (map != map3) {
- printf("map (%02X) != map3 (%02X)\n", map,
- map3);
- map = map3;
+ if (map != map2) {
+ printf("map != map2, loop method preferred\n");
+ map = map2;
}
}
} else {
- map = map3;
+ map = map2;
}
/* If we only allow a specific CPU, then mask off all the others. */
if (cpunum != NOCPU) {
KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
map &= (1 << cpunum);
- } else {
- /* Try choose an idle die. */
- if (forward_wakeup_use_htt) {
- map2 = (map & (map >> 1)) & 0x5555;
- if (map2) {
- map = map2;
- }
- }
-
- /* Set only one bit. */
- if (forward_wakeup_use_single) {
- map = map & ((~map) + 1);
- }
}
if (map) {
forward_wakeups_delivered++;
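
The retired onecpu and htt2 knobs picked wake-up targets with scalar bit tricks: map & (~map + 1) isolates the lowest set bit, and (map & (map >> 1)) & 0x5555 keeps pairs where both hyperthread siblings are idle. Both assume a flat 32-bit mask. A small runnable illustration (the values are arbitrary):

#include <stdio.h>

int
main(void)
{
        unsigned int map = 0x0c;        /* pretend CPUs 2 and 3 are idle */
        unsigned int one = map & (~map + 1);            /* lowest bit: 0x04 */
        unsigned int htt = (map & (map >> 1)) & 0x5555; /* idle pair: 0x04 */

        printf("one=%#x htt=%#x\n", one, htt);
        return (0);
}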
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 9ae6381..aba6f0e 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -55,7 +55,6 @@ __FBSDID("$FreeBSD$");
#ifdef SMP
volatile cpumask_t stopped_cpus;
volatile cpumask_t started_cpus;
-cpumask_t idle_cpus_mask;
cpumask_t hlt_cpus_mask;
cpumask_t logical_cpus_mask;
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index 854fa29..3263991 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -36,7 +36,7 @@
#define CPU_SETSIZE MAXCPU
#endif
-#define CPU_MAXSIZE 128
+#define CPU_MAXSIZE (4 * MAXCPU)
#ifndef CPU_SETSIZE
#define CPU_SETSIZE CPU_MAXSIZE
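
CPU_MAXSIZE now scales with the platform's MAXCPU instead of a hard-coded 128 bits, and userland picks it up through the CPU_SETSIZE fallback above. A small sketch that prints the resulting sizes; whether MAXCPU is visible to a given userland build depends on the machine headers, so treat this as illustrative.

#include <sys/param.h>
#include <sys/cpuset.h>

#include <stdio.h>

int
main(void)
{
        printf("CPU_SETSIZE=%d, sizeof(cpuset_t)=%zu bytes\n",
            (int)CPU_SETSIZE, sizeof(cpuset_t));
        return (0);
}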
diff --git a/sys/sys/smp.h b/sys/sys/smp.h
index 6104d3e..544cb95 100644
--- a/sys/sys/smp.h
+++ b/sys/sys/smp.h
@@ -73,7 +73,6 @@ extern int smp_active;
extern int smp_cpus;
extern volatile cpumask_t started_cpus;
extern volatile cpumask_t stopped_cpus;
-extern cpumask_t idle_cpus_mask;
extern cpumask_t hlt_cpus_mask;
extern cpumask_t logical_cpus_mask;
#endif /* SMP */