Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/acpica/acpi_wakeup.c  |  3
-rw-r--r--  sys/amd64/amd64/genassym.c      |  1
-rw-r--r--  sys/amd64/amd64/intr_machdep.c  |  5
-rw-r--r--  sys/amd64/amd64/mp_machdep.c    | 71
-rw-r--r--  sys/amd64/amd64/pmap.c          | 72
-rw-r--r--  sys/amd64/amd64/support.S       | 12
-rw-r--r--  sys/amd64/amd64/vm_machdep.c    |  9
-rw-r--r--  sys/amd64/include/cpufunc.h     | 12
8 files changed, 79 insertions(+), 106 deletions(-)
diff --git a/sys/amd64/acpica/acpi_wakeup.c b/sys/amd64/acpica/acpi_wakeup.c
index 29e66c5..43aeec3 100644
--- a/sys/amd64/acpica/acpi_wakeup.c
+++ b/sys/amd64/acpica/acpi_wakeup.c
@@ -226,7 +226,8 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
return (ret);
#ifdef SMP
- wakeup_cpus = PCPU_GET(other_cpus);
+ wakeup_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &wakeup_cpus);
#endif
AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc));
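
The hunk above is the first instance of the pattern this commit applies throughout: the cached per-CPU other_cpus mask is retired, and callers instead derive the set of remote CPUs from all_cpus and the local cpuid. A minimal sketch of the idiom; other_cpus_snapshot() is a hypothetical name used only for illustration, not a function in the tree:

    #include <sys/param.h>
    #include <sys/cpuset.h>
    #include <sys/pcpu.h>
    #include <sys/smp.h>

    /*
     * Hypothetical helper illustrating the new idiom: copy all_cpus and
     * clear our own bit, instead of reading a cached PCPU_GET(other_cpus).
     * Only meaningful where cpuid cannot change underneath us, e.g. while
     * pinned or with interrupts disabled.
     */
    static __inline cpuset_t
    other_cpus_snapshot(void)
    {
        cpuset_t set;

        set = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &set);
        return (set);
    }
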
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index aba790d..1c9abd5 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -201,7 +201,6 @@ ASSYM(UC_EFLAGS, offsetof(ucontext_t, uc_mcontext.mc_rflags));
ASSYM(ENOENT, ENOENT);
ASSYM(EFAULT, EFAULT);
ASSYM(ENAMETOOLONG, ENAMETOOLONG);
-ASSYM(MAXCPU, MAXCPU);
ASSYM(MAXCOMLEN, MAXCOMLEN);
ASSYM(MAXPATHLEN, MAXPATHLEN);
ASSYM(PC_SIZEOF, sizeof(struct pcpu));
diff --git a/sys/amd64/amd64/intr_machdep.c b/sys/amd64/amd64/intr_machdep.c
index 4557177..e64f89d 100644
--- a/sys/amd64/amd64/intr_machdep.c
+++ b/sys/amd64/amd64/intr_machdep.c
@@ -82,6 +82,11 @@ static int round_robin_interrupts = 1;
TUNABLE_INT("round_robin_interrupts", &round_robin_interrupts);
#endif
+u_long intrcnt[INTRCNT_COUNT];
+char intrnames[INTRCNT_COUNT * (MAXCOMLEN + 1)];
+size_t sintrcnt = sizeof(intrcnt);
+size_t sintrnames = sizeof(intrnames);
+
static int intr_assign_cpu(void *arg, u_char cpu);
static void intr_disable_src(void *arg);
static void intr_init(void *__dummy);
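
Here the interrupt counter and name tables become plain C arrays, and their total sizes are exported as sintrcnt and sintrnames, replacing the eintrcnt/eintrnames end labels that the assembly version provided (the support.S hunk below deletes them). A sketch of how a consumer might walk the tables through the exported sizes; the function is illustrative, not a verbatim copy of any consumer in the tree:

    /* Assumed consumer-side usage, mirroring the declarations above. */
    extern u_long intrcnt[];
    extern char intrnames[];
    extern size_t sintrcnt, sintrnames;

    static void
    print_intr_counters(void)
    {
        size_t i, nintr;

        nintr = sintrcnt / sizeof(u_long);      /* entries, not bytes */
        for (i = 0; i < nintr; i++)
            printf("%s: %lu\n",
                intrnames + i * (MAXCOMLEN + 1), intrcnt[i]);
    }
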
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index dcf70d4..5cce565 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -624,10 +624,10 @@ cpu_mp_announce(void)
void
init_secondary(void)
{
- cpuset_t tcpuset, tallcpus;
struct pcpu *pc;
struct nmi_pcpu *np;
u_int64_t msr, cr0;
+ u_int cpuid;
int cpu, gsel_tss, x;
struct region_descriptor ap_gdt;
@@ -731,8 +731,9 @@ init_secondary(void)
fpuinit();
/* A quick check from sanity claus */
+ cpuid = PCPU_GET(cpuid);
if (PCPU_GET(apic_id) != lapic_id()) {
- printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
+ printf("SMP: cpuid = %d\n", cpuid);
printf("SMP: actual apic_id = %d\n", lapic_id());
printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
panic("cpuid mismatch! boom!!");
@@ -754,19 +755,13 @@ init_secondary(void)
smp_cpus++;
- CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
- printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
- tcpuset = PCPU_GET(cpumask);
+ CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
+ printf("SMP: AP CPU #%d Launched!\n", cpuid);
/* Determine if we are a logical CPU. */
/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
- CPU_OR(&logical_cpus_mask, &tcpuset);
-
- /* Build our map of 'other' CPUs. */
- tallcpus = all_cpus;
- CPU_NAND(&tallcpus, &tcpuset);
- PCPU_SET(other_cpus, tallcpus);
+ CPU_SET(cpuid, &logical_cpus_mask);
if (bootverbose)
lapic_dump("AP");
@@ -913,7 +908,6 @@ assign_cpu_ids(void)
static int
start_all_aps(void)
{
- cpuset_t tallcpus, tcpuset;
vm_offset_t va = boot_address + KERNBASE;
u_int64_t *pt4, *pt3, *pt2;
u_int32_t mpbioswarmvec;
@@ -1004,12 +998,6 @@ start_all_aps(void)
CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
- /* build our map of 'other' CPUs */
- tallcpus = all_cpus;
- tcpuset = PCPU_GET(cpumask);
- CPU_NAND(&tallcpus, &tcpuset);
- PCPU_SET(other_cpus, tallcpus);
-
/* restore the warmstart vector */
*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
@@ -1193,9 +1181,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_of
if (othercpus < 1)
return;
} else {
- sched_pin();
- CPU_NAND(&mask, PCPU_PTR(cpumask));
- sched_unpin();
+ CPU_CLR(PCPU_GET(cpuid), &mask);
if (CPU_EMPTY(&mask))
return;
}
@@ -1392,11 +1378,13 @@ ipi_cpu(int cpu, u_int ipi)
void
ipi_all_but_self(u_int ipi)
{
+ cpuset_t other_cpus;
+
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
- sched_pin();
if (IPI_IS_BITMAPED(ipi)) {
- ipi_selected(PCPU_GET(other_cpus), ipi);
- sched_unpin();
+ ipi_selected(other_cpus, ipi);
return;
}
@@ -1406,8 +1394,7 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- CPU_OR_ATOMIC(&ipi_nmi_pending, PCPU_PTR(other_cpus));
- sched_unpin();
+ CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -1416,7 +1403,7 @@ ipi_all_but_self(u_int ipi)
int
ipi_nmi_handler()
{
- cpuset_t cpumask;
+ u_int cpuid;
/*
* As long as there is not a simple way to know about a NMI's
@@ -1424,13 +1411,11 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
- sched_pin();
- cpumask = PCPU_GET(cpumask);
- sched_unpin();
- if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
return (1);
- CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
+ CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
cpustop_handler();
return (0);
}
@@ -1442,25 +1427,21 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
- cpuset_t cpumask;
u_int cpu;
- sched_pin();
cpu = PCPU_GET(cpuid);
- cpumask = PCPU_GET(cpumask);
- sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
- while (!CPU_OVERLAP(&started_cpus, &cpumask))
+ while (!CPU_ISSET(cpu, &started_cpus))
ia32_pause();
- CPU_NAND_ATOMIC(&started_cpus, &cpumask);
- CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
@@ -1475,19 +1456,17 @@ cpustop_handler(void)
void
cpususpend_handler(void)
{
- cpuset_t cpumask;
register_t cr3, rf;
u_int cpu;
cpu = PCPU_GET(cpuid);
- cpumask = PCPU_GET(cpumask);
rf = intr_disable();
cr3 = rcr3();
if (savectx(susppcbs[cpu])) {
wbinvd();
- CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
} else {
pmap_init_pat();
PCPU_SET(switchtime, 0);
@@ -1495,11 +1474,11 @@ cpususpend_handler(void)
}
/* Wait for resume */
- while (!CPU_OVERLAP(&started_cpus, &cpumask))
+ while (!CPU_ISSET(cpu, &started_cpus))
ia32_pause();
- CPU_NAND_ATOMIC(&started_cpus, &cpumask);
- CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
/* Restore CR3 and enable interrupts */
load_cr3(cr3);
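
In the handlers above, a CPU now publishes its state by flipping its own bit with CPU_SET_ATOMIC/CPU_CLR_ATOMIC instead of OR-ing and NAND-ing a one-bit cpuset_t, and the sched_pin()/sched_unpin() pairs disappear because only the scalar cpuid is read. For context, a sketch of the initiating side of the same handshake; this is an assumed simplification, not the tree's stop_cpus():

    /*
     * Simplified sketch of the side that parks the other CPUs.  The
     * responding side is cpustop_handler() above.
     */
    static void
    stop_other_cpus_sketch(void)
    {
        cpuset_t map, pending;

        map = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &map);
        ipi_selected(map, IPI_STOP);            /* ask targets to stop */
        do {
            pending = map;
            CPU_NAND(&pending, &stopped_cpus);  /* targets not yet parked */
        } while (!CPU_EMPTY(&pending));
    }
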
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 941e85e..bed795b 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -925,16 +925,18 @@ pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invlpg(va);
smp_invlpg(va);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invlpg(va);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -946,8 +948,9 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
vm_offset_t addr;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
@@ -955,9 +958,10 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
invlpg(addr);
smp_invlpg_range(sva, eva);
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
CPU_AND(&other_cpus, &pmap->pm_active);
@@ -970,16 +974,18 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
void
pmap_invalidate_all(pmap_t pmap)
{
- cpuset_t cpumask, other_cpus;
+ cpuset_t other_cpus;
+ u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invltlb();
smp_invltlb();
} else {
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
- if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
invltlb();
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
@@ -999,11 +1005,11 @@ pmap_invalidate_cache(void)
}
struct pde_action {
- cpuset_t store; /* processor that updates the PDE */
cpuset_t invalidate; /* processors that invalidate their TLB */
vm_offset_t va;
pd_entry_t *pde;
pd_entry_t newpde;
+ u_int store; /* processor that updates the PDE */
};
static void
@@ -1011,12 +1017,8 @@ pmap_update_pde_action(void *arg)
{
struct pde_action *act = arg;
- sched_pin();
- if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (act->store == PCPU_GET(cpuid))
pde_store(act->pde, act->newpde);
- } else
- sched_unpin();
}
static void
@@ -1024,12 +1026,8 @@ pmap_update_pde_teardown(void *arg)
{
struct pde_action *act = arg;
- sched_pin();
- if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
- sched_unpin();
+ if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
pmap_update_pde_invalidate(act->va, act->newpde);
- } else
- sched_unpin();
}
/*
@@ -1044,28 +1042,30 @@ static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
struct pde_action act;
- cpuset_t active, cpumask, other_cpus;
+ cpuset_t active, other_cpus;
+ u_int cpuid;
sched_pin();
- cpumask = PCPU_GET(cpumask);
- other_cpus = PCPU_GET(other_cpus);
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
if (pmap == kernel_pmap)
active = all_cpus;
else
active = pmap->pm_active;
if (CPU_OVERLAP(&active, &other_cpus)) {
- act.store = cpumask;
+ act.store = cpuid;
act.invalidate = active;
act.va = va;
act.pde = pde;
act.newpde = newpde;
- CPU_OR(&cpumask, &active);
- smp_rendezvous_cpus(cpumask,
+ CPU_SET(cpuid, &active);
+ smp_rendezvous_cpus(active,
smp_no_rendevous_barrier, pmap_update_pde_action,
pmap_update_pde_teardown, &act);
} else {
pde_store(pde, newpde);
- if (CPU_OVERLAP(&active, &cpumask))
+ if (CPU_ISSET(cpuid, &active))
pmap_update_pde_invalidate(va, newpde);
}
sched_unpin();
@@ -2104,7 +2104,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
vm_page_t m, free;
TAILQ_FOREACH(m, &vpq->pl, pageq) {
- if (m->hold_count || m->busy)
+ if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
continue;
TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
va = pv->pv_va;
@@ -5095,17 +5095,19 @@ void
pmap_activate(struct thread *td)
{
pmap_t pmap, oldpmap;
+ u_int cpuid;
u_int64_t cr3;
critical_enter();
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
+ cpuid = PCPU_GET(cpuid);
#ifdef SMP
- CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
- CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
- CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
+ CPU_CLR(cpuid, &oldpmap->pm_active);
+ CPU_SET(cpuid, &pmap->pm_active);
#endif
cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
td->td_pcb->pcb_cr3 = cr3;
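
The three invalidation routines above share one shape, now written in terms of cpuid and a locally computed other_cpus set. Recomposed as a single sketch for readability (an assumed simplification of pmap_invalidate_page(); the range and full-TLB variants differ only in the flush calls, and smp_masked_invlpg()'s exact signature is taken on trust here):

    static void
    shootdown_sketch(pmap_t pmap, vm_offset_t va)
    {
        cpuset_t other_cpus;
        u_int cpuid;

        sched_pin();                    /* keep cpuid and masks stable */
        if (pmap == kernel_pmap ||
            !CPU_CMP(&pmap->pm_active, &all_cpus)) {
            invlpg(va);                 /* flush locally ... */
            smp_invlpg(va);             /* ... and on every other CPU */
        } else {
            cpuid = PCPU_GET(cpuid);
            other_cpus = all_cpus;
            CPU_CLR(cpuid, &other_cpus);
            if (CPU_ISSET(cpuid, &pmap->pm_active))
                invlpg(va);
            CPU_AND(&other_cpus, &pmap->pm_active);
            if (!CPU_EMPTY(&other_cpus))
                smp_masked_invlpg(other_cpus, va);
        }
        sched_unpin();
    }
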
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index d3522ec..fed852c 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -38,18 +38,6 @@
#include "assym.s"
- .data
- ALIGN_DATA
- .globl intrcnt, eintrcnt
-intrcnt:
- .space INTRCNT_COUNT * 8
-eintrcnt:
-
- .globl intrnames, eintrnames
-intrnames:
- .space INTRCNT_COUNT * (MAXCOMLEN + 1)
-eintrnames:
-
.text
/*
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 2a9dd7a..db0d51b 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -59,7 +59,6 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
-#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@@ -537,8 +536,8 @@ cpu_reset()
u_int cnt;
if (smp_active) {
- sched_pin();
- map = PCPU_GET(other_cpus);
+ map = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &map);
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map)) {
printf("cpu_reset: Stopping other CPUs\n");
@@ -547,7 +546,6 @@ cpu_reset()
if (PCPU_GET(cpuid) != 0) {
cpu_reset_proxyid = PCPU_GET(cpuid);
- sched_unpin();
cpustop_restartfunc = cpu_reset_proxy;
cpu_reset_proxy_active = 0;
printf("cpu_reset: Restarting BSP\n");
@@ -569,8 +567,7 @@ cpu_reset()
while (1)
ia32_pause();
/* NOTREACHED */
- } else
- sched_unpin();
+ }
DELAY(1000000);
}
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 9b61d37..c07e09b 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -467,16 +467,18 @@ load_es(u_short sel)
}
static __inline void
-cpu_monitor(const void *addr, int extensions, int hints)
+cpu_monitor(const void *addr, u_long extensions, u_int hints)
{
- __asm __volatile("monitor;"
- : :"a" (addr), "c" (extensions), "d"(hints));
+
+ __asm __volatile("monitor"
+ : : "a" (addr), "c" (extensions), "d" (hints));
}
static __inline void
-cpu_mwait(int extensions, int hints)
+cpu_mwait(u_long extensions, u_int hints)
{
- __asm __volatile("mwait;" : :"a" (hints), "c" (extensions));
+
+ __asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}
#ifdef _KERNEL
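
The MONITOR/MWAIT wrappers widen their extension bits to u_long, since those land in %rcx, while the hints stay u_int (%edx for MONITOR, %eax for MWAIT), and the stray semicolons inside the asm strings are dropped. A sketch of the usual wait-loop shape these primitives support; this is assumed usage, not code from the tree (__DEVOLATILE is the sys/cdefs.h qualifier-stripping macro):

    static void
    mwait_until_nonzero(volatile const int *p)
    {
        while (*p == 0) {
            cpu_monitor(__DEVOLATILE(const void *, p), 0, 0);
            /*
             * Re-check before sleeping: a store that already landed
             * makes MWAIT pointless; one landing after MONITOR is
             * armed will wake MWAIT anyway.
             */
            if (*p == 0)
                cpu_mwait(0, 0);
        }
    }
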