summaryrefslogtreecommitdiffstats
path: root/sys/sparc64/include/smp.h
diff options
context:
space:
mode:
authorjake <jake@FreeBSD.org>2002-03-13 03:43:00 +0000
committerjake <jake@FreeBSD.org>2002-03-13 03:43:00 +0000
commitdf6db29bae8f58785bc0aa52a89093892533b69e (patch)
treecf7e7d2080dafa61be92578c9fd6c61530140ea1 /sys/sparc64/include/smp.h
parent6ee80641df5b29b2e23004a1625d7a5cdb706110 (diff)
downloadFreeBSD-src-df6db29bae8f58785bc0aa52a89093892533b69e.zip
FreeBSD-src-df6db29bae8f58785bc0aa52a89093892533b69e.tar.gz
Make IPI_WAIT use a bit mask of the cpus that a pmap is active on and only
wait for those cpus, instead of all of them by using a count. Oops.

Make the pointer to the mask that the primary cpu spins on volatile, so gcc
doesn't optimize out an important load. Oops again.

Activate tlb shootdown ipi synchronization now that it works. We have all
involved cpus wait until all the others are done. This may not be necessary;
it is mostly for sanity.

Make the trigger level interrupt ipi handler work.

Submitted by: tmm
Diffstat (limited to 'sys/sparc64/include/smp.h')
-rw-r--r-- sys/sparc64/include/smp.h | 30
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/sys/sparc64/include/smp.h b/sys/sparc64/include/smp.h
index bc0c0d5..958fa0c 100644
--- a/sys/sparc64/include/smp.h
+++ b/sys/sparc64/include/smp.h
@@ -57,13 +57,8 @@ struct cpu_start_args {
struct tte csa_ttes[PCPU_PAGES];
};
-struct ipi_level_args {
- u_int ila_count;
- u_int ila_level;
-};
-
struct ipi_tlb_args {
- u_int ita_count;
+ u_int ita_mask;
u_long ita_tlb;
struct pmap *ita_pmap;
u_long ita_start;
@@ -74,6 +69,7 @@ struct ipi_tlb_args {
struct pcpu;
void cpu_mp_bootstrap(struct pcpu *pc);
+void cpu_mp_shutdown(void);
void cpu_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2);
void cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);
@@ -116,11 +112,11 @@ ipi_tlb_context_demap(struct pmap *pm)
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
- ita->ita_count = smp_cpus;
+ ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
(u_long)ita);
- return (&ita->ita_count);
+ return (&ita->ita_mask);
}
static __inline void *
@@ -134,12 +130,12 @@ ipi_tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
- ita->ita_count = smp_cpus;
+ ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_tlb = tlb;
ita->ita_pmap = pm;
ita->ita_va = va;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
- return (&ita->ita_count);
+ return (&ita->ita_mask);
}
static __inline void *
@@ -153,26 +149,24 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
- ita->ita_count = smp_cpus;
+ ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
ita->ita_start = start;
ita->ita_end = end;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
- return (&ita->ita_count);
+ return (&ita->ita_mask);
}
static __inline void
ipi_wait(void *cookie)
{
-#if 0
- u_int *count;
+ u_int *volatile mask;
- if ((count = cookie) != NULL) {
- atomic_subtract_int(count, 1);
- while (*count != 0)
+ if ((mask = cookie) != NULL) {
+ atomic_clear_int(mask, PCPU_GET(cpumask));
+ while (*mask != 0)
;
}
-#endif
}
#endif
OpenPOWER on IntegriCloud