author     obrien <obrien@FreeBSD.org>  2004-08-28 00:49:55 +0000
committer  obrien <obrien@FreeBSD.org>  2004-08-28 00:49:55 +0000
commit     0fe47008f64288f55c942ef9a92d2c7b57dc52e9 (patch)
tree       ec20bdbd13037c6dbb75b6ba7e805d8466fdfb40
parent     587d1d74f30e955065db313171901efe9d6cafec (diff)
download   FreeBSD-src-0fe47008f64288f55c942ef9a92d2c7b57dc52e9.zip
           FreeBSD-src-0fe47008f64288f55c942ef9a92d2c7b57dc52e9.tar.gz
s/smp_rv_mtx/smp_ipi_mtx/g
Requested by: jhb
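
For context: the hunks below only rename the lock; the locking pattern around IPI-based TLB shootdowns is unchanged. A minimal sketch of that pattern under the new name, reconstructed from the i386 pmap_invalidate_page() hunks below (simplified, not the verbatim source; the helper example_invalidate_page() is hypothetical and assumes an SMP kernel):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/smp.h>

/*
 * Hypothetical helper showing the pattern: every sender of shootdown IPIs
 * serializes on the (renamed) smp_ipi_mtx spin mutex, flushes its own TLB
 * entry, then IPIs the other CPUs.
 */
static void
example_invalidate_page(vm_offset_t va)
{

	if (smp_started) {
		if (!(read_eflags() & PSL_I))
			panic("%s: interrupts disabled", __func__);
		mtx_lock_spin(&smp_ipi_mtx);	/* was smp_rv_mtx */
	} else
		critical_enter();		/* UP: just block preemption */
	invlpg(va);				/* flush the local TLB entry */
	if (smp_started)
		smp_invlpg(va);			/* shoot down the other CPUs */
	if (smp_started)
		mtx_unlock_spin(&smp_ipi_mtx);
	else
		critical_exit();
}
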
-rw-r--r--   sys/amd64/amd64/mp_machdep.c    4
-rw-r--r--   sys/amd64/amd64/pmap.c         12
-rw-r--r--   sys/i386/i386/mp_machdep.c      4
-rw-r--r--   sys/i386/i386/pmap.c           16
-rw-r--r--   sys/kern/subr_smp.c             8
-rw-r--r--   sys/sys/smp.h                   2
6 files changed, 23 insertions, 23 deletions
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index ff9b08b..989b804 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -708,7 +708,7 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
ncpu = mp_ncpus - 1; /* does not shootdown self */
if (ncpu < 1)
return; /* no other cpus */
- mtx_assert(&smp_rv_mtx, MA_OWNED);
+ mtx_assert(&smp_ipi_mtx, MA_OWNED);
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
@@ -794,7 +794,7 @@ smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offse
if (ncpu < 1)
return;
}
- mtx_assert(&smp_rv_mtx, MA_OWNED);
+ mtx_assert(&smp_ipi_mtx, MA_OWNED);
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index be37b00..f00132c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -631,7 +631,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
if (smp_started) {
if (!(read_rflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
} else
critical_enter();
/*
@@ -652,7 +652,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
smp_masked_invlpg(pmap->pm_active & other_cpus, va);
}
if (smp_started)
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
else
critical_exit();
}
@@ -667,7 +667,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
if (smp_started) {
if (!(read_rflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
} else
critical_enter();
/*
@@ -691,7 +691,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
sva, eva);
}
if (smp_started)
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
else
critical_exit();
}
@@ -705,7 +705,7 @@ pmap_invalidate_all(pmap_t pmap)
if (smp_started) {
if (!(read_rflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
} else
critical_enter();
/*
@@ -726,7 +726,7 @@ pmap_invalidate_all(pmap_t pmap)
smp_masked_invltlb(pmap->pm_active & other_cpus);
}
if (smp_started)
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
else
critical_exit();
}
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index e817260..20a3daf 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -910,7 +910,7 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
ncpu = mp_ncpus - 1; /* does not shootdown self */
if (ncpu < 1)
return; /* no other cpus */
- mtx_assert(&smp_rv_mtx, MA_OWNED);
+ mtx_assert(&smp_ipi_mtx, MA_OWNED);
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
@@ -996,7 +996,7 @@ smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offse
if (ncpu < 1)
return;
}
- mtx_assert(&smp_rv_mtx, MA_OWNED);
+ mtx_assert(&smp_ipi_mtx, MA_OWNED);
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index dbdeca0..86b5523 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -582,7 +582,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
if (smp_started) {
if (!(read_eflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
} else
critical_enter();
/*
@@ -603,7 +603,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
smp_masked_invlpg(pmap->pm_active & other_cpus, va);
}
if (smp_started)
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
else
critical_exit();
}
@@ -618,7 +618,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
if (smp_started) {
if (!(read_eflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
} else
critical_enter();
/*
@@ -642,7 +642,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
sva, eva);
}
if (smp_started)
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
else
critical_exit();
}
@@ -656,7 +656,7 @@ pmap_invalidate_all(pmap_t pmap)
if (smp_started) {
if (!(read_eflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
} else
critical_enter();
/*
@@ -677,7 +677,7 @@ pmap_invalidate_all(pmap_t pmap)
smp_masked_invltlb(pmap->pm_active & other_cpus);
}
if (smp_started)
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
else
critical_exit();
}
@@ -1297,7 +1297,7 @@ pmap_lazyfix(pmap_t pmap)
while ((mask = pmap->pm_active) != 0) {
spins = 50000000;
mask = mask & -mask; /* Find least significant set bit */
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
#ifdef PAE
lazyptd = vtophys(pmap->pm_pdpt);
#else
@@ -1317,7 +1317,7 @@ pmap_lazyfix(pmap_t pmap)
break;
}
}
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
if (spins == 0)
printf("pmap_lazyfix: spun for 50000000\n");
}
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 945ef6f..f1b8499 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -109,7 +109,7 @@ static volatile int smp_rv_waiters[2];
* functions trigger at once and cause multiple CPUs to busywait with
* interrupts disabled.
*/
-struct mtx smp_rv_mtx;
+struct mtx smp_ipi_mtx;
/*
* Let the MD SMP code initialize mp_maxid very early if it can.
@@ -135,7 +135,7 @@ mp_start(void *dummy)
return;
}
- mtx_init(&smp_rv_mtx, "smp rendezvous", NULL, MTX_SPIN);
+ mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
cpu_mp_start();
printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
mp_ncpus);
@@ -331,7 +331,7 @@ smp_rendezvous(void (* setup_func)(void *),
}
/* obtain rendezvous lock */
- mtx_lock_spin(&smp_rv_mtx);
+ mtx_lock_spin(&smp_ipi_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -348,7 +348,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_unlock_spin(&smp_rv_mtx);
+ mtx_unlock_spin(&smp_ipi_mtx);
}
#else /* !SMP */
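
The subr_smp.c hunks above also show that callers of smp_rendezvous() never touch this mutex directly; it is taken and dropped inside the function so that concurrent rendezvous and shootdown requests do not busywait against each other. A hedged caller-side sketch (the count_cpu()/example_rendezvous() names are hypothetical; NULL setup/teardown callbacks are assumed to be permitted, as elsewhere in this interface):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

static volatile u_int rendezvous_hits;		/* hypothetical counter */

/* Runs on every CPU, with interrupts disabled, inside the rendezvous. */
static void
count_cpu(void *arg __unused)
{

	atomic_add_int(&rendezvous_hits, 1);
}

static void
example_rendezvous(void)
{

	/*
	 * smp_rendezvous() serializes concurrent callers on smp_ipi_mtx
	 * internally, so the caller only supplies callbacks and an argument.
	 */
	smp_rendezvous(NULL, count_cpu, NULL, NULL);
}
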
diff --git a/sys/sys/smp.h b/sys/sys/smp.h
index 5a1ab86..12bfbba 100644
--- a/sys/sys/smp.h
+++ b/sys/sys/smp.h
@@ -95,7 +95,7 @@ void forward_roundrobin(void);
int restart_cpus(cpumask_t);
int stop_cpus(cpumask_t);
void smp_rendezvous_action(void);
-extern struct mtx smp_rv_mtx;
+extern struct mtx smp_ipi_mtx;
#endif /* SMP */
void smp_rendezvous(void (*)(void *),
void (*)(void *),