author     alfred <alfred@FreeBSD.org>  2001-05-19 01:28:09 +0000
committer  alfred <alfred@FreeBSD.org>  2001-05-19 01:28:09 +0000
commit     a3f0842419d98da211706f921fc626e160cd960b (patch)
tree       e86922a5639c32e1242d4f3088fc487f3be5b236 /sys/amd64
parent     9eda9187f024233436e6a743f13bd938b1a0f19c (diff)
Introduce a global lock for the vm subsystem (vm_mtx).
vm_mtx does not recurse and is required for most low-level vm operations.

Faults cannot be taken without holding Giant.

Memory subsystems can now call the base page allocators safely.

Almost all atomic ops were removed, as they are covered under the vm mutex.

Alpha and ia64 now need to catch up to i386's trap handlers.

FFS and NFS have been tested; other filesystems will need minor changes (grabbing the vm lock when twiddling page properties).

Reviewed (partially) by: jake, jhb
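The locking discipline is the same everywhere in the diff below: acquire vm_mtx before touching pmap or page-queue state, and release it on every exit path, including early error returns. As a rough user-space sketch of that discipline (a pthread mutex stands in for the kernel's vm_mtx; page_op and page_busy are illustrative names, not from this commit):

	#include <errno.h>
	#include <pthread.h>

	/* Stand-in for the kernel's global vm lock; non-recursive, like vm_mtx. */
	static pthread_mutex_t vm_mtx = PTHREAD_MUTEX_INITIALIZER;

	static int page_busy;	/* toy "page property" guarded by vm_mtx */

	/*
	 * Take the lock up front and drop it on *every* return path,
	 * mirroring the mtx_unlock() calls added before each early
	 * "return EFAULT" in mem.c below.
	 */
	static int
	page_op(void)
	{
		pthread_mutex_lock(&vm_mtx);
		if (page_busy) {
			/* Error path: drop the lock before bailing out. */
			pthread_mutex_unlock(&vm_mtx);
			return (EBUSY);
		}
		page_busy = 1;	/* twiddle page properties under the lock */
		page_busy = 0;
		pthread_mutex_unlock(&vm_mtx);
		return (0);
	}

	int
	main(void)
	{
		return (page_op());
	}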
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/busdma_machdep.c |  2
-rw-r--r--  sys/amd64/amd64/machdep.c        |  4
-rw-r--r--  sys/amd64/amd64/mem.c            | 18
-rw-r--r--  sys/amd64/amd64/pmap.c           |  1
-rw-r--r--  sys/amd64/amd64/trap.c           | 13
-rw-r--r--  sys/amd64/amd64/vm_machdep.c     | 22
6 files changed, 45 insertions(+), 15 deletions(-)
diff --git a/sys/amd64/amd64/busdma_machdep.c b/sys/amd64/amd64/busdma_machdep.c
index 63906dd..3dc9e76 100644
--- a/sys/amd64/amd64/busdma_machdep.c
+++ b/sys/amd64/amd64/busdma_machdep.c
@@ -31,6 +31,8 @@
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index bb552a3..e02569c1 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -264,6 +264,7 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
+ mtx_lock(&vm_mtx);
earlysetcpuclass();
startrtclock();
printcpuinfo();
@@ -397,6 +398,7 @@ again:
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
+ mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
@@ -2075,9 +2077,11 @@ f00f_hack(void *unused) {
r_idt.rd_base = (int)new_idt;
lidt(&r_idt);
idt = new_idt;
+ mtx_lock(&vm_mtx);
if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
VM_PROT_READ, FALSE) != KERN_SUCCESS)
panic("vm_map_protect failed");
+ mtx_unlock(&vm_mtx);
return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index a5a9135..8671530 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -50,6 +50,8 @@
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
@@ -153,13 +155,17 @@ mmrw(dev_t dev, struct uio *uio, int flags)
case 0:
v = uio->uio_offset;
v &= ~PAGE_MASK;
+ mtx_lock(&vm_mtx);
pmap_kenter((vm_offset_t)ptvmmap, v);
+ mtx_unlock(&vm_mtx);
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
c = min(c, (u_int)iov->iov_len);
error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
+ mtx_lock(&vm_mtx);
pmap_kremove((vm_offset_t)ptvmmap);
+ mtx_unlock(&vm_mtx);
continue;
/* minor device 1 is kernel memory */
@@ -177,14 +183,20 @@ mmrw(dev_t dev, struct uio *uio, int flags)
return EFAULT;
if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
return EFAULT;
+ mtx_lock(&vm_mtx);
for (; addr < eaddr; addr += PAGE_SIZE)
- if (pmap_extract(kernel_pmap, addr) == 0)
+ if (pmap_extract(kernel_pmap, addr) == 0) {
+ mtx_unlock(&vm_mtx);
return EFAULT;
-
+ }
+
if (!kernacc((caddr_t)(int)uio->uio_offset, c,
uio->uio_rw == UIO_READ ?
- VM_PROT_READ : VM_PROT_WRITE))
+ VM_PROT_READ : VM_PROT_WRITE)) {
+ mtx_unlock(&vm_mtx);
return (EFAULT);
+ }
+ mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
continue;
}
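Note the shape of the mmrw() change above: vm_mtx is held across pmap_kenter()/pmap_kremove() and the pmap_extract()/kernacc() checks, but deliberately dropped around uiomove(), which can block or fault and so must not run under the vm lock. A minimal user-space analogue of that split (map_window, copy_via_window, and the 4 KB window are illustrative assumptions, not kernel API):

	#include <pthread.h>
	#include <string.h>

	static pthread_mutex_t vm_mtx = PTHREAD_MUTEX_INITIALIZER;
	static char window[4096];		/* stand-in for the ptvmmap window */

	static void map_window(void)   { /* pmap_kenter() analogue */ }
	static void unmap_window(void) { /* pmap_kremove() analogue */ }

	/*
	 * The lock covers only the mapping updates, not the copy itself:
	 * the copy (uiomove() in the kernel) may sleep, and vm_mtx must
	 * not be held across that.
	 */
	static void
	copy_via_window(char *dst, size_t len)
	{
		pthread_mutex_lock(&vm_mtx);
		map_window();
		pthread_mutex_unlock(&vm_mtx);

		memcpy(dst, window, len);	/* copy runs with the lock dropped */

		pthread_mutex_lock(&vm_mtx);
		unmap_window();
		pthread_mutex_unlock(&vm_mtx);
	}

	int
	main(void)
	{
		char buf[16];

		copy_via_window(buf, sizeof(buf));
		return (0);
	}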
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 93807ee..488a8a5 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -75,6 +75,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index af7bfc1..8924fa2 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -330,9 +330,7 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_lock(&Giant);
i = trap_pfault(&frame, TRUE, eva);
- mtx_unlock(&Giant);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
if (i == -2) {
/*
@@ -443,9 +441,7 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_lock(&Giant);
(void) trap_pfault(&frame, FALSE, eva);
- mtx_unlock(&Giant);
goto out;
case T_DNA:
@@ -887,7 +883,9 @@ nogo:
frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
return (0);
}
+ mtx_lock(&Giant);
trap_fatal(frame, eva);
+ mtx_unlock(&Giant);
return (-1);
}
@@ -1147,14 +1145,17 @@ syscall(frame)
/*
* Try to run the syscall without the MP lock if the syscall
- * is MP safe. We have to obtain the MP lock no matter what if
- * we are ktracing
+ * is MP safe.
*/
if ((callp->sy_narg & SYF_MPSAFE) == 0) {
mtx_lock(&Giant);
}
#ifdef KTRACE
+ /*
+ * We have to obtain the MP lock no matter what if
+ * we are ktracing
+ */
if (KTRPOINT(p, KTR_SYSCALL)) {
if (!mtx_owned(&Giant))
mtx_lock(&Giant);
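The ktrace path above has to cope with Giant being only conditionally held: an MP-safe syscall runs without it, so the trace code takes it only when mtx_owned(&Giant) reports it is not already ours. Pthreads has no mtx_owned() equivalent, but the idea can be sketched with a thread-local ownership flag (giant_lock, trace_event, and the flag itself are illustrative, not from this commit):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;
	static __thread bool giant_owned;	/* emulates mtx_owned(&Giant) */

	static void
	giant_lock(void)
	{
		pthread_mutex_lock(&giant);
		giant_owned = true;
	}

	static void
	giant_unlock(void)
	{
		giant_owned = false;
		pthread_mutex_unlock(&giant);
	}

	/*
	 * Mirrors the KTRACE block in syscall(): the lock may or may not
	 * already be held (depending on SYF_MPSAFE), so acquire it only
	 * if this thread does not own it yet.
	 */
	static void
	trace_event(void)
	{
		bool took = false;

		if (!giant_owned) {	/* if (!mtx_owned(&Giant)) */
			giant_lock();	/*	mtx_lock(&Giant);   */
			took = true;
		}
		/* ... record the event ... */
		if (took)
			giant_unlock();
	}

	int
	main(void)
	{
		trace_event();
		return (0);
	}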
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index fd626a3..eda2386 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -290,11 +290,14 @@ void
cpu_wait(p)
struct proc *p;
{
+
+ mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
+ mtx_unlock(&vm_mtx);
}
/*
@@ -376,6 +379,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
+ mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -391,6 +395,7 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
+ mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -411,6 +416,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
+ mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -418,6 +424,7 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
+ mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -574,12 +581,17 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+ if (mtx_trylock(&vm_mtx) == 0)
+ return (0);
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
+ mtx_unlock(&vm_mtx);
return(0);
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+ }
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
+ mtx_unlock(&vm_mtx);
return(0);
+ }
- if (mtx_trylock(&Giant)) {
s = splvm();
zero_state = 0;
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
@@ -602,10 +614,8 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&Giant);
+ mtx_unlock(&vm_mtx);
return (1);
- }
- return (0);
}
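vm_page_zero_idle() above shows a third pattern in this commit: idle-time work is strictly opportunistic, so the function now contends for vm_mtx with mtx_trylock() and reports "no work done" rather than blocking the idle loop. A user-space sketch (idle_zero and zero_one_page are made-up names; note that pthread_mutex_trylock() returns 0 on success, the opposite sense of the kernel's mtx_trylock()):

	#include <pthread.h>

	static pthread_mutex_t vm_mtx = PTHREAD_MUTEX_INITIALIZER;

	static int
	zero_one_page(void)
	{
		/* ... find a free page and zero it ... */
		return (1);
	}

	/*
	 * Opportunistic idle work: if someone else holds the vm lock,
	 * give up immediately (return 0) instead of stalling the idle loop.
	 */
	static int
	idle_zero(void)
	{
		int did;

		if (pthread_mutex_trylock(&vm_mtx) != 0)
			return (0);	/* lock busy: nothing done this pass */
		did = zero_one_page();
		pthread_mutex_unlock(&vm_mtx);
		return (did);
	}

	int
	main(void)
	{
		return (idle_zero() ? 0 : 1);
	}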