author    dillon <dillon@FreeBSD.org>  2001-07-04 16:20:28 +0000
committer dillon <dillon@FreeBSD.org>  2001-07-04 16:20:28 +0000
commit    e028603b7e3e4fb35cdf00aab533f3965f4a13cc (patch)
tree      7420cce169451a74c5b87963467a4aeff668ed12 /sys/amd64
parent    0b028660051eb7abf4306d34e7fec0e7fde86a28 (diff)
With Alfred's permission, remove vm_mtx in favor of a fine-grained approach
(this commit is just the first stage). Also add various GIANT_ macros to
formalize the removal of Giant, making it easy to test in a more piecemeal
fashion.

These macros will allow us to test fine-grained locks to a degree before
removing Giant, and also after, and to remove Giant piecemeal via sysctls
on those subsystems which the authors believe can operate without Giant.
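
For context on the GIANT_ macros this commit introduces: GIANT_REQUIRED is an
assertion that the calling thread holds Giant at a given entry point. Below is
a minimal sketch of how such a macro could be structured, assuming the
mtx_assert()/MA_OWNED interface from <sys/mutex.h>. It is not the actual
FreeBSD definition (which lives in sys/sys/mutex.h), and the
kern_giant_enabled knob is a hypothetical stand-in for the per-subsystem
sysctls the message describes.

/*
 * Sketch only, not FreeBSD's real definition.  Assert that the caller
 * holds Giant; a sysctl-backed knob lets the requirement be relaxed
 * once a subsystem is believed safe without Giant.  The names
 * kern_giant_enabled and kern.giant_enabled are hypothetical, used
 * here for illustration.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static int kern_giant_enabled = 1;	/* 1 = enforce the assertion */
SYSCTL_INT(_kern, OID_AUTO, giant_enabled, CTLFLAG_RW,
    &kern_giant_enabled, 0, "Enforce GIANT_REQUIRED assertions");

#define	GIANT_REQUIRED	do {						\
	if (kern_giant_enabled)						\
		mtx_assert(&Giant, MA_OWNED);				\
} while (0)

With this shape, functions such as cpu_wait() and vmapbuf() in the hunks
below simply state GIANT_REQUIRED; at entry, documenting the locking contract
in one place instead of wrapping individual VM calls in
mtx_lock(&vm_mtx)/mtx_unlock(&vm_mtx) pairs.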
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/busdma_machdep.c |  1
-rw-r--r--  sys/amd64/amd64/machdep.c        |  6
-rw-r--r--  sys/amd64/amd64/mem.c            | 10
-rw-r--r--  sys/amd64/amd64/pmap.c           |  8
-rw-r--r--  sys/amd64/amd64/vm_machdep.c     | 26
-rw-r--r--  sys/amd64/isa/isa_dma.c          | 12
6 files changed, 22 insertions, 41 deletions
diff --git a/sys/amd64/amd64/busdma_machdep.c b/sys/amd64/amd64/busdma_machdep.c
index 3dc9e76..d99e22a 100644
--- a/sys/amd64/amd64/busdma_machdep.c
+++ b/sys/amd64/amd64/busdma_machdep.c
@@ -32,6 +32,7 @@
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 73b8052..9ee7bfc 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -218,7 +218,6 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
- mtx_lock(&vm_mtx);
earlysetcpuclass();
startrtclock();
printcpuinfo();
@@ -352,7 +351,6 @@ again:
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
- mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
@@ -2007,6 +2005,8 @@ f00f_hack(void *unused) {
if (!has_f00f_bug)
return;
+ GIANT_REQUIRED;
+
printf("Intel Pentium detected, installing workaround for F00F bug\n");
r_idt.rd_limit = sizeof(idt0) - 1;
@@ -2022,11 +2022,9 @@ f00f_hack(void *unused) {
r_idt.rd_base = (int)new_idt;
lidt(&r_idt);
idt = new_idt;
- mtx_lock(&vm_mtx);
if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
VM_PROT_READ, FALSE) != KERN_SUCCESS)
panic("vm_map_protect failed");
- mtx_unlock(&vm_mtx);
return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index dfb034e..5eded32 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -140,6 +140,8 @@ mmrw(dev_t dev, struct uio *uio, int flags)
int error = 0;
vm_offset_t addr, eaddr;
+ GIANT_REQUIRED;
+
while (uio->uio_resid > 0 && error == 0) {
iov = uio->uio_iov;
if (iov->iov_len == 0) {
@@ -155,17 +157,13 @@ mmrw(dev_t dev, struct uio *uio, int flags)
case 0:
v = uio->uio_offset;
v &= ~PAGE_MASK;
- mtx_lock(&vm_mtx);
pmap_kenter((vm_offset_t)ptvmmap, v);
- mtx_unlock(&vm_mtx);
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
c = min(c, (u_int)iov->iov_len);
error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
- mtx_lock(&vm_mtx);
pmap_kremove((vm_offset_t)ptvmmap);
- mtx_unlock(&vm_mtx);
continue;
/* minor device 1 is kernel memory */
@@ -183,20 +181,16 @@ mmrw(dev_t dev, struct uio *uio, int flags)
return EFAULT;
if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
return EFAULT;
- mtx_lock(&vm_mtx);
for (; addr < eaddr; addr += PAGE_SIZE)
if (pmap_extract(kernel_pmap, addr) == 0) {
- mtx_unlock(&vm_mtx);
return EFAULT;
}
if (!kernacc((caddr_t)(int)uio->uio_offset, c,
uio->uio_rw == UIO_READ ?
VM_PROT_READ : VM_PROT_WRITE)) {
- mtx_unlock(&vm_mtx);
return (EFAULT);
}
- mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
continue;
}
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c6db00b..a00bcab 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3135,14 +3135,11 @@ pmap_mapdev(pa, size)
{
vm_offset_t va, tmpva, offset;
unsigned *pte;
- int hadvmlock;
offset = pa & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
va = kmem_alloc_pageable(kernel_map, size);
if (!va)
@@ -3158,9 +3155,6 @@ pmap_mapdev(pa, size)
}
invltlb();
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
-
return ((void *)(va + offset));
}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index e26d211..ef84bf9 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -292,14 +292,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -378,10 +377,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -397,7 +397,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -415,10 +414,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -426,7 +426,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -582,17 +581,12 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
- if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
return(0);
- }
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- }
+ if (mtx_trylock(&Giant)) {
zero_state = 0;
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
if (m != NULL && (m->flags & PG_ZERO) == 0) {
@@ -611,8 +605,10 @@ vm_page_zero_idle()
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
diff --git a/sys/amd64/isa/isa_dma.c b/sys/amd64/isa/isa_dma.c
index 6acc527..d9f1233 100644
--- a/sys/amd64/isa/isa_dma.c
+++ b/sys/amd64/isa/isa_dma.c
@@ -53,6 +53,7 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <vm/vm.h>
@@ -216,6 +217,8 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
int waport;
caddr_t newaddr;
+ GIANT_REQUIRED;
+
#ifdef DIAGNOSTIC
if (chan & ~VALID_DMA_MASK)
panic("isa_dmastart: channel out of range");
@@ -254,12 +257,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
- mtx_lock(&vm_mtx); /*
- * XXX: need to hold for longer period to
- * ensure that mappings don't change
- */
phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
- mtx_unlock(&vm_mtx);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@@ -378,11 +376,11 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
vm_offset_t phys, priorpage = 0, endva;
u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
+ GIANT_REQUIRED;
+
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
- mtx_lock(&vm_mtx);
phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
- mtx_unlock(&vm_mtx);
#define ISARAM_END RAM_END
if (phys == 0)
panic("isa_dmacheck: no physical page present");