-rw-r--r--  sys/amd64/amd64/trap.c  9
-rw-r--r--  sys/i386/i386/trap.c    9
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index e3f45ed..a84a4f2 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -286,9 +286,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			i = trap_pfault(&frame, TRUE, eva);
-			mtx_unlock(&Giant);
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 			if (i == -2) {
 				/*
@@ -404,9 +402,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			(void) trap_pfault(&frame, FALSE, eva);
-			mtx_unlock(&Giant);
 			goto out;
 
 		case T_DNA:
@@ -682,6 +678,7 @@ trap_pfault(frame, usermode, eva)
 	if (vm == NULL)
 		goto nogo;
 
+	mtx_lock(&Giant);
 	map = &vm->vm_map;
 
 	/*
@@ -719,6 +716,7 @@ trap_pfault(frame, usermode, eva)
 		if (usermode)
 			goto nogo;
 
+		mtx_lock(&Giant);
 		/*
 		 * Since we know that kernel virtual address addresses
 		 * always have pte pages mapped, we just have to fault
@@ -726,6 +724,7 @@ trap_pfault(frame, usermode, eva)
 		 */
 		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
 	}
+	mtx_unlock(&Giant);
 
 	if (rv == KERN_SUCCESS)
 		return (0);
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index e3f45ed..a84a4f2 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -286,9 +286,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			i = trap_pfault(&frame, TRUE, eva);
-			mtx_unlock(&Giant);
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 			if (i == -2) {
 				/*
@@ -404,9 +402,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			(void) trap_pfault(&frame, FALSE, eva);
-			mtx_unlock(&Giant);
 			goto out;
 
 		case T_DNA:
@@ -682,6 +678,7 @@ trap_pfault(frame, usermode, eva)
 	if (vm == NULL)
 		goto nogo;
 
+	mtx_lock(&Giant);
 	map = &vm->vm_map;
 
 	/*
@@ -719,6 +716,7 @@ trap_pfault(frame, usermode, eva)
 		if (usermode)
 			goto nogo;
 
+		mtx_lock(&Giant);
 		/*
 		 * Since we know that kernel virtual address addresses
 		 * always have pte pages mapped, we just have to fault
@@ -726,6 +724,7 @@ trap_pfault(frame, usermode, eva)
 		 */
 		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
 	}
+	mtx_unlock(&Giant);
 
 	if (rv == KERN_SUCCESS)
 		return (0);
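
The diff above is a Giant pushdown: instead of trap() wrapping each trap_pfault() call in mtx_lock(&Giant)/mtx_unlock(&Giant), trap_pfault() now acquires Giant itself, only around the work that culminates in vm_fault(). The early "nogo" exits therefore never touch the lock at all, and the window during which Giant is held shrinks. The following standalone sketch illustrates the same pattern; it is not FreeBSD code, and a pthread mutex stands in for Giant. Apart from the mtx_lock/mtx_unlock/Giant/vm_fault names referenced from the diff, every name and body here is hypothetical.

/*
 * Standalone sketch of the lock pushdown shown in the diff, with a
 * pthread mutex standing in for FreeBSD's Giant.  Only the shape of
 * the change (the caller stops wrapping the call, the callee locks
 * just around the fault work, early failure exits never take the
 * lock) mirrors the diff.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;

/* Old shape: the trap()-style caller held the lock across the call. */
static int
pfault_locked_by_caller(int addr)
{
	if (addr < 0)
		return (-1);	/* even this early exit ran under the lock */
	return (0);		/* "fault handled" */
}

/* New shape: the trap_pfault()-style callee locks only what needs it. */
static int
pfault_locks_itself(int addr)
{
	int rv;

	if (addr < 0)
		return (-1);	/* 'nogo' path: lock never acquired */
	pthread_mutex_lock(&giant);
	rv = 0;			/* stands in for the vm_fault() call */
	pthread_mutex_unlock(&giant);
	return (rv);
}

int
main(void)
{
	int i;

	/* Before the change: lock/call/unlock at every call site. */
	pthread_mutex_lock(&giant);
	i = pfault_locked_by_caller(0x1000);
	pthread_mutex_unlock(&giant);
	printf("caller-locked: %d\n", i);

	/* After the change: a plain call; locking lives in the callee. */
	i = pfault_locks_itself(0x1000);
	printf("callee-locked: %d\n", i);
	return (0);
}

Compile with -lpthread. The payoff of the pushdown is visible in both vm_fault() call sites of the diff: the lock is taken after argument validation and released immediately after the fault, rather than bracketing the entire handler from the trap dispatcher.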