author     attilio <attilio@FreeBSD.org>   2007-05-29 18:55:41 +0000
committer  attilio <attilio@FreeBSD.org>   2007-05-29 18:55:41 +0000
commit     d5b0dfc29b2a69d2c6ca059947f3aa366b444f61 (patch)
tree       1cf8f92c0776f673efd5bd941767177a609b46b7 /sys/i386
parent     410f244e4f5e3ce03031bdfbe54f9e567619511a (diff)
Fix some problems introduced with the last descriptors tables locking patch:

- Do the correct test for ldt allocation.
- Drop dt_lock just before calling kmem_free() (since it acquires blocking locks internally).
- Solve a deadlock with smp_rendezvous() where the other CPUs would wait indefinitely for dt_lock acquisition.
- Add dt_lock to the WITNESS list of spinlocks.

While applying these changes, change the requirement for user_ldt_free() so that it now returns without dt_lock held.

Tested by:    marcus, tegge
Reviewed by:  tegge
Approved by:  jeff (mentor)
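The caller-visible effect of the new user_ldt_free() contract is sketched below; this is an illustrative condensation of the exec_setregs() hunk in this commit, not verbatim kernel source:

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt != NULL)
		user_ldt_free(td);		/* consumes dt_lock: returns with it released */
	else
		mtx_unlock_spin(&dt_lock);	/* no ldt to free, so drop the lock here */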
Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/machdep.c       3
-rw-r--r--  sys/i386/i386/sys_machdep.c  18
-rw-r--r--  sys/i386/i386/vm_machdep.c    8
3 files changed, 21 insertions, 8 deletions
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 4b04ab9..cc6984e 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -1174,7 +1174,8 @@ exec_setregs(td, entry, stack, ps_strings)
mtx_lock_spin(&dt_lock);
if (td->td_proc->p_md.md_ldt)
user_ldt_free(td);
- mtx_unlock_spin(&dt_lock);
+ else
+ mtx_unlock_spin(&dt_lock);
bzero((char *)regs, sizeof(struct trapframe));
regs->tf_eip = entry;
diff --git a/sys/i386/i386/sys_machdep.c b/sys/i386/i386/sys_machdep.c
index 74a7e6c..31fad62 100644
--- a/sys/i386/i386/sys_machdep.c
+++ b/sys/i386/i386/sys_machdep.c
@@ -428,7 +428,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
}
/*
- * Must be called with dt_lock held.
+ * Must be called with dt_lock held. Returns with dt_lock unheld.
*/
void
user_ldt_free(struct thread *td)
@@ -446,6 +446,7 @@ user_ldt_free(struct thread *td)
}
mdp->md_ldt = NULL;
+ mtx_unlock_spin(&dt_lock);
if (refcount_release(&pldt->ldt_refcnt)) {
kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
pldt->ldt_len * sizeof(union descriptor));
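Inside user_ldt_free() itself, dt_lock is now dropped before kmem_free() is reached, because kmem_free() may acquire blocking locks that cannot be taken while a spin mutex is held. A condensed sketch of the resulting flow (simplified from the hunk above, not the verbatim function body):

	mdp->md_ldt = NULL;
	mtx_unlock_spin(&dt_lock);	/* release the spin lock before anything that may block */
	if (refcount_release(&pldt->ldt_refcnt))
		kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
		    pldt->ldt_len * sizeof(union descriptor));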
@@ -701,7 +702,7 @@ i386_ldt_grow(struct thread *td, int len)
len = NLDT + 1;
/* Allocate a user ldt. */
- if ((pldt = mdp->md_ldt) != NULL || len > pldt->ldt_len) {
+ if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
struct proc_ldt *new_ldt;
new_ldt = user_ldt_alloc(mdp, len);
@@ -716,26 +717,37 @@ i386_ldt_grow(struct thread *td, int len)
pldt->ldt_sd = new_ldt->ldt_sd;
pldt->ldt_base = new_ldt->ldt_base;
pldt->ldt_len = new_ldt->ldt_len;
+ mtx_unlock_spin(&dt_lock);
kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
old_ldt_len * sizeof(union descriptor));
FREE(new_ldt, M_SUBPROC);
+ mtx_lock_spin(&dt_lock);
} else {
/*
* If other threads already did the work,
* do nothing.
*/
+ mtx_unlock_spin(&dt_lock);
kmem_free(kernel_map,
(vm_offset_t)new_ldt->ldt_base,
new_ldt->ldt_len * sizeof(union descriptor));
FREE(new_ldt, M_SUBPROC);
+ mtx_lock_spin(&dt_lock);
return (0);
}
} else
mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
- /* signal other cpus to reload ldt */
+ /*
+ * Signal other cpus to reload ldt.  We need to unlock dt_lock
+ * here because the other CPUs will contend on it: their
+ * curthreads do not hold the lock and will block when trying
+ * to acquire it.
+ */
+ mtx_unlock_spin(&dt_lock);
smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
NULL, td);
+ mtx_lock_spin(&dt_lock);
#else
set_user_ldt(mdp);
#endif
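The same unlock/relock discipline is applied in i386_ldt_grow() around smp_rendezvous(): the other CPUs execute set_user_ldt_rv() with curthreads that do not own dt_lock, so they would block trying to acquire it and the rendezvous would never complete if the initiating CPU kept the spin lock held. In sketch form (condensed from the hunk above):

	mtx_unlock_spin(&dt_lock);	/* let the other CPUs take dt_lock during the rendezvous */
	smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv, NULL, td);
	mtx_lock_spin(&dt_lock);	/* reacquire; the caller still expects dt_lock held */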
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 116f1df..73ce4b7 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -168,8 +168,8 @@ cpu_fork(td1, p2, td2, flags)
mdp1->md_ldt = pldt;
set_user_ldt(mdp1);
user_ldt_free(td1);
- }
- mtx_unlock_spin(&dt_lock);
+ } else
+ mtx_unlock_spin(&dt_lock);
}
return;
}
@@ -312,8 +312,8 @@ cpu_exit(struct thread *td)
td->td_pcb->pcb_gs = _udatasel;
load_gs(_udatasel);
user_ldt_free(td);
- }
- mtx_unlock_spin(&dt_lock);
+ } else
+ mtx_unlock_spin(&dt_lock);
}
void