diff options
author | jeff <jeff@FreeBSD.org> | 2012-11-15 00:51:57 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2012-11-15 00:51:57 +0000 |
commit | f40f3c3255364f472ec06d9ba5ad0b66f16a6572 (patch) | |
tree | dc9bfdaab60c5892bec647736baab766ea8ee213 /sys/kern/subr_lock.c | |
parent | e8ae50d444598a029ceb39f734aeda3b5ee77521 (diff) | |
download | FreeBSD-src-f40f3c3255364f472ec06d9ba5ad0b66f16a6572.zip FreeBSD-src-f40f3c3255364f472ec06d9ba5ad0b66f16a6572.tar.gz |
- Implement run-time expansion of the KTR buffer via sysctl.
- Implement a function to ensure that all preempted threads have switched
back out at least once. Use this to make sure there are no stale
references to the old ktr_buf or the lock profiling buffers before
updating them.
Reviewed by: marius (sparc64 parts), attilio (earlier patch)
Sponsored by: EMC / Isilon Storage Division
Diffstat (limited to 'sys/kern/subr_lock.c')
-rw-r--r-- | sys/kern/subr_lock.c | 29 |
1 file changed, 4 insertions(+), 25 deletions(-)
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index b040c7b..6f3682a 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -240,34 +240,13 @@ lock_prof_init(void *arg)
 }
 SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
 
-/*
- * To be certain that lock profiling has idled on all cpus before we
- * reset, we schedule the resetting thread on all active cpus.  Since
- * all operations happen within critical sections we can be sure that
- * it is safe to zero the profiling structures.
- */
-static void
-lock_prof_idle(void)
-{
-	struct thread *td;
-	int cpu;
-
-	td = curthread;
-	thread_lock(td);
-	CPU_FOREACH(cpu) {
-		sched_bind(td, cpu);
-	}
-	sched_unbind(td);
-	thread_unlock(td);
-}
-
 static void
 lock_prof_reset_wait(void)
 {
 
 	/*
-	 * Spin relinquishing our cpu so that lock_prof_idle may
-	 * run on it.
+	 * Spin relinquishing our cpu so that quiesce_all_cpus may
+	 * complete.
 	 */
 	while (lock_prof_resetting)
 		sched_relinquish(curthread);
@@ -289,7 +268,7 @@ lock_prof_reset(void)
 	atomic_store_rel_int(&lock_prof_resetting, 1);
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
-	lock_prof_idle();
+	quiesce_all_cpus("profreset", 0);
 	/*
 	 * Some objects may have migrated between CPUs.  Clear all links
 	 * before we zero the structures.  Some items may still be linked
@@ -401,7 +380,7 @@ dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
 	    "max", "wait_max", "total", "wait_total", "count", "avg",
 	    "wait_avg", "cnt_hold", "cnt_lock", "name");
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
-	lock_prof_idle();
+	quiesce_all_cpus("profstat", 0);
 	t = ticks;
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (lp_cpu[cpu] == NULL)