path: root/sys/kern/vfs_bio.c
author     jeff <jeff@FreeBSD.org>    2007-06-01 01:12:45 +0000
committer  jeff <jeff@FreeBSD.org>    2007-06-01 01:12:45 +0000
commit     a7a8bac81f171b93c4770f51c694b15c84a2f9f8 (patch)
tree       9c09dcc76185c3dc30b048a5f2eb972f3bb8a849 /sys/kern/vfs_bio.c
parent     062ed7352f59fb6db93a3f51ca6e64ec0cefca22 (diff)
- Move rusage from being per-process in struct pstats to per-thread in
  td_ru.  This removes the requirement for per-process synchronization in
  statclock() and mi_switch().  This was previously supported by
  sched_lock, which is going away.  All modifications to rusage are now
  done in the context of the owning thread.  Reads proceed without locks.
- Aggregate exiting threads' rusage in thread_exit() such that the exiting
  thread's rusage is not lost.
- Provide a new routine, rufetch(), to fetch an aggregate of all rusage
  structures from all threads in a process.  This routine must be used in
  any place requiring a rusage from a process prior to its exit.  The
  exited process's rusage is still available via p_ru.
- Aggregate tick statistics only on demand via rufetch() or when a thread
  exits.  Tick statistics are kept in the thread and protected by
  sched_lock until it exits.

Initial patch by:  attilio
Reviewed by:       attilio, bde (some objections), arch (mostly silent)
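A minimal userspace sketch of the scheme described above, assuming only what the
commit message states: each thread keeps its own counters in a td_ru field that
only the owning thread writes, exiting threads fold their counters into the
process aggregate, and an rufetch()-style routine sums everything on demand.
The names thread_model, proc_model and rufetch_model, and summing only the two
block-I/O fields, are illustrative assumptions, not the kernel implementation.

/*
 * Userspace model of per-thread rusage with on-demand aggregation.
 * Not the kernel code; types and names are hypothetical.
 */
#include <stdio.h>
#include <sys/resource.h>

struct thread_model {
	struct rusage td_ru;		/* written only by the owning thread */
	struct thread_model *td_next;
};

struct proc_model {
	struct rusage p_ru;		/* counters folded in by exited threads */
	struct thread_model *p_threads;	/* live threads */
};

/* Sum the exited-thread aggregate plus every live thread's td_ru. */
static void
rufetch_model(struct proc_model *p, struct rusage *ru)
{
	struct thread_model *td;

	*ru = p->p_ru;
	for (td = p->p_threads; td != NULL; td = td->td_next) {
		ru->ru_inblock += td->td_ru.ru_inblock;
		ru->ru_oublock += td->td_ru.ru_oublock;
		/* the real routine would sum every rusage field */
	}
}

int
main(void)
{
	struct thread_model t1 = { .td_ru = { .ru_inblock = 3 } };
	struct thread_model t2 = { .td_ru = { .ru_oublock = 5 }, .td_next = &t1 };
	struct proc_model p = { .p_threads = &t2 };
	struct rusage ru;

	rufetch_model(&p, &ru);
	printf("inblock %ld oublock %ld\n", ru.ru_inblock, ru.ru_oublock);
	return (0);
}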
Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--  sys/kern/vfs_bio.c | 6
1 files changed, 3 insertions, 3 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 44879ff..18c6d59 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -746,7 +746,7 @@ breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
 		if ((rabp->b_flags & B_CACHE) == 0) {
 			if (!TD_IS_IDLETHREAD(curthread))
-				curthread->td_proc->p_stats->p_ru.ru_inblock++;
+				curthread->td_ru.ru_inblock++;
 			rabp->b_flags |= B_ASYNC;
 			rabp->b_flags &= ~B_INVAL;
 			rabp->b_ioflags &= ~BIO_ERROR;
@@ -781,7 +781,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
 	/* if not found in cache, do some I/O */
 	if ((bp->b_flags & B_CACHE) == 0) {
 		if (!TD_IS_IDLETHREAD(curthread))
-			curthread->td_proc->p_stats->p_ru.ru_inblock++;
+			curthread->td_ru.ru_inblock++;
 		bp->b_iocmd = BIO_READ;
 		bp->b_flags &= ~B_INVAL;
 		bp->b_ioflags &= ~BIO_ERROR;
@@ -860,7 +860,7 @@ bufwrite(struct buf *bp)
 	atomic_add_int(&runningbufspace, bp->b_runningbufspace);
 	if (!TD_IS_IDLETHREAD(curthread))
-		curthread->td_proc->p_stats->p_ru.ru_oublock++;
+		curthread->td_ru.ru_oublock++;
 	if (oldflags & B_ASYNC)
 		BUF_KERNPROC(bp);
 	bp->b_iooffset = dbtob(bp->b_blkno);