From dd7446483c6623723f0f5a0cf4954a72efe3acb5 Mon Sep 17 00:00:00 2001
From: phk
Date: Tue, 28 Sep 2004 11:56:37 +0000
Subject: Protect the start/end counts on consumers and providers with the
 up/down mutexes.

Make it possible to also protect the disk statistics (at a minor cost
in performance) by setting bit 2 of kern.geom.collectstats.
---
 sys/geom/geom_io.c | 68 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 28 deletions(-)

(limited to 'sys')

diff --git a/sys/geom/geom_io.c b/sys/geom/geom_io.c
index 12bce63..eb10b79 100644
--- a/sys/geom/geom_io.c
+++ b/sys/geom/geom_io.c
@@ -105,19 +105,6 @@ g_bioq_first(struct g_bioq *bq)
 	return (bp);
 }
 
-static void
-g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
-{
-
-	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
-	    ("Bio already on queue bp=%p target %p", bp, rq));
-	bp->bio_flags |= BIO_ONQUEUE;
-	g_bioq_lock(rq);
-	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
-	rq->bio_queue_length++;
-	g_bioq_unlock(rq);
-}
-
 struct bio *
 g_new_bio(void)
 {
@@ -272,17 +259,29 @@ g_io_request(struct bio *bp, struct g_consumer *cp)
 	bp->bio_error = 0;
 	bp->bio_completed = 0;
 
+	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
+	    ("Bio already on queue bp=%p", bp));
+	bp->bio_flags |= BIO_ONQUEUE;
+
+	binuptime(&bp->bio_t0);
+	if (g_collectstats & 4)
+		g_bioq_lock(&g_bio_run_down);
 	if (g_collectstats & 1)
-		devstat_start_transaction_bio(pp->stat, bp);
-	pp->nstart++;
+		devstat_start_transaction(pp->stat, &bp->bio_t0);
 	if (g_collectstats & 2)
-		devstat_start_transaction_bio(cp->stat, bp);
+		devstat_start_transaction(cp->stat, &bp->bio_t0);
+
+	if (!(g_collectstats & 4))
+		g_bioq_lock(&g_bio_run_down);
+	pp->nstart++;
 	cp->nstart++;
+	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
+	g_bio_run_down.bio_queue_length++;
+	g_bioq_unlock(&g_bio_run_down);
 
 	/* Pass it on down. */
 	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
-	g_bioq_enqueue_tail(bp, &g_bio_run_down);
 	wakeup(&g_wait_down);
 }
 
@@ -312,30 +311,43 @@ g_io_deliver(struct bio *bp, int error)
 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
 
+	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
+	    ("Bio already on queue bp=%p", bp));
+	bp->bio_flags |= BIO_ONQUEUE;
+
 	/*
 	 * XXX: next two doesn't belong here
 	 */
 	bp->bio_bcount = bp->bio_length;
 	bp->bio_resid = bp->bio_bcount - bp->bio_completed;
+
+	if (g_collectstats & 4)
+		g_bioq_lock(&g_bio_run_up);
 	if (g_collectstats & 1)
 		devstat_end_transaction_bio(pp->stat, bp);
 	if (g_collectstats & 2)
 		devstat_end_transaction_bio(cp->stat, bp);
+	if (!(g_collectstats & 4))
+		g_bioq_lock(&g_bio_run_up);
 	cp->nend++;
 	pp->nend++;
-
-	if (error == ENOMEM) {
-		if (bootverbose)
-			printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
-		bp->bio_children = 0;
-		bp->bio_inbed = 0;
-		g_io_request(bp, cp);
-		pace++;
+	if (error != ENOMEM) {
+		bp->bio_error = error;
+		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
+		g_bio_run_up.bio_queue_length++;
+		g_bioq_unlock(&g_bio_run_up);
+		wakeup(&g_wait_up);
 		return;
 	}
-	bp->bio_error = error;
-	g_bioq_enqueue_tail(bp, &g_bio_run_up);
-	wakeup(&g_wait_up);
+	g_bioq_unlock(&g_bio_run_up);
+
+	if (bootverbose)
+		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
+	bp->bio_children = 0;
+	bp->bio_inbed = 0;
+	g_io_request(bp, cp);
+	pace++;
+	return;
}
 
 void
-- 
cgit v1.1
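
The pattern this commit introduces in g_io_request() and g_io_deliver() can
be sketched outside the kernel: when bit 2 of kern.geom.collectstats is set,
the queue mutex is taken before the devstat calls, so the statistics samples
and the nstart/nend counters are updated atomically with respect to the
queue; when it is clear, the mutex covers only the counters and the enqueue.
Below is a minimal userland C sketch of that pattern, not the kernel code;
every name in it (collectstats, stats_record, queue_lock, nstart) is invented
for illustration, and only the bit semantics mirror the sysctl.

	/*
	 * Minimal userland sketch of the conditional-locking pattern.
	 * All identifiers are invented; only the meaning of bits 0-2
	 * mirrors kern.geom.collectstats.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static int collectstats = 1;	/* bit 0: provider stats, bit 1:
					 * consumer stats, bit 2: collect
					 * stats under the queue mutex */
	static long nstart;		/* start counter, like pp->nstart */

	static void
	stats_record(const char *which)
	{
		/* Stand-in for devstat_start_transaction(). */
		printf("stat sample for %s\n", which);
	}

	static void
	request(void)
	{
		/*
		 * Bit 2 set: take the mutex before sampling, so the
		 * samples and the counters change atomically with
		 * respect to the queue.
		 */
		if (collectstats & 4)
			pthread_mutex_lock(&queue_lock);
		if (collectstats & 1)
			stats_record("provider");
		if (collectstats & 2)
			stats_record("consumer");

		/* Bit 2 clear: lock only the counters and the enqueue. */
		if (!(collectstats & 4))
			pthread_mutex_lock(&queue_lock);
		nstart++;		/* now always under the mutex */
		/* ... enqueue the request here ... */
		pthread_mutex_unlock(&queue_lock);
	}

	int
	main(void)
	{
		request();
		return (0);
	}

The trade-off is visible in the sketch: setting bit 2 lengthens the critical
section by the cost of the statistics calls, the "minor cost in performance"
the commit message mentions, in exchange for statistics that stay consistent
with the queue counters.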