From 0a34594b9cd7c8b87f719ed058da6be2b756a8e5 Mon Sep 17 00:00:00 2001
From: scottl
Date: Tue, 7 Jan 2014 01:32:23 +0000
Subject: MFC Alexander Motin's GEOM direct dispatch work:

r256603:
Introduce a new function, devstat_end_transaction_bio_bt(), which takes an
additional argument specifying the present time.  Use it to move
binuptime() out of the lock, substantially reducing lock congestion when a
slow timecounter is used.

r256606:
Move g_io_deliver() out of the lock, as required for direct dispatch.
Move g_destroy_bio() out as well to reduce the lock scope even further.

r256607:
Fix passing an uninitialized bio_resid argument to g_trace().

r256610:
Add unmapped I/O support to GEOM RAID.

r256830:
Restore BIO_UNMAPPED and BIO_TRANSIENT_MAPPING in biodone() when unmapping
a temporarily mapped buffer.  This fixes a double unmap when biodone() is
called twice for the same BIO (but with different done methods).

r256880:
Merge the GEOM direct dispatch changes from the projects/camlock branch.
When the safety requirements are met, I/O requests can bypass the GEOM
g_up/g_down threads and execute directly in the caller's context.  This
avoids CPU bottlenecks in the g_up/g_down threads and saves several
context switches per I/O.

r259247:
Fix a bug introduced in r256607.  We have to recalculate bio_resid here,
since the sizes of the original and completed requests may differ due to
the end of media.

Testing of the stable/10 merge was done by Netflix, but all of the credit
goes to Alexander and iX Systems.

Submitted by:	mav
Sponsored by:	iX Systems
---
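Note: the sketch below is illustrative only and is not part of the patch.
It shows the completion-path pattern that r256603 enables: sample the
timestamp with binuptime() before taking the driver lock, then hand it to
devstat_end_transaction_bio_bt(), so the (possibly slow) timecounter read
happens outside the critical section.  The softc layout, lock, and driver
function names here are hypothetical.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/bio.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/time.h>
	#include <sys/devicestat.h>

	struct xx_softc {			/* hypothetical driver softc */
		struct mtx	 xx_mtx;
		struct devstat	*xx_devstat;
	};

	static void
	xx_bio_done(struct xx_softc *sc, struct bio *bp)
	{
		struct bintime now;

		binuptime(&now);	/* timecounter read, outside the lock */
		mtx_lock(&sc->xx_mtx);
		/* ... per-driver completion bookkeeping ... */
		devstat_end_transaction_bio_bt(sc->xx_devstat, bp, &now);
		mtx_unlock(&sc->xx_mtx);
		biodone(bp);		/* deliver the bio outside the lock too */
	}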
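A second illustrative sketch, also not part of the patch: the r256880
down-path dispatch decision in simplified form.  The real logic lives in
g_io_request() in sys/geom/geom_io.c and performs further safety checks;
stack_is_safe() and enqueue_for_g_down() below are hypothetical stand-ins
for those checks and for the actual g_down queue hand-off.

	/*
	 * Direct dispatch runs a request in the caller's context only
	 * when the consumer opts in (G_CF_DIRECT_SEND) and the provider
	 * opts in (G_PF_DIRECT_RECEIVE); anything else falls back to
	 * queueing for the g_down thread, exactly as before the change.
	 */
	static void
	dispatch_request(struct g_consumer *cp, struct g_provider *pp,
	    struct bio *bp)
	{
		if ((cp->flags & G_CF_DIRECT_SEND) != 0 &&
		    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
		    stack_is_safe()) {
			/* Both sides opted in: run in the caller's context. */
			pp->geom->start(bp);
		} else {
			/* Classic path: hand off to the g_down thread. */
			enqueue_for_g_down(bp);
		}
	}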
 sys/geom/raid/g_raid.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

(limited to 'sys/geom/raid/g_raid.c')

diff --git a/sys/geom/raid/g_raid.c b/sys/geom/raid/g_raid.c
index 41a1f96..a161f8a 100644
--- a/sys/geom/raid/g_raid.c
+++ b/sys/geom/raid/g_raid.c
@@ -792,6 +792,7 @@ g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
 	if (pp == NULL)
 		return (NULL);
 	cp = g_new_consumer(sc->sc_geom);
+	cp->flags |= G_CF_DIRECT_RECEIVE;
 	if (g_attach(cp, pp) != 0) {
 		g_destroy_consumer(cp);
 		return (NULL);
@@ -993,20 +994,15 @@ g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp)
 		cbp->bio_caller1 = sd;
 		bioq_insert_tail(&queue, cbp);
 	}
-	for (cbp = bioq_first(&queue); cbp != NULL;
-	    cbp = bioq_first(&queue)) {
-		bioq_remove(&queue, cbp);
+	while ((cbp = bioq_takefirst(&queue)) != NULL) {
 		sd = cbp->bio_caller1;
 		cbp->bio_caller1 = NULL;
 		g_raid_subdisk_iostart(sd, cbp);
 	}
 	return;
 failure:
-	for (cbp = bioq_first(&queue); cbp != NULL;
-	    cbp = bioq_first(&queue)) {
-		bioq_remove(&queue, cbp);
+	while ((cbp = bioq_takefirst(&queue)) != NULL)
 		g_destroy_bio(cbp);
-	}
 	if (bp->bio_error == 0)
 		bp->bio_error = ENOMEM;
 	g_raid_iodone(bp, bp->bio_error);
@@ -1639,11 +1635,13 @@ static void
 g_raid_launch_provider(struct g_raid_volume *vol)
 {
 	struct g_raid_disk *disk;
+	struct g_raid_subdisk *sd;
 	struct g_raid_softc *sc;
 	struct g_provider *pp;
 	char name[G_RAID_MAX_VOLUMENAME];
 	char announce_buf[80], buf1[32];
 	off_t off;
+	int i;
 
 	sc = vol->v_softc;
 	sx_assert(&sc->sc_lock, SX_LOCKED);
@@ -1673,6 +1671,18 @@ g_raid_launch_provider(struct g_raid_volume *vol)
 	}
 
 	pp = g_new_providerf(sc->sc_geom, "%s", name);
+	pp->flags |= G_PF_DIRECT_RECEIVE;
+	if (vol->v_tr->tro_class->trc_accept_unmapped) {
+		pp->flags |= G_PF_ACCEPT_UNMAPPED;
+		for (i = 0; i < vol->v_disks_count; i++) {
+			sd = &vol->v_subdisks[i];
+			if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
+				continue;
+			if ((sd->sd_disk->d_consumer->provider->flags &
+			    G_PF_ACCEPT_UNMAPPED) == 0)
+				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
+		}
+	}
 	pp->private = vol;
 	pp->mediasize = vol->v_mediasize;
 	pp->sectorsize = vol->v_sectorsize;
@@ -2247,6 +2257,7 @@ g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
 	 */
 	gp->orphan = g_raid_taste_orphan;
 	cp = g_new_consumer(gp);
+	cp->flags |= G_CF_DIRECT_RECEIVE;
 	g_attach(cp, pp);
 
 	geom = NULL;
--
cgit v1.1