Diffstat (limited to 'sys/dev/xen/blkfront/blkfront.c')
-rw-r--r-- | sys/dev/xen/blkfront/blkfront.c | 212
1 file changed, 62 insertions, 150 deletions
diff --git a/sys/dev/xen/blkfront/blkfront.c b/sys/dev/xen/blkfront/blkfront.c
index 92b5f35..3991f96 100644
--- a/sys/dev/xen/blkfront/blkfront.c
+++ b/sys/dev/xen/blkfront/blkfront.c
@@ -156,31 +156,71 @@ xbd_free_command(struct xbd_command *cm)
 }
 
 static void
+mksegarray(bus_dma_segment_t *segs, int nsegs,
+    grant_ref_t * gref_head, int otherend_id, int readonly,
+    grant_ref_t * sg_ref, blkif_request_segment_t * sg)
+{
+	struct blkif_request_segment *last_block_sg = sg + nsegs;
+	vm_paddr_t buffer_ma;
+	uint64_t fsect, lsect;
+	int ref;
+
+	while (sg < last_block_sg) {
+		buffer_ma = segs->ds_addr;
+		fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
+		lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
+
+		KASSERT(lsect <= 7, ("XEN disk driver data cannot "
+		    "cross a page boundary"));
+
+		/* install a grant reference. */
+		ref = gnttab_claim_grant_reference(gref_head);
+
+		/*
+		 * GNTTAB_LIST_END == 0xffffffff, but it is private
+		 * to gnttab.c.
+		 */
+		KASSERT(ref != ~0, ("grant_reference failed"));
+
+		gnttab_grant_foreign_access_ref(
+		    ref,
+		    otherend_id,
+		    buffer_ma >> PAGE_SHIFT,
+		    readonly);
+
+		*sg_ref = ref;
+		*sg = (struct blkif_request_segment) {
+			.gref = ref,
+			.first_sect = fsect,
+			.last_sect = lsect
+		};
+		sg++;
+		sg_ref++;
+		segs++;
+	}
+}
+
+static void
 xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 {
 	struct xbd_softc *sc;
 	struct xbd_command *cm;
 	blkif_request_t *ring_req;
-	struct blkif_request_segment *sg;
-	struct blkif_request_segment *last_block_sg;
-	grant_ref_t *sg_ref;
-	vm_paddr_t buffer_ma;
-	uint64_t fsect, lsect;
-	int ref;
 	int op;
-	int block_segs;
 
 	cm = arg;
 	sc = cm->cm_sc;
 
 	if (error) {
-		printf("error %d in xbd_queue_cb\n", error);
 		cm->cm_bp->bio_error = EIO;
 		biodone(cm->cm_bp);
 		xbd_free_command(cm);
 		return;
 	}
 
+	KASSERT(nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST,
+	    ("Too many segments in a blkfront I/O"));
+
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
 	sc->xbd_ring.req_prod_pvt++;
@@ -190,57 +230,10 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
 	ring_req->nr_segments = nsegs;
 	cm->cm_nseg = nsegs;
-
-	block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
-	sg = ring_req->seg;
-	last_block_sg = sg + block_segs;
-	sg_ref = cm->cm_sg_refs;
-
-	while (1) {
-
-		while (sg < last_block_sg) {
-			buffer_ma = segs->ds_addr;
-			fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
-			lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
-
-			KASSERT(lsect <= 7, ("XEN disk driver data cannot "
-			    "cross a page boundary"));
-
-			/* install a grant reference. */
-			ref = gnttab_claim_grant_reference(&cm->cm_gref_head);
-
-			/*
-			 * GNTTAB_LIST_END == 0xffffffff, but it is private
-			 * to gnttab.c.
-			 */
-			KASSERT(ref != ~0, ("grant_reference failed"));
-
-			gnttab_grant_foreign_access_ref(
-			    ref,
-			    xenbus_get_otherend_id(sc->xbd_dev),
-			    buffer_ma >> PAGE_SHIFT,
-			    ring_req->operation == BLKIF_OP_WRITE);
-
-			*sg_ref = ref;
-			*sg = (struct blkif_request_segment) {
-				.gref = ref,
-				.first_sect = fsect,
-				.last_sect = lsect
-			};
-			sg++;
-			sg_ref++;
-			segs++;
-			nsegs--;
-		}
-		block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
-		if (block_segs == 0)
-			break;
-
-		sg = BLKRING_GET_SEG_BLOCK(&sc->xbd_ring,
-		    sc->xbd_ring.req_prod_pvt);
-		sc->xbd_ring.req_prod_pvt++;
-		last_block_sg = sg + block_segs;
-	}
+	mksegarray(segs, nsegs, &cm->cm_gref_head,
+	    xenbus_get_otherend_id(sc->xbd_dev),
+	    cm->cm_operation == BLKIF_OP_WRITE,
+	    cm->cm_sg_refs, ring_req->seg);
 
 	if (cm->cm_operation == BLKIF_OP_READ)
 		op = BUS_DMASYNC_PREREAD;
@@ -396,8 +389,8 @@ xbd_startio(struct xbd_softc *sc)
 	if (sc->xbd_state != XBD_STATE_CONNECTED)
 		return;
 
-	while (RING_FREE_REQUESTS(&sc->xbd_ring) >=
-	    sc->xbd_max_request_blocks) {
+	while (!RING_FULL(&sc->xbd_ring)) {
+
 		if (sc->xbd_qfrozen_cnt != 0)
 			break;
 
@@ -450,13 +443,6 @@ xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
 	biodone(bp);
 }
 
-static int
-xbd_completion(struct xbd_command *cm)
-{
-	gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs);
-	return (BLKIF_SEGS_TO_BLOCKS(cm->cm_nseg));
-}
-
 static void
 xbd_int(void *xsc)
 {
@@ -482,7 +468,9 @@ xbd_int(void *xsc)
 		cm = &sc->xbd_shadow[bret->id];
 
 		xbd_remove_cm(cm, XBD_Q_BUSY);
-		i += xbd_completion(cm);
+		gnttab_end_foreign_access_references(cm->cm_nseg,
+		    cm->cm_sg_refs);
+		i++;
 
 		if (cm->cm_operation == BLKIF_OP_READ)
 			op = BUS_DMASYNC_POSTREAD;
@@ -1064,11 +1052,9 @@ xbd_initialize(struct xbd_softc *sc)
 	 */
 	max_ring_page_order = 0;
 	sc->xbd_ring_pages = 1;
-	sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+	sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	sc->xbd_max_request_size =
 	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
-	sc->xbd_max_request_blocks =
-	    BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
 
 	/*
 	 * Protocol negotiation.
@@ -1095,24 +1081,10 @@ xbd_initialize(struct xbd_softc *sc)
 	if (sc->xbd_ring_pages < 1)
 		sc->xbd_ring_pages = 1;
 
-	sc->xbd_max_requests =
-	    BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
-	(void)xs_scanf(XST_NIL, otherend_path,
-	    "max-requests", NULL, "%" PRIu32,
-	    &sc->xbd_max_requests);
-
-	(void)xs_scanf(XST_NIL, otherend_path,
-	    "max-request-segments", NULL, "%" PRIu32,
-	    &sc->xbd_max_request_segments);
-
-	(void)xs_scanf(XST_NIL, otherend_path,
-	    "max-request-size", NULL, "%" PRIu32,
-	    &sc->xbd_max_request_size);
-
 	if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
 		device_printf(sc->xbd_dev,
 		    "Back-end specified ring-pages of %u "
-		    "limited to front-end limit of %zu.\n",
+		    "limited to front-end limit of %u.\n",
 		    sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
 		sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
 	}
@@ -1128,46 +1100,16 @@ xbd_initialize(struct xbd_softc *sc)
 		sc->xbd_ring_pages = new_page_limit;
 	}
 
+	sc->xbd_max_requests =
+	    BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
 	if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
 		device_printf(sc->xbd_dev,
 		    "Back-end specified max_requests of %u "
-		    "limited to front-end limit of %u.\n",
+		    "limited to front-end limit of %zu.\n",
 		    sc->xbd_max_requests, XBD_MAX_REQUESTS);
 		sc->xbd_max_requests = XBD_MAX_REQUESTS;
 	}
 
-	if (sc->xbd_max_request_segments > XBD_MAX_SEGMENTS_PER_REQUEST) {
-		device_printf(sc->xbd_dev,
-		    "Back-end specified max_request_segments of %u "
-		    "limited to front-end limit of %u.\n",
-		    sc->xbd_max_request_segments,
-		    XBD_MAX_SEGMENTS_PER_REQUEST);
-		sc->xbd_max_request_segments = XBD_MAX_SEGMENTS_PER_REQUEST;
-	}
-
-	if (sc->xbd_max_request_size > XBD_MAX_REQUEST_SIZE) {
-		device_printf(sc->xbd_dev,
-		    "Back-end specified max_request_size of %u "
-		    "limited to front-end limit of %u.\n",
-		    sc->xbd_max_request_size,
-		    XBD_MAX_REQUEST_SIZE);
-		sc->xbd_max_request_size = XBD_MAX_REQUEST_SIZE;
-	}
-
-	if (sc->xbd_max_request_size >
-	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments)) {
-		device_printf(sc->xbd_dev,
-		    "Back-end specified max_request_size of %u "
-		    "limited to front-end limit of %u. (Too few segments.)\n",
-		    sc->xbd_max_request_size,
-		    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments));
-		sc->xbd_max_request_size =
-		    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
-	}
-
-	sc->xbd_max_request_blocks =
-	    BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
-
 	/* Allocate datastructures based on negotiated values. */
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
@@ -1241,36 +1183,6 @@ xbd_initialize(struct xbd_softc *sc)
 		}
 	}
 
-	error = xs_printf(XST_NIL, node_path,
-	    "max-requests","%u",
-	    sc->xbd_max_requests);
-	if (error) {
-		xenbus_dev_fatal(sc->xbd_dev, error,
-		    "writing %s/max-requests",
-		    node_path);
-		return;
-	}
-
-	error = xs_printf(XST_NIL, node_path,
-	    "max-request-segments","%u",
-	    sc->xbd_max_request_segments);
-	if (error) {
-		xenbus_dev_fatal(sc->xbd_dev, error,
-		    "writing %s/max-request-segments",
-		    node_path);
-		return;
-	}
-
-	error = xs_printf(XST_NIL, node_path,
-	    "max-request-size","%u",
-	    sc->xbd_max_request_size);
-	if (error) {
-		xenbus_dev_fatal(sc->xbd_dev, error,
-		    "writing %s/max-request-size",
-		    node_path);
-		return;
-	}
-
 	error = xs_printf(XST_NIL, node_path, "event-channel",
 	    "%u", xen_intr_port(sc->xen_intr_handle));
 	if (error) {
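
The heart of the change above is the new mksegarray() helper: for each bus_dma segment it computes the first and last 512-byte sector the segment occupies within its page, installs a grant reference for that page, and fills one blkif_request_segment entry. The stand-alone sketch below models only that sector arithmetic; struct seg, seg_sectors(), and the PAGE_SHIFT/PAGE_MASK/SECTOR_SHIFT constants (4 KiB pages, 512-byte sectors, i.e. the value assumed for the driver's XBD_SECTOR_SHFT) are illustrative stand-ins, and the grant-table calls are omitted.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants: 4 KiB pages and 512-byte sectors, so one
 * page holds at most eight sectors (indices 0..7). */
#define PAGE_SHIFT	12
#define PAGE_MASK	((1u << PAGE_SHIFT) - 1)
#define SECTOR_SHIFT	9

/* Hypothetical stand-in for one bus_dma segment. */
struct seg {
	uint64_t ds_addr;	/* physical start address */
	uint64_t ds_len;	/* length in bytes */
};

/*
 * Compute the first/last sector indices within the segment's page,
 * mirroring the arithmetic mksegarray() performs before installing
 * the grant reference for that page.
 */
static void
seg_sectors(const struct seg *s, uint64_t *fsect, uint64_t *lsect)
{
	*fsect = (s->ds_addr & PAGE_MASK) >> SECTOR_SHIFT;
	*lsect = *fsect + (s->ds_len >> SECTOR_SHIFT) - 1;
	/* The driver asserts that a segment never crosses a page. */
	assert(*lsect <= 7);
}

int
main(void)
{
	/* A segment starting 1 KiB into a page and 2 KiB long covers
	 * sectors 2..5 of that page. */
	struct seg s = { .ds_addr = 0x10400, .ds_len = 0x800 };
	uint64_t fsect, lsect;

	seg_sectors(&s, &fsect, &lsect);
	printf("first_sect=%ju last_sect=%ju\n",
	    (uintmax_t)fsect, (uintmax_t)lsect);
	return (0);
}

With every request limited to BLKIF_MAX_SEGMENTS_PER_REQUEST such page-sized segments, the KASSERT(lsect <= 7) check is the only per-segment constraint left, which is why the multi-block bookkeeping (BLKIF_SEGS_TO_BLOCKS, xbd_max_request_blocks, xbd_completion()) could be removed in the diff above.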