author     royger <royger@FreeBSD.org>  2015-06-12 07:50:34 +0000
committer  royger <royger@FreeBSD.org>  2015-06-12 07:50:34 +0000
commit     ecdfc8f10abb6d2e0917e148f37aafc2b8ed76f8 (patch)
tree       483a8786fad7d4b3904187bc6501c46fd5d0ea5a /sys/dev/xen/blkfront
parent     a75e86bc6b9dac6636dba259df0fb6c5b731d445 (diff)
xen-blk{front/back}: remove broken FreeBSD extensions
The FreeBSD extension adds a new request type, called blkif_segment_block,
which has a size of 112 bytes on both i386 and amd64. This is fine on amd64,
where requests also have a size of 112 bytes, but not on i386, where requests
have a size of 108 bytes. So on i386 we basically overrun the ring slot when
queuing a request of type blkif_segment_block_t, which is very bad.

Remove this extension (including a cleanup of the public blkif.h header file)
from blkfront and blkback.

Sponsored by:	Citrix Systems R&D
Tested-by:	cperciva
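The 112-byte versus 108-byte discrepancy comes from structure padding: the
request's 64-bit id field is aligned to 8 bytes on amd64 but only to 4 bytes
on i386. The sketch below is illustrative only -- the layouts are simplified
assumptions modelled on the public blkif.h, not the authoritative Xen
headers -- but it reproduces the two sizes:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Simplified, assumed layouts for illustration only; the real
     * definitions live in xen/interface/io/blkif.h.
     */
    struct blkif_request_segment {
            uint32_t gref;          /* grant reference for the data page */
            uint8_t  first_sect;    /* first 512-byte sector in the page */
            uint8_t  last_sect;     /* last 512-byte sector in the page */
    };                              /* 6 bytes of fields, padded to 8 */

    #define BLKIF_MAX_SEGMENTS_PER_REQUEST  11

    struct blkif_request {
            uint8_t  operation;     /* BLKIF_OP_READ, BLKIF_OP_WRITE, ... */
            uint8_t  nr_segments;   /* valid entries in seg[] */
            uint16_t handle;        /* blkif_vdev_t */
            uint64_t id;            /* echoed back in the response */
            uint64_t sector_number; /* start sector on the virtual disk */
            struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    };

    int
    main(void)
    {
            /*
             * amd64 inserts 4 bytes of padding after 'handle' so that
             * 'id' starts on an 8-byte boundary: 24 + 11 * 8 = 112 bytes.
             * The i386 ABI aligns uint64_t to 4 bytes, so no padding is
             * added and the request is only 20 + 11 * 8 = 108 bytes.
             */
            printf("sizeof(struct blkif_request) = %zu\n",
                sizeof(struct blkif_request));
            return (0);
    }

Built natively on amd64 this prints 112; built with -m32 it prints 108,
which is why a 112-byte extension block written into an i386 ring slot
spills into the next one.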
Diffstat (limited to 'sys/dev/xen/blkfront')
-rw-r--r--  sys/dev/xen/blkfront/blkfront.c | 175
-rw-r--r--  sys/dev/xen/blkfront/block.h    |  20
2 files changed, 50 insertions(+), 145 deletions(-)
diff --git a/sys/dev/xen/blkfront/blkfront.c b/sys/dev/xen/blkfront/blkfront.c
index 92b5f35..a71251d 100644
--- a/sys/dev/xen/blkfront/blkfront.c
+++ b/sys/dev/xen/blkfront/blkfront.c
@@ -174,7 +174,6 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
sc = cm->cm_sc;
if (error) {
- printf("error %d in xbd_queue_cb\n", error);
cm->cm_bp->bio_error = EIO;
biodone(cm->cm_bp);
xbd_free_command(cm);
@@ -191,55 +190,44 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
ring_req->nr_segments = nsegs;
cm->cm_nseg = nsegs;
- block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
+ block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_REQUEST);
sg = ring_req->seg;
last_block_sg = sg + block_segs;
sg_ref = cm->cm_sg_refs;
- while (1) {
+ while (sg < last_block_sg) {
+ buffer_ma = segs->ds_addr;
+ fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
+ lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
- while (sg < last_block_sg) {
- buffer_ma = segs->ds_addr;
- fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
- lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
+ KASSERT(lsect <= 7, ("XEN disk driver data cannot "
+ "cross a page boundary"));
- KASSERT(lsect <= 7, ("XEN disk driver data cannot "
- "cross a page boundary"));
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&cm->cm_gref_head);
- /* install a grant reference. */
- ref = gnttab_claim_grant_reference(&cm->cm_gref_head);
-
- /*
- * GNTTAB_LIST_END == 0xffffffff, but it is private
- * to gnttab.c.
- */
- KASSERT(ref != ~0, ("grant_reference failed"));
-
- gnttab_grant_foreign_access_ref(
- ref,
- xenbus_get_otherend_id(sc->xbd_dev),
- buffer_ma >> PAGE_SHIFT,
- ring_req->operation == BLKIF_OP_WRITE);
-
- *sg_ref = ref;
- *sg = (struct blkif_request_segment) {
- .gref = ref,
- .first_sect = fsect,
- .last_sect = lsect
- };
- sg++;
- sg_ref++;
- segs++;
- nsegs--;
- }
- block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
- if (block_segs == 0)
- break;
-
- sg = BLKRING_GET_SEG_BLOCK(&sc->xbd_ring,
- sc->xbd_ring.req_prod_pvt);
- sc->xbd_ring.req_prod_pvt++;
- last_block_sg = sg + block_segs;
+ /*
+ * GNTTAB_LIST_END == 0xffffffff, but it is private
+ * to gnttab.c.
+ */
+ KASSERT(ref != ~0, ("grant_reference failed"));
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ xenbus_get_otherend_id(sc->xbd_dev),
+ buffer_ma >> PAGE_SHIFT,
+ ring_req->operation == BLKIF_OP_WRITE);
+
+ *sg_ref = ref;
+ *sg = (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect
+ };
+ sg++;
+ sg_ref++;
+ segs++;
+ nsegs--;
}
if (cm->cm_operation == BLKIF_OP_READ)
@@ -396,8 +384,8 @@ xbd_startio(struct xbd_softc *sc)
if (sc->xbd_state != XBD_STATE_CONNECTED)
return;
- while (RING_FREE_REQUESTS(&sc->xbd_ring) >=
- sc->xbd_max_request_blocks) {
+ while (!RING_FULL(&sc->xbd_ring)) {
+
if (sc->xbd_qfrozen_cnt != 0)
break;
@@ -450,13 +438,6 @@ xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
biodone(bp);
}
-static int
-xbd_completion(struct xbd_command *cm)
-{
- gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs);
- return (BLKIF_SEGS_TO_BLOCKS(cm->cm_nseg));
-}
-
static void
xbd_int(void *xsc)
{
@@ -482,7 +463,9 @@ xbd_int(void *xsc)
cm = &sc->xbd_shadow[bret->id];
xbd_remove_cm(cm, XBD_Q_BUSY);
- i += xbd_completion(cm);
+ gnttab_end_foreign_access_references(cm->cm_nseg,
+ cm->cm_sg_refs);
+ i++;
if (cm->cm_operation == BLKIF_OP_READ)
op = BUS_DMASYNC_POSTREAD;
@@ -1064,11 +1047,9 @@ xbd_initialize(struct xbd_softc *sc)
*/
max_ring_page_order = 0;
sc->xbd_ring_pages = 1;
- sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+ sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
sc->xbd_max_request_size =
XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
- sc->xbd_max_request_blocks =
- BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
/*
* Protocol negotiation.
@@ -1095,24 +1076,10 @@ xbd_initialize(struct xbd_softc *sc)
if (sc->xbd_ring_pages < 1)
sc->xbd_ring_pages = 1;
- sc->xbd_max_requests =
- BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
- (void)xs_scanf(XST_NIL, otherend_path,
- "max-requests", NULL, "%" PRIu32,
- &sc->xbd_max_requests);
-
- (void)xs_scanf(XST_NIL, otherend_path,
- "max-request-segments", NULL, "%" PRIu32,
- &sc->xbd_max_request_segments);
-
- (void)xs_scanf(XST_NIL, otherend_path,
- "max-request-size", NULL, "%" PRIu32,
- &sc->xbd_max_request_size);
-
if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
device_printf(sc->xbd_dev,
"Back-end specified ring-pages of %u "
- "limited to front-end limit of %zu.\n",
+ "limited to front-end limit of %u.\n",
sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
}
@@ -1128,46 +1095,16 @@ xbd_initialize(struct xbd_softc *sc)
sc->xbd_ring_pages = new_page_limit;
}
+ sc->xbd_max_requests =
+ BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
device_printf(sc->xbd_dev,
"Back-end specified max_requests of %u "
- "limited to front-end limit of %u.\n",
+ "limited to front-end limit of %zu.\n",
sc->xbd_max_requests, XBD_MAX_REQUESTS);
sc->xbd_max_requests = XBD_MAX_REQUESTS;
}
- if (sc->xbd_max_request_segments > XBD_MAX_SEGMENTS_PER_REQUEST) {
- device_printf(sc->xbd_dev,
- "Back-end specified max_request_segments of %u "
- "limited to front-end limit of %u.\n",
- sc->xbd_max_request_segments,
- XBD_MAX_SEGMENTS_PER_REQUEST);
- sc->xbd_max_request_segments = XBD_MAX_SEGMENTS_PER_REQUEST;
- }
-
- if (sc->xbd_max_request_size > XBD_MAX_REQUEST_SIZE) {
- device_printf(sc->xbd_dev,
- "Back-end specified max_request_size of %u "
- "limited to front-end limit of %u.\n",
- sc->xbd_max_request_size,
- XBD_MAX_REQUEST_SIZE);
- sc->xbd_max_request_size = XBD_MAX_REQUEST_SIZE;
- }
-
- if (sc->xbd_max_request_size >
- XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments)) {
- device_printf(sc->xbd_dev,
- "Back-end specified max_request_size of %u "
- "limited to front-end limit of %u. (Too few segments.)\n",
- sc->xbd_max_request_size,
- XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments));
- sc->xbd_max_request_size =
- XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
- }
-
- sc->xbd_max_request_blocks =
- BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
-
/* Allocate datastructures based on negotiated values. */
error = bus_dma_tag_create(
bus_get_dma_tag(sc->xbd_dev), /* parent */
@@ -1241,36 +1178,6 @@ xbd_initialize(struct xbd_softc *sc)
}
}
- error = xs_printf(XST_NIL, node_path,
- "max-requests","%u",
- sc->xbd_max_requests);
- if (error) {
- xenbus_dev_fatal(sc->xbd_dev, error,
- "writing %s/max-requests",
- node_path);
- return;
- }
-
- error = xs_printf(XST_NIL, node_path,
- "max-request-segments","%u",
- sc->xbd_max_request_segments);
- if (error) {
- xenbus_dev_fatal(sc->xbd_dev, error,
- "writing %s/max-request-segments",
- node_path);
- return;
- }
-
- error = xs_printf(XST_NIL, node_path,
- "max-request-size","%u",
- sc->xbd_max_request_size);
- if (error) {
- xenbus_dev_fatal(sc->xbd_dev, error,
- "writing %s/max-request-size",
- node_path);
- return;
- }
-
error = xs_printf(XST_NIL, node_path, "event-channel",
"%u", xen_intr_port(sc->xen_intr_handle));
if (error) {
diff --git a/sys/dev/xen/blkfront/block.h b/sys/dev/xen/blkfront/block.h
index 9c803bc..3007118 100644
--- a/sys/dev/xen/blkfront/block.h
+++ b/sys/dev/xen/blkfront/block.h
@@ -61,11 +61,19 @@
((size / PAGE_SIZE) + 1)
/**
+ * The maximum number of shared memory ring pages we will allow in a
+ * negotiated block-front/back communication channel. Allow enough
+ * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
+ */
+#define XBD_MAX_RING_PAGES 32
+
+/**
* The maximum number of outstanding requests blocks (request headers plus
* additional segment blocks) we will allow in a negotiated block-front/back
* communication channel.
*/
-#define XBD_MAX_REQUESTS 256
+#define XBD_MAX_REQUESTS \
+ __CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)
/**
* The maximum mapped region size per request we will allow in a negotiated
@@ -83,15 +91,6 @@
(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \
XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE)))
-/**
- * The maximum number of shared memory ring pages we will allow in a
- * negotiated block-front/back communication channel. Allow enough
- * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
- */
-#define XBD_MAX_RING_PAGES \
- BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \
- * XBD_MAX_REQUESTS)
-
typedef enum {
XBDCF_Q_MASK = 0xFF,
/* This command has contributed to xbd_qfrozen_cnt. */
@@ -175,7 +174,6 @@ struct xbd_softc {
u_int xbd_ring_pages;
uint32_t xbd_max_requests;
uint32_t xbd_max_request_segments;
- uint32_t xbd_max_request_blocks;
uint32_t xbd_max_request_size;
grant_ref_t xbd_ring_ref[XBD_MAX_RING_PAGES];
blkif_front_ring_t xbd_ring;