summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorgibbs <gibbs@FreeBSD.org>2012-02-16 21:58:47 +0000
committergibbs <gibbs@FreeBSD.org>2012-02-16 21:58:47 +0000
commita50c43eae7b499420b1a953487a5de0ad2e49464 (patch)
treea78b6e8f60d0f3364102d8e597b56d09b5d0b34e
parent1b7653a401012eee28f32c7e5c0137b06516f3a1 (diff)
downloadFreeBSD-src-a50c43eae7b499420b1a953487a5de0ad2e49464.zip
FreeBSD-src-a50c43eae7b499420b1a953487a5de0ad2e49464.tar.gz
Fix a bug in the calculation of the maximum I/O request size.
The previous code did not limit the I/O request size based on the maximum number of segments supported by the back-end. In current practice, since the only back-end supporting chained requests is the FreeBSD implementation, this limit was never exceeded. sys/dev/xen/blkfront/block.h: Add two macros, XBF_SEGS_TO_SIZE() and XBF_SIZE_TO_SEGS(), to centralize the logic of reserving a segment to deal with non-page-aligned I/Os. sys/dev/xen/blkfront/blkfront.c: o When negotiating transfer parameters, limit the max_request_size we use and publish, if it is greater than the maximum, unaligned, I/O we can support with the number of segments advertised by the backend. o Don't unilaterally reduce the I/O size published to the disk layer by a single page. max_request_size is already properly limited in the transfer parameter negotiation code. o Fix typos in printf strings: "max_requests_segments" -> "max_request_segments" "specificed" -> "specified" MFC after: 1 day
-rw-r--r--sys/dev/xen/blkfront/blkfront.c21
-rw-r--r--sys/dev/xen/blkfront/block.h34
2 files changed, 44 insertions, 11 deletions
diff --git a/sys/dev/xen/blkfront/blkfront.c b/sys/dev/xen/blkfront/blkfront.c
index e5c436f..a5e280d 100644
--- a/sys/dev/xen/blkfront/blkfront.c
+++ b/sys/dev/xen/blkfront/blkfront.c
@@ -228,7 +228,7 @@ xlvbd_add(struct xb_softc *sc, blkif_sector_t sectors,
sc->xb_disk->d_sectorsize = sector_size;
sc->xb_disk->d_mediasize = sectors * sector_size;
- sc->xb_disk->d_maxsize = sc->max_request_size - PAGE_SIZE;
+ sc->xb_disk->d_maxsize = sc->max_request_size;
sc->xb_disk->d_flags = 0;
disk_create(sc->xb_disk, DISK_VERSION_00);
@@ -555,7 +555,7 @@ blkfront_initialize(struct xb_softc *sc)
max_ring_page_order = 0;
sc->ring_pages = 1;
sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
- sc->max_request_size = (sc->max_request_segments - 1) * PAGE_SIZE;
+ sc->max_request_size = XBF_SEGS_TO_SIZE(sc->max_request_segments);
sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
/*
@@ -621,8 +621,8 @@ blkfront_initialize(struct xb_softc *sc)
}
if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) {
- device_printf(sc->xb_dev, "Back-end specificed "
- "max_requests_segments of %u limited to "
+ device_printf(sc->xb_dev, "Back-end specified "
+ "max_request_segments of %u limited to "
"front-end limit of %u.\n",
sc->max_request_segments,
XBF_MAX_SEGMENTS_PER_REQUEST);
@@ -630,12 +630,23 @@ blkfront_initialize(struct xb_softc *sc)
}
if (sc->max_request_size > XBF_MAX_REQUEST_SIZE) {
- device_printf(sc->xb_dev, "Back-end specificed "
+ device_printf(sc->xb_dev, "Back-end specified "
"max_request_size of %u limited to front-end "
"limit of %u.\n", sc->max_request_size,
XBF_MAX_REQUEST_SIZE);
sc->max_request_size = XBF_MAX_REQUEST_SIZE;
}
+
+ if (sc->max_request_size > XBF_SEGS_TO_SIZE(sc->max_request_segments)) {
+ device_printf(sc->xb_dev, "Back-end specified "
+ "max_request_size of %u limited to front-end "
+ "limit of %u. (Too few segments.)\n",
+ sc->max_request_size,
+ XBF_SEGS_TO_SIZE(sc->max_request_segments));
+ sc->max_request_size =
+ XBF_SEGS_TO_SIZE(sc->max_request_segments);
+ }
+
sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
/* Allocate datastructures based on negotiated values. */
diff --git a/sys/dev/xen/blkfront/block.h b/sys/dev/xen/blkfront/block.h
index 1020b7f..5aa35ae 100644
--- a/sys/dev/xen/blkfront/block.h
+++ b/sys/dev/xen/blkfront/block.h
@@ -35,6 +35,32 @@
#include <xen/blkif.h>
/**
+ * Given a number of blkif segments, compute the maximum I/O size supported.
+ *
+ * \note This calculation assumes that all but the first and last segments
+ * of the I/O are fully utilized.
+ *
+ * \note We reserve a segment from the maximum supported by the transport to
+ * guarantee we can handle an unaligned transfer without the need to
+ * use a bounce buffer.
+ */
+#define XBF_SEGS_TO_SIZE(segs) \
+ (((segs) - 1) * PAGE_SIZE)
+
+/**
+ * Compute the maximum number of blkif segments required to represent
+ * an I/O of the given size.
+ *
+ * \note This calculation assumes that all but the first and last segments
+ * of the I/O are fully utilized.
+ *
+ * \note We reserve a segment to guarantee we can handle an unaligned
+ * transfer without the need to use a bounce buffer.
+ */
+#define XBF_SIZE_TO_SEGS(size) \
+ ((size / PAGE_SIZE) + 1)
+
+/**
* The maximum number of outstanding requests blocks (request headers plus
* additional segment blocks) we will allow in a negotiated block-front/back
* communication channel.
@@ -44,13 +70,9 @@
/**
* The maximum mapped region size per request we will allow in a negotiated
* block-front/back communication channel.
- *
- * \note We reserve a segement from the maximum supported by the transport to
- * guarantee we can handle an unaligned transfer without the need to
- * use a bounce buffer..
*/
#define XBF_MAX_REQUEST_SIZE \
- MIN(MAXPHYS, (BLKIF_MAX_SEGMENTS_PER_REQUEST - 1) * PAGE_SIZE)
+ MIN(MAXPHYS, XBF_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))
/**
* The maximum number of segments (within a request header and accompanying
@@ -59,7 +81,7 @@
*/
#define XBF_MAX_SEGMENTS_PER_REQUEST \
(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \
- (XBF_MAX_REQUEST_SIZE / PAGE_SIZE) + 1))
+ XBF_SIZE_TO_SEGS(XBF_MAX_REQUEST_SIZE)))
/**
* The maximum number of shared memory ring pages we will allow in a
OpenPOWER on IntegriCloud