summaryrefslogtreecommitdiffstats
path: root/sys/dev/vinum
diff options
context:
space:
mode:
author: grog <grog@FreeBSD.org> 2001-05-22 02:36:47 +0000
committer: grog <grog@FreeBSD.org> 2001-05-22 02:36:47 +0000
commit: 9fadf0687c2d363b33a199a6e5929f61b99cda0b (patch)
tree: c6ea6b4165575bb76bdbe421c393e6ee3251ff9e /sys/dev/vinum
parent: 282262c30227edea344de9077358b11fa220dd3d (diff)
downloadFreeBSD-src-9fadf0687c2d363b33a199a6e5929f61b99cda0b.zip
FreeBSD-src-9fadf0687c2d363b33a199a6e5929f61b99cda0b.tar.gz
vinumstart: If a write request is for a RAID-[45] plex or a volume
with more than one plex, the data will be accessed multiple times. During this time, userland code could potentially modify the buffer, thus causing data corruption. In the case of a multi-plexed volume this might be cosmetic, but in the case of a RAID-[45] plex it can cause severe data corruption which only becomes evident after a drive failure. Avoid this situation by making a copy of the data buffer before using it. Note that this solution does not guarantee any particular content of the buffer, just that it remains unchanged for the duration of the request. Suggested by: alfred
Diffstat (limited to 'sys/dev/vinum')
-rw-r--r--  sys/dev/vinum/vinumrequest.c  36
1 file changed, 23 insertions, 13 deletions
diff --git a/sys/dev/vinum/vinumrequest.c b/sys/dev/vinum/vinumrequest.c
index fad2f38..af8147b 100644
--- a/sys/dev/vinum/vinumrequest.c
+++ b/sys/dev/vinum/vinumrequest.c
@@ -168,14 +168,13 @@ vinumstrategy(struct bio *biop)
}
/*
- * Start a transfer. Return -1 on error,
- * 0 if OK, 1 if we need to retry.
- * Parameter reviveok is set when doing
- * transfers for revives: it allows transfers to
- * be started immediately when a revive is in
- * progress. During revive, normal transfers
- * are queued if they share address space with
- * a currently active revive operation.
+ * Start a transfer. Return -1 on error, 0 if OK,
+ * 1 if we need to retry. Parameter reviveok is
+ * set when doing transfers for revives: it allows
+ * transfers to be started immediately when a
+ * revive is in progress. During revive, normal
+ * transfers are queued if they share address
+ * space with a currently active revive operation.
*/
int
vinumstart(struct buf *bp, int reviveok)
@@ -209,7 +208,7 @@ vinumstart(struct buf *bp, int reviveok)
/*
* Note the volume ID. This can be NULL, which
* the request building functions use as an
- * indication for single plex I/O
+ * indication for single plex I/O.
*/
rq->bp = bp; /* and the user buffer struct */
@@ -269,9 +268,16 @@ vinumstart(struct buf *bp, int reviveok)
* a RAID-4 or RAID-5 plex, we must also update the parity stripe.
*/
{
- if (vol != NULL)
- status = build_write_request(rq); /* Not all the subdisks are up */
- else { /* plex I/O */
+ if (vol != NULL) {
+ if ((vol->plexes > 0) /* multiple plex */
+ ||(isparity((&PLEX[vol->plex[0]])))) { /* or RAID-[45], */
+ rq->save_data = bp->b_data; /* save the data buffer address */
+ bp->b_data = Malloc(bp->b_bufsize);
+ bcopy(rq->save_data, bp->b_data, bp->b_bufsize); /* make a copy */
+ rq->flags |= XFR_COPYBUF; /* and note that we did it */
+ }
+ status = build_write_request(rq);
+ } else { /* plex I/O */
daddr_t diskstart;
diskstart = bp->b_blkno; /* start offset of transfer */
@@ -285,6 +291,10 @@ vinumstart(struct buf *bp, int reviveok)
bp->b_error = EIO; /* I/O error */
bp->b_io.bio_flags |= BIO_ERROR;
}
+ if (rq->flags & XFR_COPYBUF) {
+ Free(bp->b_data);
+ bp->b_data = rq->save_data;
+ }
bufdone(bp);
freerq(rq);
return -1;
@@ -1005,7 +1015,7 @@ vinum_bounds_check(struct buf *bp, struct volume *vol)
&& bp->b_blkno + size > LABELSECTOR /* and finishes after */
#endif
&& (!(vol->flags & VF_RAW)) /* and it's not raw */
-&&(bp->b_iocmd == BIO_WRITE) /* and it's a write */
+ &&(bp->b_iocmd == BIO_WRITE) /* and it's a write */
&&(!vol->flags & (VF_WLABEL | VF_LABELLING))) { /* and we're not allowed to write the label */
bp->b_error = EROFS; /* read-only */
bp->b_io.bio_flags |= BIO_ERROR;
OpenPOWER on IntegriCloud