Diffstat (limited to 'sys/dev/vinum/vinumrequest.c')
-rw-r--r--  sys/dev/vinum/vinumrequest.c  156
1 file changed, 104 insertions, 52 deletions
diff --git a/sys/dev/vinum/vinumrequest.c b/sys/dev/vinum/vinumrequest.c
index bba7e1d..ba22fa9 100644
--- a/sys/dev/vinum/vinumrequest.c
+++ b/sys/dev/vinum/vinumrequest.c
@@ -119,8 +119,10 @@ vinumstrategy(struct buf *bp)
sdio(bp);
return;
- /* In fact, vinum doesn't handle drives: they're
- * handled directly by the disk drivers */
+ /*
+ * In fact, vinum doesn't handle drives: they're
+ * handled directly by the disk drivers
+ */
case VINUM_DRIVE_TYPE:
default:
bp->b_error = EIO; /* I/O error */
@@ -142,9 +144,11 @@ vinumstrategy(struct buf *bp)
return;
}
/* FALLTHROUGH */
- /* Plex I/O is pretty much the same as volume I/O
+ /*
+ * Plex I/O is pretty much the same as volume I/O
* for a single plex. Indicate this by passing a NULL
- * pointer (set above) for the volume */
+ * pointer (set above) for the volume
+ */
case VINUM_PLEX_TYPE:
case VINUM_RAWPLEX_TYPE:
bp->b_resid = bp->b_bcount; /* transfer everything */
@@ -153,14 +157,16 @@ vinumstrategy(struct buf *bp)
}
}
-/* Start a transfer. Return -1 on error,
+/*
+ * Start a transfer. Return -1 on error,
* 0 if OK, 1 if we need to retry.
* Parameter reviveok is set when doing
* transfers for revives: it allows transfers to
* be started immediately when a revive is in
* progress. During revive, normal transfers
* are queued if they share address space with
- * a currently active revive operation. */
+ * a currently active revive operation.
+ */
int
vinumstart(struct buf *bp, int reviveok)
{
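
A hedged sketch (not from the driver) of how a caller might consume the -1/0/1 contract described in the comment above; defer_request() is a hypothetical helper, not part of vinum:

struct buf;                                       /* opaque here; defined in sys/buf.h */
extern int vinumstart(struct buf *, int);
extern void defer_request(struct buf *);          /* hypothetical, not part of vinum */

void
start_or_defer(struct buf *bp)
{
    switch (vinumstart(bp, 0)) {                  /* reviveok == 0: normal transfer */
    case -1:                                      /* error: the request has failed */
        break;
    case 0:                                       /* transfer launched */
        break;
    case 1:                                       /* conflicts with an active revive */
        defer_request(bp);                        /* queue it and retry later */
        break;
    }
}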
@@ -175,12 +181,14 @@ vinumstart(struct buf *bp, int reviveok)
logrq(loginfo_user_bp, bp, bp);
#endif
- /* XXX In these routines, we're assuming that
+ /*
+ * XXX In these routines, we're assuming that
* we will always be called with bp->b_bcount
* which is a multiple of the sector size. This
* is a reasonable assumption, since we are only
* called from system routines. Should we check
- * anyway? */
+ * anyway?
+ */
if ((bp->b_bcount % DEV_BSIZE) != 0) { /* bad length */
bp->b_error = EINVAL; /* invalid size */
@@ -197,9 +205,11 @@ vinumstart(struct buf *bp, int reviveok)
}
bzero(rq, sizeof(struct request));
- /* Note the volume ID. This can be NULL, which
+ /*
+ * Note the volume ID. This can be NULL, which
* the request building functions use as an
- * indication for single plex I/O */
+ * indication for single plex I/O
+ */
rq->bp = bp; /* and the user buffer struct */
if (DEVTYPE(bp->b_dev) == VINUM_VOLUME_TYPE) { /* it's a volume, */
@@ -215,14 +225,16 @@ vinumstart(struct buf *bp, int reviveok)
}
if (bp->b_flags & B_READ) {
- /* This is a read request. Decide
+ /*
+ * This is a read request. Decide
* which plex to read from.
*
* There's a potential race condition here,
* since we're not locked, and we could end
* up multiply incrementing the round-robin
* counter. This doesn't have any serious
- * effects, however. */
+ * effects, however.
+ */
if (vol != NULL) {
vol->reads++;
vol->bytes_read += bp->b_bcount;
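
The benign race mentioned above can be shown in a minimal, self-contained sketch (names are illustrative, not vinum's): concurrent unlocked increments at worst skip a plex in the rotation, which only perturbs load balancing.

static unsigned int last_plex;                    /* shared round-robin counter */

int
choose_read_plex(int plexes)
{
    /*
     * Unlocked read-modify-write: two callers may both increment
     * before either uses the value, skipping a plex.  Harmless.
     */
    return last_plex++ % plexes;
}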
@@ -259,9 +271,11 @@ vinumstart(struct buf *bp, int reviveok)
return result;
}
} else
- /* This is a write operation. We write to all
+ /*
+ * This is a write operation. We write to all
* plexes. If this is a RAID 5 plex, we must also
- * update the parity stripe. */
+ * update the parity stripe.
+ */
{
if (vol != NULL) {
vol->writes++;
@@ -291,8 +305,10 @@ vinumstart(struct buf *bp, int reviveok)
}
}
-/* Call the low-level strategy routines to
- * perform the requests in a struct request */
+/*
+ * Call the low-level strategy routines to
+ * perform the requests in a struct request
+ */
int
launch_requests(struct request *rq, int reviveok)
{
@@ -301,10 +317,12 @@ launch_requests(struct request *rq, int reviveok)
struct rqelement *rqe; /* current element */
int s;
- /* First find out whether we're reviving, and the
+ /*
+ * First find out whether we're reviving, and the
* request contains a conflict. If so, we hang
* the request off plex->waitlist of the first
- * plex we find which is reviving */
+ * plex we find which is reviving
+ */
if ((rq->flags & XFR_REVIVECONFLICT) /* possible revive conflict */
&&(!reviveok)) { /* and we don't want to do it now, */
struct sd *sd;
@@ -389,7 +407,8 @@ launch_requests(struct request *rq, int reviveok)
return 0;
}
-/* define the low-level requests needed to perform a
+/*
+ * define the low-level requests needed to perform a
* high-level I/O operation for a specific plex 'plexno'.
*
* Return 0 if all subdisks involved in the request are up, 1 if some
@@ -483,20 +502,28 @@ bre(struct request *rq,
case plex_striped:
{
while (*diskaddr < diskend) { /* until we get it all sorted out */
- /* The offset of the start address from
- * the start of the stripe */
+ /*
+ * The offset of the start address from
+ * the start of the stripe
+ */
stripeoffset = *diskaddr % (plex->stripesize * plex->subdisks);
- /* The plex-relative address of the
- * start of the stripe */
+ /*
+ * The plex-relative address of the
+ * start of the stripe
+ */
stripebase = *diskaddr - stripeoffset;
- /* The number of the subdisk in which
- * the start is located */
+ /*
+ * The number of the subdisk in which
+ * the start is located
+ */
sdno = stripeoffset / plex->stripesize;
- /* The offset from the beginning of the stripe
- * on this subdisk */
+ /*
+ * The offset from the beginning of the stripe
+ * on this subdisk
+ */
blockoffset = stripeoffset % plex->stripesize;
sd = &SD[plex->sdnos[sdno]]; /* the subdisk in question */
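
The four values computed above fully locate an address within a striped plex. A self-contained worked example with illustrative numbers (a 4-subdisk plex with 512-sector stripes; values are not from the driver):

#include <stdio.h>

int
main(void)
{
    long stripesize = 512;                        /* sectors per stripe per subdisk */
    int subdisks = 4;                             /* subdisks in the plex */
    long diskaddr = 5000;                         /* plex-relative start address */

    long stripeoffset = diskaddr % (stripesize * subdisks);    /* 5000 % 2048 = 904 */
    long stripebase = diskaddr - stripeoffset;                 /* 4096 */
    int sdno = stripeoffset / stripesize;                      /* 904 / 512 = 1 */
    long blockoffset = stripeoffset % stripesize;              /* 904 % 512 = 392 */

    /* prints: subdisk 1, stripe base 4096, block offset 392 */
    printf("subdisk %d, stripe base %ld, block offset %ld\n",
        sdno, stripebase, blockoffset);
    return 0;
}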
@@ -561,9 +588,11 @@ bre(struct request *rq,
return status;
}
-/* Build up a request structure for reading volumes.
+/*
+ * Build up a request structure for reading volumes.
* This function is not needed for plex reads, since there's
- * no recovery if a plex read can't be satisified. */
+ * no recovery if a plex read can't be satisfied.
+ */
enum requeststatus
build_read_request(struct request *rq, /* request */
int plexindex)
@@ -600,14 +629,18 @@ build_read_request(struct request *rq, /* request */
case REQUEST_ENOMEM:
return status;
- /* if we get here, we have either had a failure or
+ /*
+ * if we get here, we have either had a failure or
* a RAID 5 recovery. We don't want to use the
* recovery, because it's expensive, so first we
- * check if we have alternatives */
+ * check if we have alternatives
+ */
case REQUEST_DOWN: /* can't access the plex */
if (vol != NULL) { /* and this is volume I/O */
- /* Try to satisfy the request
- * from another plex */
+ /*
+ * Try to satisfy the request
+ * from another plex
+ */
for (plexno = 0; plexno < vol->plexes; plexno++) {
diskaddr = startaddr; /* start at the beginning again */
oldstart = startaddr; /* and note where that was */
@@ -631,10 +664,12 @@ build_read_request(struct request *rq, /* request */
return status;
}
-/* Build up a request structure for writes.
+/*
+ * Build up a request structure for writes.
* Return 0 if all subdisks involved in the request are up, 1 if some
* subdisks are not up, and -1 if the request is at least partially
- * outside the bounds of the subdisks. */
+ * outside the bounds of the subdisks.
+ */
enum requeststatus
build_write_request(struct request *rq)
{ /* request */
@@ -651,9 +686,11 @@ build_write_request(struct request *rq)
status = REQUEST_OK;
for (plexno = 0; plexno < vol->plexes; plexno++) {
diskstart = bp->b_blkno; /* start offset of transfer */
- /* Build requests for the plex.
+ /*
+ * Build requests for the plex.
* We take the best possible result here (min,
- * not max): we're happy if we can write at all */
+ * not max): we're happy if we can write at all
+ */
status = min(status, bre(rq,
vol->plex[plexno],
&diskstart,
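
The effect of combining per-plex results with min() can be shown in isolation. This assumes, as the comment implies, that better statuses have smaller numeric values (REQUEST_OK lowest); the enum here is illustrative, not vinum's actual requeststatus:

#include <stdio.h>

enum status { S_OK = 0, S_DEGRADED = 1, S_DOWN = 2 };          /* smaller is better */

#define min(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
    enum status per_plex[3] = { S_DOWN, S_OK, S_DEGRADED };    /* one result per plex */
    enum status overall = per_plex[0];
    int i;

    for (i = 1; i < 3; i++)
        overall = min(overall, per_plex[i]);                   /* keep the best seen */
    printf("overall: %d\n", overall);                          /* 0: write can proceed */
    return 0;
}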
@@ -681,8 +718,10 @@ build_rq_buffer(struct rqelement *rqe, struct plex *plex)
bp->b_proc = ubp->b_proc; /* process pointer */
bp->b_flags = ubp->b_flags & (B_NOCACHE | B_READ | B_ASYNC); /* copy these flags from user bp */
bp->b_flags |= B_CALL | B_BUSY; /* inform us when it's done */
- /* XXX Should we check for reviving plexes here, and
- * set B_ORDERED if so? */
+ /*
+ * XXX Should we check for reviving plexes here, and
+ * set B_ORDERED if so?
+ */
bp->b_iodone = complete_rqe; /* by calling us here */
bp->b_dev = DRIVE[rqe->driveno].dev; /* drive device */
bp->b_blkno = rqe->sdoffset + sd->driveoffset; /* start address */
@@ -701,14 +740,18 @@ build_rq_buffer(struct rqelement *rqe, struct plex *plex)
return REQUEST_ENOMEM; /* no memory */
}
} else
- /* Point directly to user buffer data. This means
+ /*
+ * Point directly to user buffer data. This means
* that we don't need to do anything when we have
- * finished the transfer */
+ * finished the transfer
+ */
bp->b_data = ubp->b_data + rqe->useroffset * DEV_BSIZE;
return 0;
}
-/* Abort a request: free resources and complete the
- * user request with the specified error */
+/*
+ * Abort a request: free resources and complete the
+ * user request with the specified error
+ */
int
abortrequest(struct request *rq, int error)
{
@@ -721,10 +764,12 @@ abortrequest(struct request *rq, int error)
return error; /* and give up */
}
-/* Check that our transfer will cover the
+/*
+ * Check that our transfer will cover the
* complete address space of the user request.
*
- * Return 1 if it can, otherwise 0 */
+ * Return 1 if it can, otherwise 0
+ */
int
check_range_covered(struct request *rq)
{
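
A self-contained sketch of the coverage test described above, reduced to interval arithmetic; the extent fields are illustrative stand-ins for the request elements' user offsets and lengths, assumed sorted by offset:

struct extent {
    long useroffset;                              /* start within the user transfer, sectors */
    long datalen;                                 /* length, sectors */
};

int
range_covered(struct extent *e, int n, long total)
{
    long next = 0;                                /* next sector still required */
    int i;

    for (i = 0; i < n; i++) {
        if (e[i].useroffset > next)               /* gap before this extent */
            return 0;
        if (e[i].useroffset + e[i].datalen > next)
            next = e[i].useroffset + e[i].datalen;
    }
    return next >= total;                         /* 1 if the whole range is covered */
}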
@@ -784,8 +829,10 @@ sdio(struct buf *bp)
sbp->b.b_bcount -= (endoffset - sd->sectors) * DEV_BSIZE; /* trim */
if (sbp->b.b_bcount <= 0) { /* nothing to transfer */
bp->b_resid = bp->b_bcount; /* nothing transferred */
- /* XXX Grrr. This doesn't seem to work. Return
- * an error after all */
+ /*
+ * XXX Grrr. This doesn't seem to work. Return
+ * an error after all
+ */
bp->b_flags |= B_ERROR;
bp->b_error = ENOSPC;
biodone(bp);
@@ -814,7 +861,8 @@ sdio(struct buf *bp)
splx(s);
}
-/* Simplified version of bounds_check_with_label
+/*
+ * Simplified version of bounds_check_with_label
* Determine the size of the transfer, and make sure it is
* within the boundaries of the partition. Adjust transfer
* if needed, and signal errors or early completion.
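
The trimming this comment describes can be sketched self-contained (simplified types and names, not the driver's code): clamp the transfer to the partition and report early completion when nothing remains.

#define DEV_BSIZE 512

/* Return 1 if the (possibly trimmed) transfer is valid, 0 if nothing to do. */
int
bounds_check(long blkno, long *bcount, long part_sectors)
{
    long end = blkno + *bcount / DEV_BSIZE;       /* first sector past the transfer */

    if (blkno < 0 || blkno >= part_sectors)
        return 0;                                 /* starts outside the partition */
    if (end > part_sectors)
        *bcount = (part_sectors - blkno) * DEV_BSIZE;  /* trim to the boundary */
    return 1;
}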
@@ -871,8 +919,10 @@ vinum_bounds_check(struct buf *bp, struct volume *vol)
return 1;
}
-/* Allocate a request group and hook
- * it in in the list for rq */
+/*
+ * Allocate a request group and hook
+ * it into the list for rq
+ */
struct rqgroup *
allocrqg(struct request *rq, int elements)
{
@@ -895,10 +945,12 @@ allocrqg(struct request *rq, int elements)
return rqg;
}
-/* Deallocate a request group out of a chain. We do
+/*
+ * Deallocate a request group out of a chain. We do
* this by linear search: the chain is short, this
* almost never happens, and currently it can only
- * happen to the first member of the chain. */
+ * happen to the first member of the chain.
+ */
void
deallocrqg(struct rqgroup *rqg)
{
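
The linear search described above amounts to walking a short singly linked chain and unlinking the matching group; a minimal self-contained sketch with a simplified node type (field names are illustrative):

#include <stddef.h>

struct group {
    struct group *next;
};

void
unlink_group(struct group **head, struct group *g)
{
    struct group **p;

    for (p = head; *p != NULL; p = &(*p)->next) { /* short chain: linear scan */
        if (*p == g) {
            *p = g->next;                         /* unlink it */
            break;
        }
    }
}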