author     mav <mav@FreeBSD.org>  2015-10-05 11:02:17 +0000
committer  mav <mav@FreeBSD.org>  2015-10-05 11:02:17 +0000
commit     c5a6a52cc6756f590647fd6f8d45909104f46369 (patch)
tree       bfa95d10e9ecc700b9027b8a1e9cff596f85f759 /sys/cam
parent     06a6d5a7e3b17cc5d5954391a0381bd879162cd9 (diff)
MFC r288220: Remove stale comments and some excessive empty lines.
Diffstat (limited to 'sys/cam')
-rw-r--r--  sys/cam/ctl/ctl_backend_block.c    48
-rw-r--r--  sys/cam/ctl/ctl_backend_ramdisk.c  46
2 files changed, 10 insertions, 84 deletions
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index 5985993..1839e47 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -466,14 +466,8 @@ ctl_be_block_move_done(union ctl_io *io)
* interrupt context, and therefore we cannot block.
*/
mtx_lock(&be_lun->queue_lock);
- /*
- * XXX KDM make sure that links is okay to use at this point.
- * Otherwise, we either need to add another field to ctl_io_hdr,
- * or deal with resource allocation here.
- */
STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
-
taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
return (0);
@@ -589,15 +583,12 @@ ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
- if (MNT_SHARED_WRITES(mountpoint)
- || ((mountpoint == NULL)
- && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
+ if (MNT_SHARED_WRITES(mountpoint) ||
+ ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
lock_flags = LK_SHARED;
else
lock_flags = LK_EXCLUSIVE;
-
vn_lock(be_lun->vn, lock_flags | LK_RETRY);
-
error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT,
curthread);
VOP_UNLOCK(be_lun->vn, 0);
@@ -719,13 +710,11 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
- if (MNT_SHARED_WRITES(mountpoint)
- || ((mountpoint == NULL)
+ if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL)
&& MNT_SHARED_WRITES(be_lun->vn->v_mount)))
lock_flags = LK_SHARED;
else
lock_flags = LK_EXCLUSIVE;
-
vn_lock(be_lun->vn, lock_flags | LK_RETRY);
/*
@@ -1543,14 +1532,8 @@ ctl_be_block_next(struct ctl_be_block_io *beio)
io->io_hdr.status |= CTL_STATUS_NONE;
mtx_lock(&be_lun->queue_lock);
- /*
- * XXX KDM make sure that links is okay to use at this point.
- * Otherwise, we either need to add another field to ctl_io_hdr,
- * or deal with resource allocation here.
- */
STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
-
taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
}
@@ -1781,11 +1764,6 @@ ctl_be_block_submit(union ctl_io *io)
PRIV(io)->len = 0;
mtx_lock(&be_lun->queue_lock);
- /*
- * XXX KDM make sure that links is okay to use at this point.
- * Otherwise, we either need to add another field to ctl_io_hdr,
- * or deal with resource allocation here.
- */
STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
@@ -2336,10 +2314,6 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
num_threads = 1;
}
- /*
- * XXX This searching loop might be refactored to be combined with
- * the loop above,
- */
value = ctl_get_opt(&cbe_lun->options, "num_threads");
if (value != NULL) {
tmp_num_threads = strtol(value, NULL, 0);
@@ -2529,7 +2503,6 @@ ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
break;
}
mtx_unlock(&softc->lock);
-
if (be_lun == NULL) {
snprintf(req->error_str, sizeof(req->error_str),
"LUN %u is not managed by the block backend",
@@ -2597,13 +2570,10 @@ ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
free(be_lun, M_CTLBLK);
req->status = CTL_LUN_OK;
-
return (0);
bailout_error:
-
req->status = CTL_LUN_ERROR;
-
return (0);
}
@@ -2625,7 +2595,6 @@ ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
break;
}
mtx_unlock(&softc->lock);
-
if (be_lun == NULL) {
snprintf(req->error_str, sizeof(req->error_str),
"LUN %u is not managed by the block backend",
@@ -2702,7 +2671,6 @@ ctl_be_block_lun_shutdown(void *be_lun)
struct ctl_be_block_softc *softc;
lun = (struct ctl_be_block_lun *)be_lun;
-
softc = lun->softc;
mtx_lock(&softc->lock);
@@ -2710,7 +2678,6 @@ ctl_be_block_lun_shutdown(void *be_lun)
if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
wakeup(lun);
mtx_unlock(&softc->lock);
-
}
static void
@@ -2759,14 +2726,13 @@ ctl_be_block_config_write(union ctl_io *io)
struct ctl_be_lun *cbe_lun;
int retval;
- retval = 0;
-
DPRINTF("entered\n");
cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
CTL_PRIV_BACKEND_LUN].ptr;
be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
+ retval = 0;
switch (io->scsiio.cdb[0]) {
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
@@ -2884,22 +2850,16 @@ ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
int retval;
lun = (struct ctl_be_block_lun *)be_lun;
- retval = 0;
retval = sbuf_printf(sb, "\t<num_threads>");
-
if (retval != 0)
goto bailout;
-
retval = sbuf_printf(sb, "%d", lun->num_threads);
-
if (retval != 0)
goto bailout;
-
retval = sbuf_printf(sb, "</num_threads>\n");
bailout:
-
return (retval);
}
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index e79769c..8e549c6 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -145,18 +145,13 @@ CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
int
ctl_backend_ramdisk_init(void)
{
- struct ctl_be_ramdisk_softc *softc;
+ struct ctl_be_ramdisk_softc *softc = &rd_softc;
#ifdef CTL_RAMDISK_PAGES
int i;
#endif
-
- softc = &rd_softc;
-
memset(softc, 0, sizeof(*softc));
-
mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
-
STAILQ_INIT(&softc->lun_list);
softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
@@ -177,14 +172,12 @@ ctl_backend_ramdisk_init(void)
void
ctl_backend_ramdisk_shutdown(void)
{
- struct ctl_be_ramdisk_softc *softc;
+ struct ctl_be_ramdisk_softc *softc = &rd_softc;
struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
int i;
#endif
- softc = &rd_softc;
-
mtx_lock(&softc->lock);
for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
/*
@@ -360,11 +353,8 @@ ctl_backend_ramdisk_worker(void *context, int pending)
if (io != NULL) {
STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
ctl_io_hdr, links);
-
mtx_unlock(&be_lun->queue_lock);
-
ctl_backend_ramdisk_continue(io);
-
mtx_lock(&be_lun->queue_lock);
continue;
}
@@ -382,18 +372,14 @@ static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
int flag, struct thread *td)
{
- struct ctl_be_ramdisk_softc *softc;
+ struct ctl_be_ramdisk_softc *softc = &rd_softc;
+ struct ctl_lun_req *lun_req;
int retval;
retval = 0;
- softc = &rd_softc;
-
switch (cmd) {
- case CTL_LUN_REQ: {
- struct ctl_lun_req *lun_req;
-
+ case CTL_LUN_REQ:
lun_req = (struct ctl_lun_req *)addr;
-
switch (lun_req->reqtype) {
case CTL_LUNREQ_CREATE:
retval = ctl_backend_ramdisk_create(softc, lun_req);
@@ -412,7 +398,6 @@ ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
break;
}
break;
- }
default:
retval = ENOTTY;
break;
@@ -429,20 +414,14 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_rm_params *params;
int retval;
-
retval = 0;
params = &req->reqdata.rm;
-
- be_lun = NULL;
-
mtx_lock(&softc->lock);
-
STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
if (be_lun->cbe_lun.lun_id == params->lun_id)
break;
}
mtx_unlock(&softc->lock);
-
if (be_lun == NULL) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN %u is not managed by the ramdisk backend",
@@ -451,7 +430,6 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
}
retval = ctl_disable_lun(&be_lun->cbe_lun);
-
if (retval != 0) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: error %d returned from ctl_disable_lun() for "
@@ -483,10 +461,9 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
}
mtx_lock(&softc->lock);
-
while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
- if (retval == EINTR)
+ if (retval == EINTR)
break;
}
be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
@@ -514,12 +491,10 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
}
req->status = CTL_LUN_OK;
-
return (retval);
bailout_error:
req->status = CTL_LUN_ERROR;
-
return (0);
}
@@ -656,7 +631,6 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
mtx_lock(&softc->lock);
softc->num_luns++;
STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
-
mtx_unlock(&softc->lock);
retval = ctl_add_lun(&be_lun->cbe_lun);
@@ -703,7 +677,6 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
mtx_unlock(&softc->lock);
req->status = CTL_LUN_OK;
-
return (retval);
bailout_error:
@@ -716,7 +689,6 @@ bailout_error:
mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
}
-
return (retval);
}
@@ -739,7 +711,6 @@ ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
break;
}
mtx_unlock(&softc->lock);
-
if (be_lun == NULL) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN %u is not managed by the ramdisk backend",
@@ -786,12 +757,10 @@ ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
params->lun_size_bytes = be_lun->size_bytes;
req->status = CTL_LUN_OK;
-
return (0);
bailout_error:
req->status = CTL_LUN_ERROR;
-
return (0);
}
@@ -807,9 +776,7 @@ ctl_backend_ramdisk_lun_shutdown(void *be_lun)
do_free = 0;
mtx_lock(&softc->lock);
-
lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
-
if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
wakeup(lun);
} else {
@@ -818,7 +785,6 @@ ctl_backend_ramdisk_lun_shutdown(void *be_lun)
softc->num_luns--;
do_free = 1;
}
-
mtx_unlock(&softc->lock);
if (do_free != 0)