diff options
author | scottl <scottl@FreeBSD.org> | 2014-01-07 01:51:48 +0000 |
---|---|---|
committer | scottl <scottl@FreeBSD.org> | 2014-01-07 01:51:48 +0000 |
commit | cd4455d63815797dee4bd5d2fbe903ace2888a8f (patch) | |
tree | e4a932aed55b2e611c4a7cbe18560be7eb0cd1f1 | |
parent | 0a34594b9cd7c8b87f719ed058da6be2b756a8e5 (diff) | |
download | FreeBSD-src-cd4455d63815797dee4bd5d2fbe903ace2888a8f.zip FreeBSD-src-cd4455d63815797dee4bd5d2fbe903ace2888a8f.tar.gz |
MFC Alexander Motin's direct dispatch, multi-queue, and finer-grained
locking support for CAM
r256826:
Fix several target mode SIMs to not blindly clear ccb_h.flags field of
ATIO CCBs. Not all CCB flags there belong to them.
r256836:
Remove hard limit on number of BIOs handled with one ATA TRIM request.
r256843:
Merge CAM locking changes from the projects/camlock branch to radically
reduce lock congestion and improve SMP scalability of the SCSI/ATA stack,
preparing the ground for the coming next GEOM direct dispatch support.
r256888:
Unconditionally acquire periph reference on CCB allocation failure.
r256895:
Fix memory and references leak due to unfreed path.
r256960:
Move CAM_UNQUEUED_INDEX setting to the last moment and under the periph lock.
This fixes race condition with cam_periph_ccbwait(), causing use-after-free.
r256975:
Minor (mostly cosmetic) addition to r256960.
r257054:
Some microoptimizations for da and ada drivers:
- Replace ordered_tag_count counter with single flag;
- From da remove outstanding_cmds counter, duplicating pending_ccbs list;
- From da_softc remove unused links field.
r257482:
Fix lock recursion, triggered by `smartctl -a /dev/adaX`.
r257501:
Make getenv_*() functions and respectively TUNABLE_*_FETCH() macros not
allocate memory and so not require a sleepable environment. getenv() already
uses on-stack temporary storage, so just use it more rationally.
getenv_string() receives a buffer as an argument, so it doesn't need another one.
r257914:
Some CAM lock polishing:
- Fix LOR and possible lock recursion when handling high-power commands.
Introduce a new lock to protect the remaining power quota and the list of frozen devices.
- Correct locking around xpt periph creation.
- Remove the seemingly never-used XPT_FLAG_OPEN xpt periph flag.
Again, Netflix assisted with testing the merge, but all of the credit goes
to Alexander and iX Systems.
Submitted by: mav
Sponsored by: iX Systems
37 files changed, 1660 insertions, 1683 deletions
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c index e476a3e..989f0aa 100644 --- a/sys/cam/ata/ata_da.c +++ b/sys/cam/ata/ata_da.c @@ -80,7 +80,7 @@ typedef enum { ADA_FLAG_CAN_NCQ = 0x0008, ADA_FLAG_CAN_DMA = 0x0010, ADA_FLAG_NEED_OTAG = 0x0020, - ADA_FLAG_WENT_IDLE = 0x0040, + ADA_FLAG_WAS_OTAG = 0x0040, ADA_FLAG_CAN_TRIM = 0x0080, ADA_FLAG_OPEN = 0x0100, ADA_FLAG_SCTX_INIT = 0x0200, @@ -103,7 +103,6 @@ typedef enum { ADA_CCB_RAHEAD = 0x01, ADA_CCB_WCACHE = 0x02, ADA_CCB_BUFFER_IO = 0x03, - ADA_CCB_WAITING = 0x04, ADA_CCB_DUMP = 0x05, ADA_CCB_TRIM = 0x06, ADA_CCB_TYPE_MASK = 0x0F, @@ -123,21 +122,20 @@ struct disk_params { #define TRIM_MAX_BLOCKS 8 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES) -#define TRIM_MAX_BIOS (TRIM_MAX_RANGES * 4) struct trim_request { uint8_t data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE]; - struct bio *bps[TRIM_MAX_BIOS]; + TAILQ_HEAD(, bio) bps; }; struct ada_softc { struct bio_queue_head bio_queue; struct bio_queue_head trim_queue; + int outstanding_cmds; /* Number of active commands */ + int refcount; /* Active xpt_action() calls */ ada_state state; - ada_flags flags; + ada_flags flags; ada_quirks quirks; int sort_io_queue; - int ordered_tag_count; - int outstanding_cmds; int trim_max_ranges; int trim_running; int read_ahead; @@ -630,14 +628,8 @@ adaclose(struct disk *dp) int error; periph = (struct cam_periph *)dp->d_drv1; - cam_periph_lock(periph); - if (cam_periph_hold(periph, PRIBIO) != 0) { - cam_periph_unlock(periph); - cam_periph_release(periph); - return (0); - } - softc = (struct ada_softc *)periph->softc; + cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaclose\n")); @@ -645,7 +637,8 @@ adaclose(struct disk *dp) /* We only sync the cache if the drive is capable of it. 
*/ if ((softc->flags & ADA_FLAG_DIRTY) != 0 && (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 && - (periph->flags & CAM_PERIPH_INVALID) == 0) { + (periph->flags & CAM_PERIPH_INVALID) == 0 && + cam_periph_hold(periph, PRIBIO) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, @@ -669,10 +662,13 @@ adaclose(struct disk *dp) else softc->flags &= ~ADA_FLAG_DIRTY; xpt_release_ccb(ccb); + cam_periph_unhold(periph); } softc->flags &= ~ADA_FLAG_OPEN; - cam_periph_unhold(periph); + + while (softc->refcount != 0) + cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); @@ -682,23 +678,15 @@ static void adaschedule(struct cam_periph *periph) { struct ada_softc *softc = (struct ada_softc *)periph->softc; - uint32_t prio; if (softc->state != ADA_STATE_NORMAL) return; - /* Check if cam_periph_getccb() was called. */ - prio = periph->immediate_priority; - /* Check if we have more work to do. */ if (bioq_first(&softc->bio_queue) || (!softc->trim_running && bioq_first(&softc->trim_queue))) { - prio = CAM_PRIORITY_NORMAL; + xpt_schedule(periph, CAM_PRIORITY_NORMAL); } - - /* Schedule CCB if any of above is true. 
*/ - if (prio != CAM_PRIORITY_NONE) - xpt_schedule(periph, prio); } /* @@ -962,7 +950,7 @@ adaasync(void *callback_arg, u_int32_t code, status = cam_periph_alloc(adaregister, adaoninvalidate, adacleanup, adastart, "ada", CAM_PERIPH_BIO, - cgd->ccb_h.path, adaasync, + path, adaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -1038,8 +1026,10 @@ adaasync(void *callback_arg, u_int32_t code, softc->state = ADA_STATE_WCACHE; else break; - cam_periph_acquire(periph); - xpt_schedule(periph, CAM_PRIORITY_DEV); + if (cam_periph_acquire(periph) != CAM_REQ_CMP) + softc->state = ADA_STATE_NORMAL; + else + xpt_schedule(periph, CAM_PRIORITY_DEV); } default: cam_periph_async(periph, code, path, arg); @@ -1346,8 +1336,8 @@ adaregister(struct cam_periph *periph, void *arg) * Create our sysctl variables, now that we know * we have successfully attached. */ - cam_periph_acquire(periph); - taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); + if (cam_periph_acquire(periph) == CAM_REQ_CMP) + taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add async callbacks for bus reset and @@ -1365,7 +1355,7 @@ adaregister(struct cam_periph *periph, void *arg) * Schedule a periodic event to occasionally send an * ordered tag to a device. 
*/ - callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0); + callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); @@ -1373,16 +1363,17 @@ adaregister(struct cam_periph *periph, void *arg) if (ADA_RA >= 0 && cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) { softc->state = ADA_STATE_RAHEAD; - cam_periph_acquire(periph); - xpt_schedule(periph, CAM_PRIORITY_DEV); } else if (ADA_WC >= 0 && cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) { softc->state = ADA_STATE_WCACHE; - cam_periph_acquire(periph); - xpt_schedule(periph, CAM_PRIORITY_DEV); - } else + } else { softc->state = ADA_STATE_NORMAL; - + return(CAM_REQ_CMP); + } + if (cam_periph_acquire(periph) != CAM_REQ_CMP) + softc->state = ADA_STATE_NORMAL; + else + xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } @@ -1400,29 +1391,17 @@ adastart(struct cam_periph *periph, union ccb *start_ccb) struct bio *bp; u_int8_t tag_code; - /* Execute immediate CCB if waiting. */ - if (periph->immediate_priority <= periph->pinfo.priority) { - CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, - ("queuing for immediate ccb\n")); - start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING; - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - /* Have more work to do, so ensure we stay scheduled */ - adaschedule(periph); - break; - } /* Run TRIM if not running yet. 
*/ if (!softc->trim_running && (bp = bioq_first(&softc->trim_queue)) != 0) { struct trim_request *req = &softc->trim_req; struct bio *bp1; uint64_t lastlba = (uint64_t)-1; - int bps = 0, c, lastcount = 0, off, ranges = 0; + int c, lastcount = 0, off, ranges = 0; softc->trim_running = 1; bzero(req, sizeof(*req)); + TAILQ_INIT(&req->bps); bp1 = bp; do { uint64_t lba = bp1->bio_pblkno; @@ -1465,10 +1444,9 @@ adastart(struct cam_periph *periph, union ccb *start_ccb) */ } lastlba = lba; - req->bps[bps++] = bp1; + TAILQ_INSERT_TAIL(&req->bps, bp1, bio_queue); bp1 = bioq_first(&softc->trim_queue); - if (bps >= TRIM_MAX_BIOS || - bp1 == NULL || + if (bp1 == NULL || bp1->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) @@ -1487,6 +1465,7 @@ adastart(struct cam_periph *periph, union ccb *start_ccb) ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES - 1) / ATA_DSM_BLK_RANGES); start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM; + start_ccb->ccb_h.flags |= CAM_UNLOCKED; goto out; } /* Run regular command. 
*/ @@ -1500,7 +1479,7 @@ adastart(struct cam_periph *periph, union ccb *start_ccb) if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~ADA_FLAG_NEED_OTAG; - softc->ordered_tag_count++; + softc->flags |= ADA_FLAG_WAS_OTAG; tag_code = 0; } else { tag_code = 1; @@ -1655,10 +1634,15 @@ adastart(struct cam_periph *periph, union ccb *start_ccb) break; } start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO; + start_ccb->ccb_h.flags |= CAM_UNLOCKED; out: start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; + softc->refcount++; + cam_periph_unlock(periph); xpt_action(start_ccb); + cam_periph_lock(periph); + softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ adaschedule(periph); @@ -1667,13 +1651,6 @@ out: case ADA_STATE_RAHEAD: case ADA_STATE_WCACHE: { - if ((periph->flags & CAM_PERIPH_INVALID) != 0) { - softc->state = ADA_STATE_NORMAL; - xpt_release_ccb(start_ccb); - cam_periph_release_locked(periph); - return; - } - cam_fill_ataio(ataio, 1, adadone, @@ -1722,10 +1699,12 @@ adadone(struct cam_periph *periph, union ccb *done_ccb) struct bio *bp; int error; + cam_periph_lock(periph); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = adaerror(done_ccb, 0, 0); if (error == ERESTART) { /* A retry was scheduled, so just return. 
*/ + cam_periph_unlock(periph); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) @@ -1754,29 +1733,32 @@ adadone(struct cam_periph *periph, union ccb *done_ccb) } softc->outstanding_cmds--; if (softc->outstanding_cmds == 0) - softc->flags |= ADA_FLAG_WENT_IDLE; + softc->flags |= ADA_FLAG_WAS_OTAG; + xpt_release_ccb(done_ccb); if (state == ADA_CCB_TRIM) { - struct trim_request *req = - (struct trim_request *)ataio->data_ptr; - int i; - - for (i = 1; i < TRIM_MAX_BIOS && req->bps[i]; i++) { - struct bio *bp1 = req->bps[i]; + TAILQ_HEAD(, bio) queue; + struct bio *bp1; - bp1->bio_error = bp->bio_error; - if (bp->bio_flags & BIO_ERROR) { + TAILQ_INIT(&queue); + TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue); + softc->trim_running = 0; + adaschedule(periph); + cam_periph_unlock(periph); + while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { + TAILQ_REMOVE(&queue, bp1, bio_queue); + bp1->bio_error = error; + if (error != 0) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } - softc->trim_running = 0; - biodone(bp); - adaschedule(periph); - } else + } else { + cam_periph_unlock(periph); biodone(bp); - break; + } + return; } case ADA_CCB_RAHEAD: { @@ -1852,12 +1834,6 @@ out: cam_periph_release_locked(periph); return; } - case ADA_CCB_WAITING: - { - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } case ADA_CCB_DUMP: /* No-op. 
We're polling */ return; @@ -1919,14 +1895,11 @@ adasendorderedtag(void *arg) struct ada_softc *softc = arg; if (ada_send_ordered) { - if ((softc->ordered_tag_count == 0) - && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) { - softc->flags |= ADA_FLAG_NEED_OTAG; + if (softc->outstanding_cmds > 0) { + if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0) + softc->flags |= ADA_FLAG_NEED_OTAG; + softc->flags &= ~ADA_FLAG_WAS_OTAG; } - if (softc->outstanding_cmds > 0) - softc->flags &= ~ADA_FLAG_WENT_IDLE; - - softc->ordered_tag_count = 0; } /* Queue us up again */ callout_reset(&softc->sendordered_c, diff --git a/sys/cam/ata/ata_pmp.c b/sys/cam/ata/ata_pmp.c index d220591..bd4d25a 100644 --- a/sys/cam/ata/ata_pmp.c +++ b/sys/cam/ata/ata_pmp.c @@ -293,7 +293,7 @@ pmpasync(void *callback_arg, u_int32_t code, status = cam_periph_alloc(pmpregister, pmponinvalidate, pmpcleanup, pmpstart, "pmp", CAM_PERIPH_BIO, - cgd->ccb_h.path, pmpasync, + path, pmpasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -318,13 +318,17 @@ pmpasync(void *callback_arg, u_int32_t code, if (code == AC_SENT_BDR || code == AC_BUS_RESET) softc->found = 0; /* We have to reset everything. 
*/ if (softc->state == PMP_STATE_NORMAL) { - if (softc->pm_pid == 0x37261095 || - softc->pm_pid == 0x38261095) - softc->state = PMP_STATE_PM_QUIRKS_1; - else - softc->state = PMP_STATE_PRECONFIG; - cam_periph_acquire(periph); - xpt_schedule(periph, CAM_PRIORITY_DEV); + if (cam_periph_acquire(periph) == CAM_REQ_CMP) { + if (softc->pm_pid == 0x37261095 || + softc->pm_pid == 0x38261095) + softc->state = PMP_STATE_PM_QUIRKS_1; + else + softc->state = PMP_STATE_PRECONFIG; + xpt_schedule(periph, CAM_PRIORITY_DEV); + } else { + pmprelease(periph, softc->found); + xpt_release_boot(); + } } else softc->restart = 1; break; diff --git a/sys/cam/ata/ata_xpt.c b/sys/cam/ata/ata_xpt.c index 2d9b05d..867b2fe 100644 --- a/sys/cam/ata/ata_xpt.c +++ b/sys/cam/ata/ata_xpt.c @@ -182,7 +182,7 @@ static struct cam_ed * static void ata_device_transport(struct cam_path *path); static void ata_get_transfer_settings(struct ccb_trans_settings *cts); static void ata_set_transfer_settings(struct ccb_trans_settings *cts, - struct cam_ed *device, + struct cam_path *path, int async_update); static void ata_dev_async(u_int32_t async_code, struct cam_eb *bus, @@ -249,6 +249,7 @@ proberegister(struct cam_periph *periph, void *arg) return (status); } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); + ata_device_transport(periph->path); probeschedule(periph); return(CAM_REQ_CMP); } @@ -1320,6 +1321,7 @@ ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb) struct cam_path *path; ata_scan_bus_info *scan_info; union ccb *work_ccb, *reset_ccb; + struct mtx *mtx; cam_status status; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, @@ -1395,11 +1397,14 @@ ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb) xpt_done(request_ccb); break; } + mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); goto scan_next; case XPT_SCAN_LUN: work_ccb = request_ccb; /* Reuse the same CCB to query if a device was really found */ scan_info = (ata_scan_bus_info 
*)work_ccb->ccb_h.ppriv_ptr0; + mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); + mtx_lock(mtx); /* If there is PMP... */ if ((scan_info->cpi->hba_inquiry & PI_SATAPM) && (scan_info->counter == scan_info->cpi->max_target)) { @@ -1428,6 +1433,7 @@ ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb) ((scan_info->cpi->hba_inquiry & PI_SATAPM) ? 0 : scan_info->cpi->max_target)) { done: + mtx_unlock(mtx); xpt_free_ccb(work_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; @@ -1444,6 +1450,8 @@ scan_next: scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { + if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN) + mtx_unlock(mtx); printf("xpt_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); @@ -1459,9 +1467,15 @@ scan_next: scan_info->request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = ata_scan_bus; + work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; + mtx_unlock(mtx); + if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN) + mtx = NULL; xpt_action(work_ccb); + if (mtx != NULL) + mtx_lock(mtx); break; default: break; @@ -1476,6 +1490,7 @@ ata_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; + int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_scan_lun\n")); @@ -1510,10 +1525,14 @@ ata_scan_lun(struct cam_periph *periph, struct cam_path *path, } xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; + request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->crcn.flags = flags; } + lock = (xpt_path_owned(path) == 0); + if (lock) + xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "aprobe")) != NULL) { if ((old_periph->flags & 
CAM_PERIPH_INVALID) == 0) { probe_softc *softc; @@ -1540,6 +1559,8 @@ ata_scan_lun(struct cam_periph *periph, struct cam_path *path, xpt_done(request_ccb); } } + if (lock) + xpt_path_unlock(path); } static void @@ -1553,7 +1574,6 @@ xptscandone(struct cam_periph *periph, union ccb *done_ccb) static struct cam_ed * ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { - struct cam_path path; struct ata_quirk_entry *quirk; struct cam_ed *device; @@ -1574,22 +1594,6 @@ ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; - - /* - * XXX should be limited by number of CCBs this bus can - * do. - */ - bus->sim->max_ccbs += device->ccbq.devq_openings; - if (lun_id != CAM_LUN_WILDCARD) { - xpt_compile_path(&path, - NULL, - bus->path_id, - target->target_id, - lun_id); - ata_device_transport(&path); - xpt_release_path(&path); - } - return (device); } @@ -1712,15 +1716,8 @@ ata_dev_advinfo(union ccb *start_ccb) start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { - int owned; - - owned = mtx_owned(start_ccb->ccb_h.path->bus->sim->mtx); - if (owned == 0) - mtx_lock(start_ccb->ccb_h.path->bus->sim->mtx); xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); - if (owned == 0) - mtx_unlock(start_ccb->ccb_h.path->bus->sim->mtx); } } @@ -1732,7 +1729,7 @@ ata_action(union ccb *start_ccb) case XPT_SET_TRAN_SETTINGS: { ata_set_transfer_settings(&start_ccb->cts, - start_ccb->ccb_h.path->device, + start_ccb->ccb_h.path, /*async_update*/FALSE); break; } @@ -1791,11 +1788,9 @@ ata_get_transfer_settings(struct ccb_trans_settings *cts) struct ccb_trans_settings_ata *ata; struct ccb_trans_settings_scsi *scsi; struct cam_ed *device; - struct cam_sim *sim; device = cts->ccb_h.path->device; - sim = cts->ccb_h.path->bus->sim; - (*(sim->sim_action))(sim, (union ccb *)cts); + xpt_action_default((union ccb *)cts); 
if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { @@ -1832,17 +1827,17 @@ ata_get_transfer_settings(struct ccb_trans_settings *cts) } static void -ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, +ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings_ata *ata; struct ccb_trans_settings_scsi *scsi; - struct cam_sim *sim; struct ata_params *ident_data; struct scsi_inquiry_data *inq_data; + struct cam_ed *device; - if (device == NULL) { + if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; @@ -1859,14 +1854,14 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { - xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n", + xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { - xpt_print(cts->ccb_h.path, "Down reving Protocol " + xpt_print(path, "Down reving Protocol " "Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } @@ -1884,21 +1879,20 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, cts->transport_version = device->transport_version; if (cts->transport != device->transport) { - xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n", + xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { - xpt_print(cts->ccb_h.path, "Down reving Transport " + xpt_print(path, "Down reving Transport " "Version from %d to %d?\n", cts->transport_version, 
device->transport_version); } cts->transport_version = device->transport_version; } - sim = cts->ccb_h.path->bus->sim; ident_data = &device->ident_data; inq_data = &device->inq_data; if (cts->protocol == PROTO_ATA) @@ -1909,7 +1903,7 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, scsi = &cts->proto_specific.scsi; else scsi = NULL; - xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE); + xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); @@ -1953,11 +1947,11 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else if (nowt && !newt) - xpt_stop_tags(cts->ccb_h.path); + xpt_stop_tags(path); } if (async_update == FALSE) - (*(sim->sim_action))(sim, (union ccb *)cts); + xpt_action_default((union ccb *)cts); } /* @@ -2014,10 +2008,14 @@ ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; + struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; - ata_set_transfer_settings(settings, device, + xpt_compile_path(&path, NULL, bus->path_id, target->target_id, + device->lun_id); + ata_set_transfer_settings(settings, &path, /*async_update*/TRUE); + xpt_release_path(&path); } } @@ -2030,7 +2028,7 @@ ata_announce_periph(struct cam_periph *periph) u_int speed; u_int mb; - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h index 127246b..139bd50 100644 --- a/sys/cam/cam_ccb.h +++ b/sys/cam/cam_ccb.h @@ -104,7 +104,9 @@ typedef enum { CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_TERM_IO = 0x10000000,/* 
Terminate I/O Message sup. */ CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */ - CAM_SEND_STATUS = 0x40000000 /* Send status after data phase */ + CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */ + + CAM_UNLOCKED = 0x80000000 /* Call callback without lock. */ } ccb_flags; typedef enum { @@ -151,6 +153,9 @@ typedef enum { /* Device statistics (error counts, etc.) */ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ + XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB + | XPT_FC_XPT_ONLY, + /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ @@ -1154,6 +1159,16 @@ struct ccb_dev_advinfo { }; /* + * CCB for sending async events + */ +struct ccb_async { + struct ccb_hdr ccb_h; + uint32_t async_code; + off_t async_arg_size; + void *async_arg_ptr; +}; + +/* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc @@ -1192,6 +1207,7 @@ union ccb { struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; + struct ccb_async casync; }; __BEGIN_DECLS diff --git a/sys/cam/cam_periph.c b/sys/cam/cam_periph.c index 4e9138f..1c636cb 100644 --- a/sys/cam/cam_periph.c +++ b/sys/cam/cam_periph.c @@ -196,12 +196,12 @@ cam_periph_alloc(periph_ctor_t *periph_ctor, path_id = xpt_path_path_id(path); target_id = xpt_path_target_id(path); lun_id = xpt_path_lun_id(path); - cam_init_pinfo(&periph->pinfo); periph->periph_start = periph_start; periph->periph_dtor = periph_dtor; periph->periph_oninval = periph_oninvalidate; periph->type = type; periph->periph_name = name; + periph->scheduled_priority = CAM_PRIORITY_NONE; periph->immediate_priority = CAM_PRIORITY_NONE; periph->refcount = 1; /* Dropped by invalidation. 
*/ periph->sim = sim; @@ -298,7 +298,7 @@ cam_periph_find(struct cam_path *path, char *name) TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { if (xpt_path_comp(periph->path, path) == 0) { xpt_unlock_buses(); - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); return(periph); } } @@ -379,7 +379,7 @@ void cam_periph_release_locked_buses(struct cam_periph *periph) { - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); KASSERT(periph->refcount >= 1, ("periph->refcount >= 1")); if (--periph->refcount == 0) camperiphfree(periph); @@ -400,16 +400,16 @@ cam_periph_release_locked(struct cam_periph *periph) void cam_periph_release(struct cam_periph *periph) { - struct cam_sim *sim; + struct mtx *mtx; if (periph == NULL) return; - sim = periph->sim; - mtx_assert(sim->mtx, MA_NOTOWNED); - mtx_lock(sim->mtx); + cam_periph_assert(periph, MA_NOTOWNED); + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); cam_periph_release_locked(periph); - mtx_unlock(sim->mtx); + mtx_unlock(mtx); } int @@ -427,10 +427,10 @@ cam_periph_hold(struct cam_periph *periph, int priority) if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { periph->flags |= CAM_PERIPH_LOCK_WANTED; - if ((error = mtx_sleep(periph, periph->sim->mtx, priority, + if ((error = cam_periph_sleep(periph, periph, priority, "caplck", 0)) != 0) { cam_periph_release_locked(periph); return (error); @@ -449,7 +449,7 @@ void cam_periph_unhold(struct cam_periph *periph) { - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); periph->flags &= ~CAM_PERIPH_LOCKED; if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { @@ -577,7 +577,7 @@ void cam_periph_invalidate(struct cam_periph *periph) { - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); /* * We only call this routine the first time a 
peripheral is * invalidated. @@ -600,7 +600,9 @@ camperiphfree(struct cam_periph *periph) { struct periph_driver **p_drv; - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); + KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating", + periph->periph_name, periph->unit_number)); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0) break; @@ -947,40 +949,14 @@ cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) PRELE(curproc); } -union ccb * -cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) -{ - struct ccb_hdr *ccb_h; - - mtx_assert(periph->sim->mtx, MA_OWNED); - CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n")); - - while (SLIST_FIRST(&periph->ccb_list) == NULL) { - if (periph->immediate_priority > priority) - periph->immediate_priority = priority; - xpt_schedule(periph, priority); - if ((SLIST_FIRST(&periph->ccb_list) != NULL) - && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority)) - break; - mtx_assert(periph->sim->mtx, MA_OWNED); - mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb", - 0); - } - - ccb_h = SLIST_FIRST(&periph->ccb_list); - SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); - return ((union ccb *)ccb_h); -} - void cam_periph_ccbwait(union ccb *ccb) { - struct cam_sim *sim; - sim = xpt_path_sim(ccb->ccb_h.path); if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX) || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)) - mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0); + xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO, + "cbwait", 0); } int @@ -1045,6 +1021,14 @@ cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, return(error); } +static void +cam_periph_done(struct cam_periph *periph, union ccb *done_ccb) +{ + + /* Caller will release the CCB */ + wakeup(&done_ccb->ccb_h.cbfcnp); +} + int cam_periph_runccb(union ccb 
*ccb, int (*error_routine)(union ccb *ccb, @@ -1053,12 +1037,9 @@ cam_periph_runccb(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags, struct devstat *ds) { - struct cam_sim *sim; int error; - error = 0; - sim = xpt_path_sim(ccb->ccb_h.path); - mtx_assert(sim->mtx, MA_OWNED); + xpt_path_assert(ccb->ccb_h.path, MA_OWNED); /* * If the user has supplied a stats structure, and if we understand @@ -1068,6 +1049,7 @@ cam_periph_runccb(union ccb *ccb, ccb->ccb_h.func_code == XPT_ATA_IO)) devstat_start_transaction(ds, NULL); + ccb->ccb_h.cbfcnp = cam_periph_done; xpt_action(ccb); do { @@ -1786,9 +1768,11 @@ cam_periph_error(union ccb *ccb, cam_flags camflags, scan_ccb->ccb_h.func_code = XPT_SCAN_TGT; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); - } else + } else { xpt_print(newpath, "Can't allocate CCB to rescan target\n"); + xpt_free_path(newpath); + } } } diff --git a/sys/cam/cam_periph.h b/sys/cam/cam_periph.h index 66b9d91..b9a04d9 100644 --- a/sys/cam/cam_periph.h +++ b/sys/cam/cam_periph.h @@ -35,6 +35,7 @@ #include <cam/cam_sim.h> #ifdef _KERNEL +#include <sys/taskqueue.h> #include <cam/cam_xpt.h> @@ -103,7 +104,6 @@ typedef cam_status periph_ctor_t (struct cam_periph *periph, typedef void periph_oninv_t (struct cam_periph *periph); typedef void periph_dtor_t (struct cam_periph *periph); struct cam_periph { - cam_pinfo pinfo; periph_start_t *periph_start; periph_oninv_t *periph_oninval; periph_dtor_t *periph_dtor; @@ -120,15 +120,20 @@ struct cam_periph { #define CAM_PERIPH_INVALID 0x08 #define CAM_PERIPH_NEW_DEV_FOUND 0x10 #define CAM_PERIPH_RECOVERY_INPROG 0x20 +#define CAM_PERIPH_RUN_TASK 0x40 #define CAM_PERIPH_FREE 0x80 #define CAM_PERIPH_ANNOUNCED 0x100 - u_int32_t immediate_priority; + uint32_t scheduled_priority; + uint32_t immediate_priority; + int periph_allocating; + int periph_allocated; u_int32_t refcount; SLIST_HEAD(, ccb_hdr) ccb_list; /* For "immediate" requests */ SLIST_ENTRY(cam_periph) periph_links; TAILQ_ENTRY(cam_periph) unit_links; 
ac_callback_t *deferred_callback; ac_code deferred_ac; + struct task periph_run_task; }; #define CAM_PERIPH_MAXMAPS 2 @@ -185,30 +190,26 @@ void cam_periph_freeze_after_event(struct cam_periph *periph, int cam_periph_error(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags, union ccb *save_ccb); -static __inline void -cam_periph_lock(struct cam_periph *periph) +static __inline struct mtx * +cam_periph_mtx(struct cam_periph *periph) { - mtx_lock(periph->sim->mtx); + return (xpt_path_mtx(periph->path)); } -static __inline void -cam_periph_unlock(struct cam_periph *periph) -{ - mtx_unlock(periph->sim->mtx); -} +#define cam_periph_owned(periph) \ + mtx_owned(xpt_path_mtx((periph)->path)) -static __inline int -cam_periph_owned(struct cam_periph *periph) -{ - return (mtx_owned(periph->sim->mtx)); -} +#define cam_periph_lock(periph) \ + mtx_lock(xpt_path_mtx((periph)->path)) -static __inline int -cam_periph_sleep(struct cam_periph *periph, void *chan, int priority, - const char *wmesg, int timo) -{ - return (msleep(chan, periph->sim->mtx, priority, wmesg, timo)); -} +#define cam_periph_unlock(periph) \ + mtx_unlock(xpt_path_mtx((periph)->path)) + +#define cam_periph_assert(periph, what) \ + mtx_assert(xpt_path_mtx((periph)->path), (what)) + +#define cam_periph_sleep(periph, chan, priority, wmesg, timo) \ + xpt_path_sleep((periph)->path, (chan), (priority), (wmesg), (timo)) static inline struct cam_periph * cam_periph_acquire_first(struct periph_driver *driver) @@ -230,7 +231,7 @@ cam_periph_acquire_next(struct cam_periph *pperiph) { struct cam_periph *periph = pperiph; - mtx_assert(pperiph->sim->mtx, MA_NOTOWNED); + cam_periph_assert(pperiph, MA_NOTOWNED); xpt_lock_buses(); do { periph = TAILQ_NEXT(periph, unit_links); diff --git a/sys/cam/cam_queue.c b/sys/cam/cam_queue.c index daaa9b3..f6624f3 100644 --- a/sys/cam/cam_queue.c +++ b/sys/cam/cam_queue.c @@ -220,27 +220,30 @@ cam_devq_alloc(int devices, int openings) } if (cam_devq_init(devq, devices, openings) != 
0) { free(devq, M_CAMDEVQ); - return (NULL); + return (NULL); } - return (devq); } int cam_devq_init(struct cam_devq *devq, int devices, int openings) { + bzero(devq, sizeof(*devq)); + mtx_init(&devq->send_mtx, "CAM queue lock", NULL, MTX_DEF); if (camq_init(&devq->send_queue, devices) != 0) return (1); devq->send_openings = openings; - devq->send_active = 0; - return (0); + devq->send_active = 0; + return (0); } void cam_devq_free(struct cam_devq *devq) { + camq_fini(&devq->send_queue); + mtx_destroy(&devq->send_mtx); free(devq, M_CAMDEVQ); } @@ -286,6 +289,7 @@ cam_ccbq_resize(struct cam_ccbq *ccbq, int new_size) int delta; delta = new_size - (ccbq->dev_active + ccbq->dev_openings); + ccbq->total_openings += delta; ccbq->devq_openings += delta; ccbq->dev_openings += delta; @@ -303,6 +307,7 @@ cam_ccbq_init(struct cam_ccbq *ccbq, int openings) if (camq_init(&ccbq->queue, imax(64, 1 << fls(openings + openings / 2))) != 0) return (1); + ccbq->total_openings = openings; ccbq->devq_openings = openings; ccbq->dev_openings = openings; return (0); diff --git a/sys/cam/cam_queue.h b/sys/cam/cam_queue.h index 0bb4491..0f74e82 100644 --- a/sys/cam/cam_queue.h +++ b/sys/cam/cam_queue.h @@ -33,6 +33,8 @@ #ifdef _KERNEL +#include <sys/lock.h> +#include <sys/mutex.h> #include <sys/queue.h> #include <cam/cam.h> @@ -59,8 +61,8 @@ struct cam_ccbq { struct camq queue; struct ccb_hdr_tailq queue_extra_head; int queue_extra_entries; + int total_openings; int devq_openings; - int devq_allocating; int dev_openings; int dev_active; int held; @@ -69,9 +71,10 @@ struct cam_ccbq { struct cam_ed; struct cam_devq { - struct camq send_queue; - int send_openings; - int send_active; + struct mtx send_mtx; + struct camq send_queue; + int send_openings; + int send_active; }; diff --git a/sys/cam/cam_sim.c b/sys/cam/cam_sim.c index 530e160..9a8c666 100644 --- a/sys/cam/cam_sim.c +++ b/sys/cam/cam_sim.c @@ -87,7 +87,6 @@ cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll, sim->flags = 
0; sim->refcount = 1; sim->devq = queue; - sim->max_ccbs = 8; /* Reserve for management purposes. */ sim->mtx = mtx; if (mtx == &Giant) { sim->flags |= 0; @@ -96,17 +95,12 @@ cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll, sim->flags |= CAM_SIM_MPSAFE; callout_init(&sim->callout, 1); } - - SLIST_INIT(&sim->ccb_freeq); - TAILQ_INIT(&sim->sim_doneq); - return (sim); } void cam_sim_free(struct cam_sim *sim, int free_devq) { - union ccb *ccb; int error; mtx_assert(sim->mtx, MA_OWNED); @@ -118,10 +112,6 @@ cam_sim_free(struct cam_sim *sim, int free_devq) KASSERT(sim->refcount == 0, ("sim->refcount == 0")); - while ((ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) != NULL) { - SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle); - xpt_free_ccb(ccb); - } if (free_devq) cam_simq_free(sim->devq); free(sim, M_CAMSIM); @@ -130,21 +120,31 @@ cam_sim_free(struct cam_sim *sim, int free_devq) void cam_sim_release(struct cam_sim *sim) { - KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); - mtx_assert(sim->mtx, MA_OWNED); + int lock; + lock = (mtx_owned(sim->mtx) == 0); + if (lock) + CAM_SIM_LOCK(sim); + KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); sim->refcount--; if (sim->refcount == 0) wakeup(sim); + if (lock) + CAM_SIM_UNLOCK(sim); } void cam_sim_hold(struct cam_sim *sim) { - KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); - mtx_assert(sim->mtx, MA_OWNED); + int lock; + lock = (mtx_owned(sim->mtx) == 0); + if (lock) + CAM_SIM_LOCK(sim); + KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); sim->refcount++; + if (lock) + CAM_SIM_UNLOCK(sim); } void diff --git a/sys/cam/cam_sim.h b/sys/cam/cam_sim.h index 58127ba..4232650 100644 --- a/sys/cam/cam_sim.h +++ b/sys/cam/cam_sim.h @@ -104,23 +104,9 @@ struct cam_sim { u_int32_t flags; #define CAM_SIM_REL_TIMEOUT_PENDING 0x01 #define CAM_SIM_MPSAFE 0x02 -#define CAM_SIM_ON_DONEQ 0x04 -#define CAM_SIM_POLLED 0x08 -#define CAM_SIM_BATCH 0x10 struct callout callout; struct cam_devq *devq; /* Device Queue 
to use for this SIM */ int refcount; /* References to the SIM. */ - - /* "Pool" of inactive ccbs managed by xpt_get_ccb and xpt_release_ccb */ - SLIST_HEAD(,ccb_hdr) ccb_freeq; - /* - * Maximum size of ccb pool. Modified as devices are added/removed - * or have their * opening counts changed. - */ - u_int max_ccbs; - /* Current count of allocated ccbs */ - u_int ccb_count; - }; #define CAM_SIM_LOCK(sim) mtx_lock((sim)->mtx) diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c index 54c4acd..86e2f5e 100644 --- a/sys/cam/cam_xpt.c +++ b/sys/cam/cam_xpt.c @@ -40,7 +40,9 @@ __FBSDID("$FreeBSD$"); #include <sys/conf.h> #include <sys/fcntl.h> #include <sys/interrupt.h> +#include <sys/proc.h> #include <sys/sbuf.h> +#include <sys/smp.h> #include <sys/taskqueue.h> #include <sys/lock.h> @@ -90,14 +92,9 @@ struct xpt_task { uintptr_t data2; }; -typedef enum { - XPT_FLAG_OPEN = 0x01 -} xpt_flags; - struct xpt_softc { - xpt_flags flags; - /* number of high powered commands that can go through right now */ + struct mtx xpt_highpower_lock; STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; int num_highpower; @@ -117,6 +114,7 @@ struct xpt_softc { struct mtx xpt_topo_lock; struct mtx xpt_lock; + struct taskqueue *xpt_taskq; }; typedef enum { @@ -155,14 +153,19 @@ TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay); SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, &xsoftc.boot_delay, 0, "Bus registration wait time"); -/* Queues for our software interrupt handler */ -typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t; -typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t; -static cam_simq_t cam_simq; -static struct mtx cam_simq_lock; +struct cam_doneq { + struct mtx_padalign cam_doneq_mtx; + STAILQ_HEAD(, ccb_hdr) cam_doneq; + int cam_doneq_sleep; +}; + +static struct cam_doneq cam_doneqs[MAXCPU]; +static int cam_num_doneqs; +static struct proc *cam_proc; -/* Pointers to software interrupt handlers */ -static void *cambio_ih; +TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs); 
+SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, + &cam_num_doneqs, 0, "Number of completion queues/threads"); struct cam_periph *xpt_periph; @@ -223,16 +226,21 @@ static void xpt_async_bcast(struct async_list *async_head, void *async_arg); static path_id_t xptnextfreepathid(void); static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); -static union ccb *xpt_get_ccb(struct cam_ed *device); -static void xpt_run_dev_allocq(struct cam_ed *device); +static union ccb *xpt_get_ccb(struct cam_periph *periph); +static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); +static void xpt_run_allocq(struct cam_periph *periph, int sleep); +static void xpt_run_allocq_task(void *context, int pending); static void xpt_run_devq(struct cam_devq *devq); static timeout_t xpt_release_devq_timeout; static void xpt_release_simq_timeout(void *arg) __unused; +static void xpt_acquire_bus(struct cam_eb *bus); static void xpt_release_bus(struct cam_eb *bus); -static void xpt_release_devq_device(struct cam_ed *dev, u_int count, +static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); +static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue); static struct cam_et* xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); +static void xpt_acquire_target(struct cam_et *target); static void xpt_release_target(struct cam_et *target); static struct cam_eb* xpt_find_bus(path_id_t path_id); @@ -241,11 +249,14 @@ static struct cam_et* static struct cam_ed* xpt_find_device(struct cam_et *target, lun_id_t lun_id); static void xpt_config(void *arg); +static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, + u_int32_t new_priority); static xpt_devicefunc_t xptpassannouncefunc; static void xptaction(struct cam_sim *sim, union ccb *work_ccb); static void xptpoll(struct cam_sim *sim); -static void camisr(void *); -static void camisr_runqueue(struct cam_sim *); +static void camisr_runqueue(void); +static void 
xpt_done_process(struct ccb_hdr *ccb_h); +static void xpt_done_td(void *); static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus); static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, @@ -296,7 +307,6 @@ static xpt_devicefunc_t xptsetasyncfunc; static xpt_busfunc_t xptsetasyncbusfunc; static cam_status xptregister(struct cam_periph *periph, void *arg); -static __inline int periph_is_queued(struct cam_periph *periph); static __inline int device_is_queued(struct cam_ed *device); static __inline int @@ -304,6 +314,7 @@ xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) { int retval; + mtx_assert(&devq->send_mtx, MA_OWNED); if ((dev->ccbq.queue.entries > 0) && (dev->ccbq.dev_openings > 0) && (dev->ccbq.queue.qfrozen_cnt == 0)) { @@ -314,7 +325,7 @@ xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) */ retval = xpt_schedule_dev(&devq->send_queue, - &dev->devq_entry.pinfo, + &dev->devq_entry, CAMQ_GET_PRIO(&dev->ccbq.queue)); } else { retval = 0; @@ -323,15 +334,9 @@ xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) } static __inline int -periph_is_queued(struct cam_periph *periph) -{ - return (periph->pinfo.index != CAM_UNQUEUED_INDEX); -} - -static __inline int device_is_queued(struct cam_ed *device) { - return (device->devq_entry.pinfo.index != CAM_UNQUEUED_INDEX); + return (device->devq_entry.index != CAM_UNQUEUED_INDEX); } static void @@ -340,13 +345,6 @@ xpt_periph_init() make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); } -static void -xptdone(struct cam_periph *periph, union ccb *done_ccb) -{ - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); -} - static int xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { @@ -365,11 +363,6 @@ xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) return(ENODEV); } - /* Mark ourselves open */ - mtx_lock(&xsoftc.xpt_lock); - xsoftc.flags |= XPT_FLAG_OPEN; - 
mtx_unlock(&xsoftc.xpt_lock); - return(0); } @@ -377,11 +370,6 @@ static int xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { - /* Mark ourselves closed */ - mtx_lock(&xsoftc.xpt_lock); - xsoftc.flags &= ~XPT_FLAG_OPEN; - mtx_unlock(&xsoftc.xpt_lock); - return(0); } @@ -457,8 +445,6 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread * ccb = xpt_alloc_ccb(); - CAM_SIM_LOCK(bus->sim); - /* * Create a path using the bus, target, and lun the * user passed in. @@ -469,7 +455,6 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread * inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; - CAM_SIM_UNLOCK(bus->sim); xpt_free_ccb(ccb); break; } @@ -477,12 +462,12 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread * xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(ccb, inccb); - ccb->ccb_h.cbfcnp = xptdone; + xpt_path_lock(ccb->ccb_h.path); cam_periph_runccb(ccb, NULL, 0, 0, NULL); + xpt_path_unlock(ccb->ccb_h.path); bcopy(ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); - CAM_SIM_UNLOCK(bus->sim); break; case XPT_DEBUG: { @@ -493,8 +478,6 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread * * allocate it on the stack. */ - CAM_SIM_LOCK(bus->sim); - /* * Create a path using the bus, target, and lun the * user passed in. 
@@ -505,18 +488,15 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread * inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; - CAM_SIM_UNLOCK(bus->sim); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(&ccb, inccb); - ccb.ccb_h.cbfcnp = xptdone; xpt_action(&ccb); bcopy(&ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb.ccb_h.path); - CAM_SIM_UNLOCK(bus->sim); break; } @@ -564,9 +544,7 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread * /* * This is an immediate CCB, we can send it on directly. */ - CAM_SIM_LOCK(xpt_path_sim(xpt_periph->path)); xpt_action(inccb); - CAM_SIM_UNLOCK(xpt_path_sim(xpt_periph->path)); /* * Map the buffers back into user space. @@ -784,7 +762,7 @@ static void xpt_scanner_thread(void *dummy) { union ccb *ccb; - struct cam_sim *sim; + struct cam_path path; xpt_lock_buses(); for (;;) { @@ -795,10 +773,16 @@ xpt_scanner_thread(void *dummy) TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_unlock_buses(); - sim = ccb->ccb_h.path->bus->sim; - CAM_SIM_LOCK(sim); + /* + * Since lock can be dropped inside and path freed + * by completion callback even before return here, + * take our own path copy for reference. 
+ */ + xpt_copy_path(&path, ccb->ccb_h.path); + xpt_path_lock(&path); xpt_action(ccb); - CAM_SIM_UNLOCK(sim); + xpt_path_unlock(&path); + xpt_release_path(&path); xpt_lock_buses(); } @@ -857,16 +841,18 @@ xpt_init(void *dummy) struct cam_path *path; struct cam_devq *devq; cam_status status; + int error, i; TAILQ_INIT(&xsoftc.xpt_busses); - TAILQ_INIT(&cam_simq); TAILQ_INIT(&xsoftc.ccb_scanq); STAILQ_INIT(&xsoftc.highpowerq); xsoftc.num_highpower = CAM_MAX_HIGHPOWER; - mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); + mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF); + xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, + taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); #ifdef CAM_BOOT_DELAY /* @@ -901,6 +887,7 @@ xpt_init(void *dummy) " failing attach\n", status); return (EINVAL); } + mtx_unlock(&xsoftc.xpt_lock); /* * Looking at the XPT from the SIM layer, the XPT is @@ -915,13 +902,32 @@ xpt_init(void *dummy) " failing attach\n", status); return (EINVAL); } - + xpt_path_lock(path); cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, path, NULL, 0, xpt_sim); + xpt_path_unlock(path); xpt_free_path(path); - mtx_unlock(&xsoftc.xpt_lock); - /* Install our software interrupt handlers */ - swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih); + + if (cam_num_doneqs < 1) + cam_num_doneqs = 1 + mp_ncpus / 6; + else if (cam_num_doneqs > MAXCPU) + cam_num_doneqs = MAXCPU; + for (i = 0; i < cam_num_doneqs; i++) { + mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, + MTX_DEF); + STAILQ_INIT(&cam_doneqs[i].cam_doneq); + error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], + &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); + if (error != 0) { + cam_num_doneqs = i; + break; + } + } + if (cam_num_doneqs < 1) { + printf("xpt_init: Cannot init completion queues 
" + "- failing attach\n"); + return (ENOMEM); + } /* * Register a callback for when interrupts are enabled. */ @@ -966,28 +972,15 @@ xpt_add_periph(struct cam_periph *periph) { struct cam_ed *device; int32_t status; - struct periph_list *periph_head; - - mtx_assert(periph->sim->mtx, MA_OWNED); + TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); device = periph->path->device; - - periph_head = &device->periphs; - status = CAM_REQ_CMP; - if (device != NULL) { - /* - * Make room for this peripheral - * so it will fit in the queue - * when it's scheduled to run - */ - status = camq_resize(&device->drvq, - device->drvq.array_size + 1); - + mtx_lock(&device->target->bus->eb_mtx); device->generation++; - - SLIST_INSERT_HEAD(periph_head, periph, periph_links); + SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); + mtx_unlock(&device->target->bus->eb_mtx); } return (status); @@ -998,21 +991,12 @@ xpt_remove_periph(struct cam_periph *periph) { struct cam_ed *device; - mtx_assert(periph->sim->mtx, MA_OWNED); - device = periph->path->device; - if (device != NULL) { - struct periph_list *periph_head; - - periph_head = &device->periphs; - - /* Release the slot for this peripheral */ - camq_resize(&device->drvq, device->drvq.array_size - 1); - + mtx_lock(&device->target->bus->eb_mtx); device->generation++; - - SLIST_REMOVE(periph_head, periph, cam_periph, periph_links); + SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); + mtx_unlock(&device->target->bus->eb_mtx); } } @@ -1022,7 +1006,7 @@ xpt_announce_periph(struct cam_periph *periph, char *announce_string) { struct cam_path *path = periph->path; - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); periph->flags |= CAM_PERIPH_ANNOUNCED; printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", @@ -1077,7 +1061,7 @@ xpt_denounce_periph(struct cam_periph *periph) { struct cam_path *path = periph->path; - mtx_assert(periph->sim->mtx, MA_OWNED); + 
cam_periph_assert(periph, MA_OWNED); printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, @@ -1110,7 +1094,7 @@ xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) struct ccb_dev_advinfo cdai; struct scsi_vpd_id_descriptor *idd; - mtx_assert(path->bus->sim->mtx, MA_OWNED); + xpt_path_assert(path, MA_OWNED); memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); @@ -1531,6 +1515,7 @@ static int xptedtbusfunc(struct cam_eb *bus, void *arg) { struct ccb_dev_match *cdm; + struct cam_et *target; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; @@ -1602,71 +1587,72 @@ xptedtbusfunc(struct cam_eb *bus, void *arg) * If there is a target generation recorded, check it to * make sure the target list hasn't changed. */ - if ((cdm->pos.position_type & CAM_DEV_POS_BUS) - && (bus == cdm->pos.cookie.bus) - && (cdm->pos.position_type & CAM_DEV_POS_TARGET) - && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0) - && (cdm->pos.generations[CAM_TARGET_GENERATION] != - bus->generation)) { - cdm->status = CAM_DEV_MATCH_LIST_CHANGED; - return(0); - } - + mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) - && (cdm->pos.cookie.target != NULL)) - return(xpttargettraverse(bus, - (struct cam_et *)cdm->pos.cookie.target, - xptedttargetfunc, arg)); - else - return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg)); + && (cdm->pos.cookie.target != NULL)) { + if ((cdm->pos.generations[CAM_TARGET_GENERATION] != + bus->generation)) { + mtx_unlock(&bus->eb_mtx); + cdm->status = CAM_DEV_MATCH_LIST_CHANGED; + return (0); + } + target = (struct cam_et *)cdm->pos.cookie.target; + target->refcount++; + } else + target = NULL; + mtx_unlock(&bus->eb_mtx); + + return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); } static int xptedttargetfunc(struct cam_et 
*target, void *arg) { struct ccb_dev_match *cdm; + struct cam_eb *bus; + struct cam_ed *device; cdm = (struct ccb_dev_match *)arg; + bus = target->bus; /* * If there is a device list generation recorded, check it to * make sure the device list hasn't changed. */ + mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) - && (cdm->pos.cookie.bus == target->bus) + && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) - && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) - && (cdm->pos.generations[CAM_DEV_GENERATION] != - target->generation)) { - cdm->status = CAM_DEV_MATCH_LIST_CHANGED; - return(0); - } + && (cdm->pos.cookie.device != NULL)) { + if (cdm->pos.generations[CAM_DEV_GENERATION] != + target->generation) { + mtx_unlock(&bus->eb_mtx); + cdm->status = CAM_DEV_MATCH_LIST_CHANGED; + return(0); + } + device = (struct cam_ed *)cdm->pos.cookie.device; + device->refcount++; + } else + device = NULL; + mtx_unlock(&bus->eb_mtx); - if ((cdm->pos.position_type & CAM_DEV_POS_BUS) - && (cdm->pos.cookie.bus == target->bus) - && (cdm->pos.position_type & CAM_DEV_POS_TARGET) - && (cdm->pos.cookie.target == target) - && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) - && (cdm->pos.cookie.device != NULL)) - return(xptdevicetraverse(target, - (struct cam_ed *)cdm->pos.cookie.device, - xptedtdevicefunc, arg)); - else - return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); + return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); } static int xptedtdevicefunc(struct cam_ed *device, void *arg) { - + struct cam_eb *bus; + struct cam_periph *periph; struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; + bus = device->target->bus; /* * If our position is for something deeper in the tree, that means @@ -1756,33 +1742,31 @@ xptedtdevicefunc(struct cam_ed *device, void *arg) * If there is a peripheral list 
generation recorded, make sure * it hasn't changed. */ + xpt_lock_buses(); + mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) - && (device->target->bus == cdm->pos.cookie.bus) - && (cdm->pos.position_type & CAM_DEV_POS_TARGET) - && (device->target == cdm->pos.cookie.target) - && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) - && (device == cdm->pos.cookie.device) - && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) - && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) - && (cdm->pos.generations[CAM_PERIPH_GENERATION] != - device->generation)){ - cdm->status = CAM_DEV_MATCH_LIST_CHANGED; - return(0); - } - - if ((cdm->pos.position_type & CAM_DEV_POS_BUS) - && (cdm->pos.cookie.bus == device->target->bus) + && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == device->target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) - && (cdm->pos.cookie.periph != NULL)) - return(xptperiphtraverse(device, - (struct cam_periph *)cdm->pos.cookie.periph, - xptedtperiphfunc, arg)); - else - return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); + && (cdm->pos.cookie.periph != NULL)) { + if (cdm->pos.generations[CAM_PERIPH_GENERATION] != + device->generation) { + mtx_unlock(&bus->eb_mtx); + xpt_unlock_buses(); + cdm->status = CAM_DEV_MATCH_LIST_CHANGED; + return(0); + } + periph = (struct cam_periph *)cdm->pos.cookie.periph; + periph->refcount++; + } else + periph = NULL; + mtx_unlock(&bus->eb_mtx); + xpt_unlock_buses(); + + return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); } static int @@ -1858,6 +1842,7 @@ xptedtperiphfunc(struct cam_periph *periph, void *arg) static int xptedtmatch(struct ccb_dev_match *cdm) { + struct cam_eb *bus; int ret; cdm->num_matches = 0; @@ -1866,19 +1851,22 @@ xptedtmatch(struct ccb_dev_match *cdm) * Check the bus list generation. 
If it has changed, the user * needs to reset everything and start over. */ + xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) - && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) - && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) { - cdm->status = CAM_DEV_MATCH_LIST_CHANGED; - return(0); - } + && (cdm->pos.cookie.bus != NULL)) { + if (cdm->pos.generations[CAM_BUS_GENERATION] != + xsoftc.bus_generation) { + xpt_unlock_buses(); + cdm->status = CAM_DEV_MATCH_LIST_CHANGED; + return(0); + } + bus = (struct cam_eb *)cdm->pos.cookie.bus; + bus->refcount++; + } else + bus = NULL; + xpt_unlock_buses(); - if ((cdm->pos.position_type & CAM_DEV_POS_BUS) - && (cdm->pos.cookie.bus != NULL)) - ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, - xptedtbusfunc, cdm); - else - ret = xptbustraverse(NULL, xptedtbusfunc, cdm); + ret = xptbustraverse(bus, xptedtbusfunc, cdm); /* * If we get back 0, that means that we had to stop before fully @@ -1895,29 +1883,29 @@ xptedtmatch(struct ccb_dev_match *cdm) static int xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) { + struct cam_periph *periph; struct ccb_dev_match *cdm; cdm = (struct ccb_dev_match *)arg; + xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv == pdrv) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) - && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) - && (cdm->pos.generations[CAM_PERIPH_GENERATION] != - (*pdrv)->generation)) { - cdm->status = CAM_DEV_MATCH_LIST_CHANGED; - return(0); - } + && (cdm->pos.cookie.periph != NULL)) { + if (cdm->pos.generations[CAM_PERIPH_GENERATION] != + (*pdrv)->generation) { + xpt_unlock_buses(); + cdm->status = CAM_DEV_MATCH_LIST_CHANGED; + return(0); + } + periph = (struct cam_periph *)cdm->pos.cookie.periph; + periph->refcount++; + } else + periph = NULL; + xpt_unlock_buses(); - if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) - && (cdm->pos.cookie.pdrv == pdrv) - && (cdm->pos.position_type & 
CAM_DEV_POS_PERIPH) - && (cdm->pos.cookie.periph != NULL)) - return(xptpdperiphtraverse(pdrv, - (struct cam_periph *)cdm->pos.cookie.periph, - xptplistperiphfunc, arg)); - else - return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); + return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); } static int @@ -2066,35 +2054,31 @@ xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) int retval; retval = 1; - - xpt_lock_buses(); - for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses)); - bus != NULL; - bus = next_bus) { - + if (start_bus) + bus = start_bus; + else { + xpt_lock_buses(); + bus = TAILQ_FIRST(&xsoftc.xpt_busses); + if (bus == NULL) { + xpt_unlock_buses(); + return (retval); + } bus->refcount++; - - /* - * XXX The locking here is obviously very complex. We - * should work to simplify it. - */ xpt_unlock_buses(); - CAM_SIM_LOCK(bus->sim); + } + for (; bus != NULL; bus = next_bus) { retval = tr_func(bus, arg); - CAM_SIM_UNLOCK(bus->sim); - + if (retval == 0) { + xpt_release_bus(bus); + break; + } xpt_lock_buses(); next_bus = TAILQ_NEXT(bus, links); + if (next_bus) + next_bus->refcount++; xpt_unlock_buses(); - xpt_release_bus(bus); - - if (retval == 0) - return(retval); - xpt_lock_buses(); } - xpt_unlock_buses(); - return(retval); } @@ -2105,24 +2089,32 @@ xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, struct cam_et *target, *next_target; int retval; - mtx_assert(bus->sim->mtx, MA_OWNED); retval = 1; - for (target = (start_target ? 
start_target : - TAILQ_FIRST(&bus->et_entries)); - target != NULL; target = next_target) { - + if (start_target) + target = start_target; + else { + mtx_lock(&bus->eb_mtx); + target = TAILQ_FIRST(&bus->et_entries); + if (target == NULL) { + mtx_unlock(&bus->eb_mtx); + return (retval); + } target->refcount++; - + mtx_unlock(&bus->eb_mtx); + } + for (; target != NULL; target = next_target) { retval = tr_func(target, arg); - + if (retval == 0) { + xpt_release_target(target); + break; + } + mtx_lock(&bus->eb_mtx); next_target = TAILQ_NEXT(target, links); - + if (next_target) + next_target->refcount++; + mtx_unlock(&bus->eb_mtx); xpt_release_target(target); - - if (retval == 0) - return(retval); } - return(retval); } @@ -2130,36 +2122,39 @@ static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg) { + struct cam_eb *bus; struct cam_ed *device, *next_device; int retval; - mtx_assert(target->bus->sim->mtx, MA_OWNED); retval = 1; - for (device = (start_device ? start_device : - TAILQ_FIRST(&target->ed_entries)); - device != NULL; - device = next_device) { - - /* - * Hold a reference so the current device does not go away - * on us. - */ + bus = target->bus; + if (start_device) + device = start_device; + else { + mtx_lock(&bus->eb_mtx); + device = TAILQ_FIRST(&target->ed_entries); + if (device == NULL) { + mtx_unlock(&bus->eb_mtx); + return (retval); + } device->refcount++; - + mtx_unlock(&bus->eb_mtx); + } + for (; device != NULL; device = next_device) { + mtx_lock(&device->device_mtx); retval = tr_func(device, arg); - - /* - * Grab our next pointer before we release the current - * device. 
- */ + mtx_unlock(&device->device_mtx); + if (retval == 0) { + xpt_release_device(device); + break; + } + mtx_lock(&bus->eb_mtx); next_device = TAILQ_NEXT(device, links); - + if (next_device) + next_device->refcount++; + mtx_unlock(&bus->eb_mtx); xpt_release_device(device); - - if (retval == 0) - return(retval); } - return(retval); } @@ -2167,56 +2162,48 @@ static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { + struct cam_eb *bus; struct cam_periph *periph, *next_periph; int retval; retval = 1; - mtx_assert(device->sim->mtx, MA_OWNED); - xpt_lock_buses(); - for (periph = (start_periph ? start_periph : - SLIST_FIRST(&device->periphs)); - periph != NULL; - periph = next_periph) { - - - /* - * In this case, we want to show peripherals that have been - * invalidated, but not peripherals that are scheduled to - * be freed. So instead of calling cam_periph_acquire(), - * which will fail if the periph has been invalidated, we - * just check for the free flag here. If it is in the - * process of being freed, we skip to the next periph. - */ - if (periph->flags & CAM_PERIPH_FREE) { - next_periph = SLIST_NEXT(periph, periph_links); - continue; + bus = device->target->bus; + if (start_periph) + periph = start_periph; + else { + xpt_lock_buses(); + mtx_lock(&bus->eb_mtx); + periph = SLIST_FIRST(&device->periphs); + while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) + periph = SLIST_NEXT(periph, periph_links); + if (periph == NULL) { + mtx_unlock(&bus->eb_mtx); + xpt_unlock_buses(); + return (retval); } - - /* - * Acquire a reference to this periph while we call the - * traversal function, so it can't go away. - */ periph->refcount++; - + mtx_unlock(&bus->eb_mtx); + xpt_unlock_buses(); + } + for (; periph != NULL; periph = next_periph) { retval = tr_func(periph, arg); - - /* - * Grab the next peripheral before we release this one, so - * our next pointer is still valid. 
- */ + if (retval == 0) { + cam_periph_release_locked(periph); + break; + } + xpt_lock_buses(); + mtx_lock(&bus->eb_mtx); next_periph = SLIST_NEXT(periph, periph_links); - - cam_periph_release_locked_buses(periph); - - if (retval == 0) - goto bailout_done; + while (next_periph != NULL && + (next_periph->flags & CAM_PERIPH_FREE) != 0) + next_periph = SLIST_NEXT(periph, periph_links); + if (next_periph) + next_periph->refcount++; + mtx_unlock(&bus->eb_mtx); + xpt_unlock_buses(); + cam_periph_release_locked(periph); } - -bailout_done: - - xpt_unlock_buses(); - return(retval); } @@ -2254,57 +2241,42 @@ xptpdperiphtraverse(struct periph_driver **pdrv, xpt_periphfunc_t *tr_func, void *arg) { struct cam_periph *periph, *next_periph; - struct cam_sim *sim; int retval; retval = 1; - xpt_lock_buses(); - for (periph = (start_periph ? start_periph : - TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; - periph = next_periph) { - - - /* - * In this case, we want to show peripherals that have been - * invalidated, but not peripherals that are scheduled to - * be freed. So instead of calling cam_periph_acquire(), - * which will fail if the periph has been invalidated, we - * just check for the free flag here. If it is free, we - * skip to the next periph. - */ - if (periph->flags & CAM_PERIPH_FREE) { - next_periph = TAILQ_NEXT(periph, unit_links); - continue; + if (start_periph) + periph = start_periph; + else { + xpt_lock_buses(); + periph = TAILQ_FIRST(&(*pdrv)->units); + while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) + periph = TAILQ_NEXT(periph, unit_links); + if (periph == NULL) { + xpt_unlock_buses(); + return (retval); } - - /* - * Acquire a reference to this periph while we call the - * traversal function, so it can't go away. 
- */ periph->refcount++; - sim = periph->sim; xpt_unlock_buses(); - CAM_SIM_LOCK(sim); - xpt_lock_buses(); + } + for (; periph != NULL; periph = next_periph) { + cam_periph_lock(periph); retval = tr_func(periph, arg); - - /* - * Grab the next peripheral before we release this one, so - * our next pointer is still valid. - */ + cam_periph_unlock(periph); + if (retval == 0) { + cam_periph_release(periph); + break; + } + xpt_lock_buses(); next_periph = TAILQ_NEXT(periph, unit_links); - - cam_periph_release_locked_buses(periph); - CAM_SIM_UNLOCK(sim); - - if (retval == 0) - goto bailout_done; + while (next_periph != NULL && + (next_periph->flags & CAM_PERIPH_FREE) != 0) + next_periph = TAILQ_NEXT(periph, unit_links); + if (next_periph) + next_periph->refcount++; + xpt_unlock_buses(); + cam_periph_release(periph); } -bailout_done: - - xpt_unlock_buses(); - return(retval); } @@ -2450,12 +2422,14 @@ xptsetasyncbusfunc(struct cam_eb *bus, void *arg) bus->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); + xpt_path_lock(&path); xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); csa->callback(csa->callback_arg, AC_PATH_REGISTERED, &path, &cpi); + xpt_path_unlock(&path); xpt_release_path(&path); return(1); @@ -2475,6 +2449,8 @@ void xpt_action_default(union ccb *start_ccb) { struct cam_path *path; + struct cam_sim *sim; + int lock; path = start_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n")); @@ -2523,14 +2499,18 @@ xpt_action_default(union ccb *start_ccb) case XPT_RESET_DEV: case XPT_ENG_EXEC: case XPT_SMP_IO: + { + struct cam_devq *devq; + + devq = path->bus->sim->devq; + mtx_lock(&devq->send_mtx); cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); - if (xpt_schedule_devq(path->bus->sim->devq, path->device)) - xpt_run_devq(path->bus->sim->devq); + if (xpt_schedule_devq(devq, path->device) != 0) + xpt_run_devq(devq); + mtx_unlock(&devq->send_mtx); break; + } case 
XPT_CALC_GEOMETRY: - { - struct cam_sim *sim; - /* Filter out garbage */ if (start_ccb->ccg.block_size == 0 || start_ccb->ccg.volume_size == 0) { @@ -2558,10 +2538,7 @@ xpt_action_default(union ccb *start_ccb) break; } #endif - sim = path->bus->sim; - (*(sim->sim_action))(sim, start_ccb); - break; - } + goto call_sim; case XPT_ABORT: { union ccb* abort_ccb; @@ -2622,21 +2599,18 @@ xpt_action_default(union ccb *start_ccb) case XPT_NOTIFY_ACKNOWLEDGE: case XPT_GET_SIM_KNOB: case XPT_SET_SIM_KNOB: - { - struct cam_sim *sim; - - sim = path->bus->sim; - (*(sim->sim_action))(sim, start_ccb); - break; - } + case XPT_GET_TRAN_SETTINGS: + case XPT_SET_TRAN_SETTINGS: case XPT_PATH_INQ: - { - struct cam_sim *sim; - +call_sim: sim = path->bus->sim; + lock = (mtx_owned(sim->mtx) == 0); + if (lock) + CAM_SIM_LOCK(sim); (*(sim->sim_action))(sim, start_ccb); + if (lock) + CAM_SIM_UNLOCK(sim); break; - } case XPT_PATH_STATS: start_ccb->cpis.last_reset = path->bus->last_reset; start_ccb->ccb_h.status = CAM_REQ_CMP; @@ -2798,11 +2772,6 @@ xpt_action_default(union ccb *start_ccb) position_type = CAM_DEV_POS_PDRV; } - /* - * Note that we drop the SIM lock here, because the EDT - * traversal code needs to do its own locking. - */ - CAM_SIM_UNLOCK(xpt_path_sim(cdm->ccb_h.path)); switch(position_type & CAM_DEV_POS_TYPEMASK) { case CAM_DEV_POS_EDT: xptedtmatch(cdm); @@ -2814,7 +2783,6 @@ xpt_action_default(union ccb *start_ccb) cdm->status = CAM_DEV_MATCH_ERROR; break; } - CAM_SIM_LOCK(xpt_path_sim(cdm->ccb_h.path)); if (cdm->status == CAM_DEV_MATCH_ERROR) start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; @@ -2869,6 +2837,8 @@ xpt_action_default(union ccb *start_ccb) break; } cur_entry->event_enable = csa->event_enable; + cur_entry->event_lock = + mtx_owned(path->bus->sim->mtx) ? 
1 : 0; cur_entry->callback_arg = csa->callback_arg; cur_entry->callback = csa->callback; SLIST_INSERT_HEAD(async_head, cur_entry, links); @@ -2903,6 +2873,7 @@ xpt_action_default(union ccb *start_ccb) } } + mtx_lock(&dev->sim->devq->send_mtx); if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { @@ -2955,6 +2926,7 @@ xpt_action_default(union ccb *start_ccb) start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } + mtx_unlock(&dev->sim->devq->send_mtx); if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); @@ -2964,7 +2936,6 @@ xpt_action_default(union ccb *start_ccb) } case XPT_DEBUG: { struct cam_path *oldpath; - struct cam_sim *oldsim; /* Check that all request bits are supported. */ if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { @@ -2974,15 +2945,9 @@ xpt_action_default(union ccb *start_ccb) cam_dflags = CAM_DEBUG_NONE; if (cam_dpath != NULL) { - /* To release the old path we must hold proper lock. */ oldpath = cam_dpath; cam_dpath = NULL; - oldsim = xpt_path_sim(oldpath); - CAM_SIM_UNLOCK(xpt_path_sim(start_ccb->ccb_h.path)); - CAM_SIM_LOCK(oldsim); xpt_free_path(oldpath); - CAM_SIM_UNLOCK(oldsim); - CAM_SIM_LOCK(xpt_path_sim(start_ccb->ccb_h.path)); } if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, @@ -3029,39 +2994,41 @@ xpt_polled_action(union ccb *start_ccb) struct cam_devq *devq; struct cam_ed *dev; - timeout = start_ccb->ccb_h.timeout * 10; sim = start_ccb->ccb_h.path->bus->sim; devq = sim->devq; dev = start_ccb->ccb_h.path->device; - mtx_assert(sim->mtx, MA_OWNED); - - /* Don't use ISR for this SIM while polling. */ - sim->flags |= CAM_SIM_POLLED; + mtx_unlock(&dev->device_mtx); /* * Steal an opening so that no other queued requests * can get it before us while we simulate interrupts. 
*/ + mtx_lock(&devq->send_mtx); dev->ccbq.devq_openings--; dev->ccbq.dev_openings--; - - while(((devq != NULL && devq->send_openings <= 0) || - dev->ccbq.dev_openings < 0) && (--timeout > 0)) { + while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && + (--timeout > 0)) { + mtx_unlock(&devq->send_mtx); DELAY(100); + CAM_SIM_LOCK(sim); (*(sim->sim_poll))(sim); - camisr_runqueue(sim); + CAM_SIM_UNLOCK(sim); + camisr_runqueue(); + mtx_lock(&devq->send_mtx); } - dev->ccbq.devq_openings++; dev->ccbq.dev_openings++; + mtx_unlock(&devq->send_mtx); if (timeout != 0) { xpt_action(start_ccb); while(--timeout > 0) { + CAM_SIM_LOCK(sim); (*(sim->sim_poll))(sim); - camisr_runqueue(sim); + CAM_SIM_UNLOCK(sim); + camisr_runqueue(); if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; @@ -3080,8 +3047,7 @@ xpt_polled_action(union ccb *start_ccb) start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } - /* We will use CAM ISR for this SIM again. */ - sim->flags &= ~CAM_SIM_POLLED; + mtx_lock(&dev->device_mtx); } /* @@ -3089,38 +3055,14 @@ xpt_polled_action(union ccb *start_ccb) * target device has space for more transactions. 
*/ void -xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) +xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) { - struct cam_ed *device; - int runq = 0; - - mtx_assert(perph->sim->mtx, MA_OWNED); - CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); - device = perph->path->device; - if (periph_is_queued(perph)) { - /* Simply reorder based on new priority */ - CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, - (" change priority to %d\n", new_priority)); - if (new_priority < perph->pinfo.priority) { - camq_change_priority(&device->drvq, - perph->pinfo.index, - new_priority); - runq = 1; - } - } else { - /* New entry on the queue */ - CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, - (" added periph to queue\n")); - perph->pinfo.priority = new_priority; - perph->pinfo.generation = ++device->drvq.generation; - camq_insert(&device->drvq, &perph->pinfo); - runq = 1; - } - if (runq != 0) { - CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, - (" calling xpt_run_dev_allocq\n")); - xpt_run_dev_allocq(device); + CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); + cam_periph_assert(periph, MA_OWNED); + if (new_priority < periph->scheduled_priority) { + periph->scheduled_priority = new_priority; + xpt_run_allocq(periph, 0); } } @@ -3133,7 +3075,7 @@ xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) * started the queue, return 0 so the caller doesn't attempt * to run the queue. 
*/ -int +static int xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, u_int32_t new_priority) { @@ -3173,51 +3115,79 @@ xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, } static void -xpt_run_dev_allocq(struct cam_ed *device) +xpt_run_allocq_task(void *context, int pending) { - struct camq *drvq; + struct cam_periph *periph = context; - if (device->ccbq.devq_allocating) + cam_periph_lock(periph); + periph->flags &= ~CAM_PERIPH_RUN_TASK; + xpt_run_allocq(periph, 1); + cam_periph_unlock(periph); + cam_periph_release(periph); +} + +static void +xpt_run_allocq(struct cam_periph *periph, int sleep) +{ + struct cam_ed *device; + union ccb *ccb; + uint32_t prio; + + cam_periph_assert(periph, MA_OWNED); + if (periph->periph_allocating) return; - device->ccbq.devq_allocating = 1; - CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq(%p)\n", device)); - drvq = &device->drvq; - while ((drvq->entries > 0) && - (device->ccbq.devq_openings > 0 || - CAMQ_GET_PRIO(drvq) <= CAM_PRIORITY_OOB) && - (device->ccbq.queue.qfrozen_cnt == 0)) { - union ccb *work_ccb; - struct cam_periph *drv; - - KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: " - "Device on queue without any work to do")); - if ((work_ccb = xpt_get_ccb(device)) != NULL) { - drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); - xpt_setup_ccb(&work_ccb->ccb_h, drv->path, - drv->pinfo.priority); + periph->periph_allocating = 1; + CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); + device = periph->path->device; + ccb = NULL; +restart: + while ((prio = min(periph->scheduled_priority, + periph->immediate_priority)) != CAM_PRIORITY_NONE && + (periph->periph_allocated - (ccb != NULL ? 
1 : 0) < + device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { + + if (ccb == NULL && + (ccb = xpt_get_ccb_nowait(periph)) == NULL) { + if (sleep) { + ccb = xpt_get_ccb(periph); + goto restart; + } + if (periph->flags & CAM_PERIPH_RUN_TASK) + break; + xpt_lock_buses(); + periph->refcount++; /* Unconditionally acquire */ + xpt_unlock_buses(); + periph->flags |= CAM_PERIPH_RUN_TASK; + taskqueue_enqueue(xsoftc.xpt_taskq, + &periph->periph_run_task); + break; + } + xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); + if (prio == periph->immediate_priority) { + periph->immediate_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, - ("calling periph start\n")); - drv->periph_start(drv, work_ccb); + ("waking cam_periph_getccb()\n")); + SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, + periph_links.sle); + wakeup(&periph->ccb_list); } else { - /* - * Malloc failure in alloc_ccb - */ - /* - * XXX add us to a list to be run from free_ccb - * if we don't have any ccbs active on this - * device queue otherwise we may never get run - * again. 
- */ - break; + periph->scheduled_priority = CAM_PRIORITY_NONE; + CAM_DEBUG_PRINT(CAM_DEBUG_XPT, + ("calling periph_start()\n")); + periph->periph_start(periph, ccb); } + ccb = NULL; } - device->ccbq.devq_allocating = 0; + if (ccb != NULL) + xpt_release_ccb(ccb); + periph->periph_allocating = 0; } static void xpt_run_devq(struct cam_devq *devq) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; + int lock; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); @@ -3225,14 +3195,12 @@ xpt_run_devq(struct cam_devq *devq) while ((devq->send_queue.entries > 0) && (devq->send_openings > 0) && (devq->send_queue.qfrozen_cnt <= 1)) { - struct cam_ed_qinfo *qinfo; struct cam_ed *device; union ccb *work_ccb; struct cam_sim *sim; - qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, + device = (struct cam_ed *)camq_remove(&devq->send_queue, CAMQ_HEAD); - device = qinfo->device; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); @@ -3244,7 +3212,7 @@ xpt_run_devq(struct cam_devq *devq) if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { - mtx_lock(&xsoftc.xpt_lock); + mtx_lock(&xsoftc.xpt_highpower_lock); if (xsoftc.num_highpower <= 0) { /* * We got a high power command, but we @@ -3252,12 +3220,11 @@ xpt_run_devq(struct cam_devq *devq) * the device queue until we have a slot * available. 
*/ - xpt_freeze_devq(work_ccb->ccb_h.path, 1); - STAILQ_INSERT_TAIL(&xsoftc.highpowerq, - work_ccb->ccb_h.path->device, + xpt_freeze_devq_device(device, 1); + STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, highpowerq_entry); - mtx_unlock(&xsoftc.xpt_lock); + mtx_unlock(&xsoftc.xpt_highpower_lock); continue; } else { /* @@ -3266,15 +3233,14 @@ xpt_run_devq(struct cam_devq *devq) */ xsoftc.num_highpower--; } - mtx_unlock(&xsoftc.xpt_lock); + mtx_unlock(&xsoftc.xpt_highpower_lock); } cam_ccbq_remove_ccb(&device->ccbq, work_ccb); cam_ccbq_send_ccb(&device->ccbq, work_ccb); - devq->send_openings--; devq->send_active++; - xpt_schedule_devq(devq, device); + mtx_unlock(&devq->send_mtx); if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { /* @@ -3318,13 +3284,18 @@ xpt_run_devq(struct cam_devq *devq) } /* - * Device queues can be shared among multiple sim instances - * that reside on different busses. Use the SIM in the queue - * CCB's path, rather than the one in the bus that was passed - * into this function. + * Device queues can be shared among multiple SIM instances + * that reside on different busses. Use the SIM from the + * queued device, rather than the one from the calling bus. 
*/ - sim = work_ccb->ccb_h.path->bus->sim; + sim = device->sim; + lock = (mtx_owned(sim->mtx) == 0); + if (lock) + CAM_SIM_LOCK(sim); (*(sim->sim_action))(sim, work_ccb); + if (lock) + CAM_SIM_UNLOCK(sim); + mtx_lock(&devq->send_mtx); } devq->send_queue.qfrozen_cnt--; } @@ -3400,26 +3371,9 @@ xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *periph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { - struct cam_path *path; - struct cam_eb *bus = NULL; - cam_status status; - - path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_WAITOK); - bus = xpt_find_bus(path_id); - if (bus != NULL) - CAM_SIM_LOCK(bus->sim); - status = xpt_compile_path(path, periph, path_id, target_id, lun_id); - if (bus != NULL) { - CAM_SIM_UNLOCK(bus->sim); - xpt_release_bus(bus); - } - if (status != CAM_REQ_CMP) { - free(path, M_CAMPATH); - path = NULL; - } - *new_path_ptr = path; - return (status); + return (xpt_create_path(new_path_ptr, periph, path_id, target_id, + lun_id)); } cam_status @@ -3443,6 +3397,8 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, if (bus == NULL) { status = CAM_PATH_INVALID; } else { + xpt_lock_buses(); + mtx_lock(&bus->eb_mtx); target = xpt_find_target(bus, target_id); if (target == NULL) { /* Create one */ @@ -3455,6 +3411,7 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, target = new_target; } } + xpt_unlock_buses(); if (target != NULL) { device = xpt_find_device(target, lun_id); if (device == NULL) { @@ -3472,6 +3429,7 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, } } } + mtx_unlock(&bus->eb_mtx); } /* @@ -3494,6 +3452,32 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, return (status); } +cam_status +xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) +{ + struct cam_path *new_path; + + new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); + if (new_path == NULL) + 
return(CAM_RESRC_UNAVAIL); + xpt_copy_path(new_path, path); + *new_path_ptr = new_path; + return (CAM_REQ_CMP); +} + +void +xpt_copy_path(struct cam_path *new_path, struct cam_path *path) +{ + + *new_path = *path; + if (path->bus != NULL) + xpt_acquire_bus(path->bus); + if (path->target != NULL) + xpt_acquire_target(path->target); + if (path->device != NULL) + xpt_acquire_device(path->device); +} + void xpt_release_path(struct cam_path *path) { @@ -3688,11 +3672,6 @@ xpt_path_string(struct cam_path *path, char *str, size_t str_len) { struct sbuf sb; -#ifdef INVARIANTS - if (path != NULL && path->bus != NULL) - mtx_assert(path->bus->sim->mtx, MA_OWNED); -#endif - sbuf_new(&sb, str, str_len, 0); if (path == NULL) @@ -3761,7 +3740,6 @@ xpt_path_sim(struct cam_path *path) struct cam_periph* xpt_path_periph(struct cam_path *path) { - mtx_assert(path->bus->sim->mtx, MA_OWNED); return (path->periph); } @@ -3814,28 +3792,18 @@ xpt_path_legacy_ata_id(struct cam_path *path) void xpt_release_ccb(union ccb *free_ccb) { - struct cam_path *path; struct cam_ed *device; - struct cam_eb *bus; - struct cam_sim *sim; + struct cam_periph *periph; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); - path = free_ccb->ccb_h.path; - device = path->device; - bus = path->bus; - sim = bus->sim; - - mtx_assert(sim->mtx, MA_OWNED); + xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); + device = free_ccb->ccb_h.path->device; + periph = free_ccb->ccb_h.path->periph; + xpt_free_ccb(free_ccb); + periph->periph_allocated--; cam_ccbq_release_opening(&device->ccbq); - if (sim->ccb_count > sim->max_ccbs) { - xpt_free_ccb(free_ccb); - sim->ccb_count--; - } else { - SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h, - xpt_links.sle); - } - xpt_run_dev_allocq(device); + xpt_run_allocq(periph, 0); } /* Functions accessed by SIM drivers */ @@ -3868,12 +3836,13 @@ xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) sim->bus_id = bus; new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 
- M_CAMXPT, M_NOWAIT); + M_CAMXPT, M_NOWAIT|M_ZERO); if (new_bus == NULL) { /* Couldn't satisfy request */ return (CAM_RESRC_UNAVAIL); } + mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); TAILQ_INIT(&new_bus->et_entries); cam_sim_hold(sim); new_bus->sim = sim; @@ -4079,80 +4048,138 @@ xpt_async_string(u_int32_t async_code) return ("AC_UNKNOWN"); } -void -xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) +static int +xpt_async_size(u_int32_t async_code) { - struct cam_eb *bus; - struct cam_et *target, *next_target; - struct cam_ed *device, *next_device; - mtx_assert(path->bus->sim->mtx, MA_OWNED); - CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, - ("xpt_async(%s)\n", xpt_async_string(async_code))); + switch (async_code) { + case AC_BUS_RESET: return (0); + case AC_UNSOL_RESEL: return (0); + case AC_SCSI_AEN: return (0); + case AC_SENT_BDR: return (0); + case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); + case AC_PATH_DEREGISTERED: return (0); + case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); + case AC_LOST_DEVICE: return (0); + case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); + case AC_INQ_CHANGED: return (0); + case AC_GETDEV_CHANGED: return (0); + case AC_CONTRACT: return (sizeof(struct ac_contract)); + case AC_ADVINFO_CHANGED: return (-1); + case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); + } + return (0); +} + +static int +xpt_async_process_dev(struct cam_ed *device, void *arg) +{ + union ccb *ccb = arg; + struct cam_path *path = ccb->ccb_h.path; + void *async_arg = ccb->casync.async_arg_ptr; + u_int32_t async_code = ccb->casync.async_code; + int relock; + + if (path->device != device + && path->device->lun_id != CAM_LUN_WILDCARD + && device->lun_id != CAM_LUN_WILDCARD) + return (1); /* - * Most async events come from a CAM interrupt context. 
In - * a few cases, the error recovery code at the peripheral layer, - * which may run from our SWI or a process context, may signal - * deferred events with a call to xpt_async. + * The async callback could free the device. + * If it is a broadcast async, it doesn't hold + * device reference, so take our own reference. */ + xpt_acquire_device(device); - bus = path->bus; + /* + * If async for specific device is to be delivered to + * the wildcard client, take the specific device lock. + * XXX: We may need a way for client to specify it. + */ + if ((device->lun_id == CAM_LUN_WILDCARD && + path->device->lun_id != CAM_LUN_WILDCARD) || + (device->target->target_id == CAM_TARGET_WILDCARD && + path->target->target_id != CAM_TARGET_WILDCARD) || + (device->target->bus->path_id == CAM_BUS_WILDCARD && + path->target->bus->path_id != CAM_BUS_WILDCARD)) { + mtx_unlock(&device->device_mtx); + xpt_path_lock(path); + relock = 1; + } else + relock = 0; - if (async_code == AC_BUS_RESET) { - /* Update our notion of when the last reset occurred */ - microtime(&bus->last_reset); - } + (*(device->target->bus->xport->async))(async_code, + device->target->bus, device->target, device, async_arg); + xpt_async_bcast(&device->asyncs, async_code, path, async_arg); - for (target = TAILQ_FIRST(&bus->et_entries); - target != NULL; - target = next_target) { + if (relock) { + xpt_path_unlock(path); + mtx_lock(&device->device_mtx); + } + xpt_release_device(device); + return (1); +} - next_target = TAILQ_NEXT(target, links); +static int +xpt_async_process_tgt(struct cam_et *target, void *arg) +{ + union ccb *ccb = arg; + struct cam_path *path = ccb->ccb_h.path; - if (path->target != target - && path->target->target_id != CAM_TARGET_WILDCARD - && target->target_id != CAM_TARGET_WILDCARD) - continue; + if (path->target != target + && path->target->target_id != CAM_TARGET_WILDCARD + && target->target_id != CAM_TARGET_WILDCARD) + return (1); - if (async_code == AC_SENT_BDR) { - /* Update our notion of 
when the last reset occurred */ - microtime(&path->target->last_reset); - } + if (ccb->casync.async_code == AC_SENT_BDR) { + /* Update our notion of when the last reset occurred */ + microtime(&target->last_reset); + } - for (device = TAILQ_FIRST(&target->ed_entries); - device != NULL; - device = next_device) { + return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); +} - next_device = TAILQ_NEXT(device, links); +static void +xpt_async_process(struct cam_periph *periph, union ccb *ccb) +{ + struct cam_eb *bus; + struct cam_path *path; + void *async_arg; + u_int32_t async_code; - if (path->device != device - && path->device->lun_id != CAM_LUN_WILDCARD - && device->lun_id != CAM_LUN_WILDCARD) - continue; - /* - * The async callback could free the device. - * If it is a broadcast async, it doesn't hold - * device reference, so take our own reference. - */ - xpt_acquire_device(device); - (*(bus->xport->async))(async_code, bus, - target, device, - async_arg); + path = ccb->ccb_h.path; + async_code = ccb->casync.async_code; + async_arg = ccb->casync.async_arg_ptr; + CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, + ("xpt_async(%s)\n", xpt_async_string(async_code))); + bus = path->bus; - xpt_async_bcast(&device->asyncs, async_code, - path, async_arg); - xpt_release_device(device); - } + if (async_code == AC_BUS_RESET) { + /* Update our notion of when the last reset occurred */ + microtime(&bus->last_reset); } + xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); + /* * If this wasn't a fully wildcarded async, tell all * clients that want all async events. 
*/ - if (bus != xpt_periph->path->bus) - xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code, - path, async_arg); + if (bus != xpt_periph->path->bus) { + xpt_path_lock(xpt_periph->path); + xpt_async_process_dev(xpt_periph->path->device, ccb); + xpt_path_unlock(xpt_periph->path); + } + + if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) + xpt_release_devq(path, 1, TRUE); + else + xpt_release_simq(path->bus->sim, TRUE); + if (ccb->casync.async_arg_size > 0) + free(async_arg, M_CAMXPT); + xpt_free_path(path); + xpt_free_ccb(ccb); } static void @@ -4161,6 +4188,7 @@ xpt_async_bcast(struct async_list *async_head, struct cam_path *path, void *async_arg) { struct async_node *cur_entry; + int lock; cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { @@ -4171,72 +4199,163 @@ xpt_async_bcast(struct async_list *async_head, * can delete its async callback entry. */ next_entry = SLIST_NEXT(cur_entry, links); - if ((cur_entry->event_enable & async_code) != 0) + if ((cur_entry->event_enable & async_code) != 0) { + lock = cur_entry->event_lock; + if (lock) + CAM_SIM_LOCK(path->device->sim); cur_entry->callback(cur_entry->callback_arg, async_code, path, async_arg); + if (lock) + CAM_SIM_UNLOCK(path->device->sim); + } cur_entry = next_entry; } } +void +xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) +{ + union ccb *ccb; + int size; + + ccb = xpt_alloc_ccb_nowait(); + if (ccb == NULL) { + xpt_print(path, "Can't allocate CCB to send %s\n", + xpt_async_string(async_code)); + return; + } + + if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { + xpt_print(path, "Can't allocate path to send %s\n", + xpt_async_string(async_code)); + xpt_free_ccb(ccb); + return; + } + ccb->ccb_h.path->periph = NULL; + ccb->ccb_h.func_code = XPT_ASYNC; + ccb->ccb_h.cbfcnp = xpt_async_process; + ccb->ccb_h.flags |= CAM_UNLOCKED; + ccb->casync.async_code = async_code; + ccb->casync.async_arg_size = 0; + size = 
xpt_async_size(async_code); + if (size > 0 && async_arg != NULL) { + ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); + if (ccb->casync.async_arg_ptr == NULL) { + xpt_print(path, "Can't allocate argument to send %s\n", + xpt_async_string(async_code)); + xpt_free_path(ccb->ccb_h.path); + xpt_free_ccb(ccb); + return; + } + memcpy(ccb->casync.async_arg_ptr, async_arg, size); + ccb->casync.async_arg_size = size; + } else if (size < 0) + ccb->casync.async_arg_size = size; + if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) + xpt_freeze_devq(path, 1); + else + xpt_freeze_simq(path->bus->sim, 1); + xpt_done(ccb); +} + static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { + + /* + * We only need to handle events for real devices. + */ + if (target->target_id == CAM_TARGET_WILDCARD + || device->lun_id == CAM_LUN_WILDCARD) + return; + printf("%s called\n", __func__); } -u_int32_t -xpt_freeze_devq(struct cam_path *path, u_int count) +static uint32_t +xpt_freeze_devq_device(struct cam_ed *dev, u_int count) { - struct cam_ed *dev = path->device; + struct cam_devq *devq; + uint32_t freeze; - mtx_assert(path->bus->sim->mtx, MA_OWNED); - CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq() %u->%u\n", + devq = dev->sim->devq; + mtx_assert(&devq->send_mtx, MA_OWNED); + CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, + ("xpt_freeze_devq_device(%d) %u->%u\n", count, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); - dev->ccbq.queue.qfrozen_cnt += count; + freeze = (dev->ccbq.queue.qfrozen_cnt += count); /* Remove frozen device from sendq. 
*/ - if (device_is_queued(dev)) { - camq_remove(&dev->sim->devq->send_queue, - dev->devq_entry.pinfo.index); - } - return (dev->ccbq.queue.qfrozen_cnt); + if (device_is_queued(dev)) + camq_remove(&devq->send_queue, dev->devq_entry.index); + return (freeze); +} + +u_int32_t +xpt_freeze_devq(struct cam_path *path, u_int count) +{ + struct cam_ed *dev = path->device; + struct cam_devq *devq; + uint32_t freeze; + + devq = dev->sim->devq; + mtx_lock(&devq->send_mtx); + CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); + freeze = xpt_freeze_devq_device(dev, count); + mtx_unlock(&devq->send_mtx); + return (freeze); } u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count) { + struct cam_devq *devq; + uint32_t freeze; - mtx_assert(sim->mtx, MA_OWNED); - sim->devq->send_queue.qfrozen_cnt += count; - return (sim->devq->send_queue.qfrozen_cnt); + devq = sim->devq; + mtx_lock(&devq->send_mtx); + freeze = (devq->send_queue.qfrozen_cnt += count); + mtx_unlock(&devq->send_mtx); + return (freeze); } static void xpt_release_devq_timeout(void *arg) { - struct cam_ed *device; + struct cam_ed *dev; + struct cam_devq *devq; - device = (struct cam_ed *)arg; - CAM_DEBUG_DEV(device, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); - xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE); + dev = (struct cam_ed *)arg; + CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); + devq = dev->sim->devq; + mtx_assert(&devq->send_mtx, MA_OWNED); + if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) + xpt_run_devq(devq); } void xpt_release_devq(struct cam_path *path, u_int count, int run_queue) { + struct cam_ed *dev; + struct cam_devq *devq; - mtx_assert(path->bus->sim->mtx, MA_OWNED); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", count, run_queue)); - xpt_release_devq_device(path->device, count, run_queue); + dev = path->device; + devq = dev->sim->devq; + mtx_lock(&devq->send_mtx); + if (xpt_release_devq_device(dev, 
count, run_queue)) + xpt_run_devq(dev->sim->devq); + mtx_unlock(&devq->send_mtx); } -void +static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) { + mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); @@ -4262,34 +4381,32 @@ xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) callout_stop(&dev->callout); dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; } - xpt_run_dev_allocq(dev); - if (run_queue == 0) - return; /* * Now that we are unfrozen schedule the * device so any pending transactions are * run. */ - if (xpt_schedule_devq(dev->sim->devq, dev)) - xpt_run_devq(dev->sim->devq); - } + xpt_schedule_devq(dev->sim->devq, dev); + } else + run_queue = 0; + return (run_queue); } void xpt_release_simq(struct cam_sim *sim, int run_queue) { - struct camq *sendq; + struct cam_devq *devq; - mtx_assert(sim->mtx, MA_OWNED); - sendq = &(sim->devq->send_queue); - if (sendq->qfrozen_cnt <= 0) { + devq = sim->devq; + mtx_lock(&devq->send_mtx); + if (devq->send_queue.qfrozen_cnt <= 0) { #ifdef INVARIANTS printf("xpt_release_simq: requested 1 > present %u\n", - sendq->qfrozen_cnt); + devq->send_queue.qfrozen_cnt); #endif } else - sendq->qfrozen_cnt--; - if (sendq->qfrozen_cnt == 0) { + devq->send_queue.qfrozen_cnt--; + if (devq->send_queue.qfrozen_cnt == 0) { /* * If there is a timeout scheduled to release this * sim queue, remove it. 
The queue frozen count is @@ -4306,6 +4423,7 @@ xpt_release_simq(struct cam_sim *sim, int run_queue) xpt_run_devq(sim->devq); } } + mtx_unlock(&devq->send_mtx); } /* @@ -4323,49 +4441,34 @@ xpt_release_simq_timeout(void *arg) void xpt_done(union ccb *done_ccb) { - struct cam_sim *sim; - int first; + struct cam_doneq *queue; + int run, hash; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n")); - if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { - /* - * Queue up the request for handling by our SWI handler - * any of the "non-immediate" type of ccbs. - */ - sim = done_ccb->ccb_h.path->bus->sim; - TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h, - sim_links.tqe); - done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; - if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED | - CAM_SIM_BATCH)) == 0) { - mtx_lock(&cam_simq_lock); - first = TAILQ_EMPTY(&cam_simq); - TAILQ_INSERT_TAIL(&cam_simq, sim, links); - mtx_unlock(&cam_simq_lock); - sim->flags |= CAM_SIM_ON_DONEQ; - if (first) - swi_sched(cambio_ih, 0); - } - } -} - -void -xpt_batch_start(struct cam_sim *sim) -{ + if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) + return; - KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set")); - sim->flags |= CAM_SIM_BATCH; + hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + + done_ccb->ccb_h.target_lun) % cam_num_doneqs; + queue = &cam_doneqs[hash]; + mtx_lock(&queue->cam_doneq_mtx); + run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); + STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); + done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; + mtx_unlock(&queue->cam_doneq_mtx); + if (run) + wakeup(&queue->cam_doneq); } void -xpt_batch_done(struct cam_sim *sim) +xpt_done_direct(union ccb *done_ccb) { - KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set")); - sim->flags &= ~CAM_SIM_BATCH; - if (!TAILQ_EMPTY(&sim->sim_doneq) && - (sim->flags & CAM_SIM_ON_DONEQ) == 0) - 
camisr_runqueue(sim); + CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n")); + if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) + return; + + xpt_done_process(&done_ccb->ccb_h); } union ccb * @@ -4398,32 +4501,64 @@ xpt_free_ccb(union ccb *free_ccb) /* * Get a CAM control block for the caller. Charge the structure to the device - * referenced by the path. If the this device has no 'credits' then the - * device already has the maximum number of outstanding operations under way - * and we return NULL. If we don't have sufficient resources to allocate more - * ccbs, we also return NULL. + * referenced by the path. If we don't have sufficient resources to allocate + * more ccbs, we return NULL. */ static union ccb * -xpt_get_ccb(struct cam_ed *device) +xpt_get_ccb_nowait(struct cam_periph *periph) { union ccb *new_ccb; - struct cam_sim *sim; - sim = device->sim; - if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) { - new_ccb = xpt_alloc_ccb_nowait(); - if (new_ccb == NULL) { - return (NULL); - } - SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h, - xpt_links.sle); - sim->ccb_count++; - } - cam_ccbq_take_opening(&device->ccbq); - SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle); + new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_NOWAIT); + if (new_ccb == NULL) + return (NULL); + periph->periph_allocated++; + cam_ccbq_take_opening(&periph->path->device->ccbq); + return (new_ccb); +} + +static union ccb * +xpt_get_ccb(struct cam_periph *periph) +{ + union ccb *new_ccb; + + cam_periph_unlock(periph); + new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_WAITOK); + cam_periph_lock(periph); + periph->periph_allocated++; + cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } +union ccb * +cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) +{ + struct ccb_hdr *ccb_h; + + CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); + cam_periph_assert(periph, MA_OWNED); + while ((ccb_h = 
SLIST_FIRST(&periph->ccb_list)) == NULL || + ccb_h->pinfo.priority != priority) { + if (priority < periph->immediate_priority) { + periph->immediate_priority = priority; + xpt_run_allocq(periph, 0); + } else + cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, + "cgticb", 0); + } + SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); + return ((union ccb *)ccb_h); +} + +static void +xpt_acquire_bus(struct cam_eb *bus) +{ + + xpt_lock_buses(); + bus->refcount++; + xpt_unlock_buses(); +} + static void xpt_release_bus(struct cam_eb *bus) { @@ -4434,12 +4569,13 @@ xpt_release_bus(struct cam_eb *bus) xpt_unlock_buses(); return; } - KASSERT(TAILQ_EMPTY(&bus->et_entries), - ("refcount is zero, but target list is not empty")); TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); + KASSERT(TAILQ_EMPTY(&bus->et_entries), + ("destroying bus, but target list is not empty")); cam_sim_release(bus->sim); + mtx_destroy(&bus->eb_mtx); free(bus, M_CAMXPT); } @@ -4448,7 +4584,8 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *cur_target, *target; - mtx_assert(bus->sim->mtx, MA_OWNED); + mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); + mtx_assert(&bus->eb_mtx, MA_OWNED); target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT|M_ZERO); if (target == NULL) @@ -4460,14 +4597,13 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) target->refcount = 1; target->generation = 0; target->luns = NULL; + mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); timevalclear(&target->last_reset); /* * Hold a reference to our parent bus so it * will not go away before we do. 
*/ - xpt_lock_buses(); bus->refcount++; - xpt_unlock_buses(); /* Insertion sort into our bus's target list */ cur_target = TAILQ_FIRST(&bus->et_entries); @@ -4483,17 +4619,32 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) } static void +xpt_acquire_target(struct cam_et *target) +{ + struct cam_eb *bus = target->bus; + + mtx_lock(&bus->eb_mtx); + target->refcount++; + mtx_unlock(&bus->eb_mtx); +} + +static void xpt_release_target(struct cam_et *target) { + struct cam_eb *bus = target->bus; - mtx_assert(target->bus->sim->mtx, MA_OWNED); - if (--target->refcount > 0) + mtx_lock(&bus->eb_mtx); + if (--target->refcount > 0) { + mtx_unlock(&bus->eb_mtx); return; + } + TAILQ_REMOVE(&bus->et_entries, target, links); + bus->generation++; + mtx_unlock(&bus->eb_mtx); KASSERT(TAILQ_EMPTY(&target->ed_entries), - ("refcount is zero, but device list is not empty")); - TAILQ_REMOVE(&target->bus->et_entries, target, links); - target->bus->generation++; - xpt_release_bus(target->bus); + ("destroying target, but device list is not empty")); + xpt_release_bus(bus); + mtx_destroy(&target->luns_mtx); if (target->luns) free(target->luns, M_CAMXPT); free(target, M_CAMXPT); @@ -4511,10 +4662,19 @@ xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, device->mintags = 1; device->maxtags = 1; - bus->sim->max_ccbs += device->ccbq.devq_openings; return (device); } +static void +xpt_destroy_device(void *context, int pending) +{ + struct cam_ed *device = context; + + mtx_lock(&device->device_mtx); + mtx_destroy(&device->device_mtx); + free(device, M_CAMDEV); +} + struct cam_ed * xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { @@ -4522,10 +4682,12 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) struct cam_devq *devq; cam_status status; - mtx_assert(target->bus->sim->mtx, MA_OWNED); + mtx_assert(&bus->eb_mtx, MA_OWNED); /* Make space for us in the device queue on our bus */ devq = bus->sim->devq; + 
mtx_lock(&devq->send_mtx); status = cam_devq_resize(devq, devq->send_queue.array_size + 1); + mtx_unlock(&devq->send_mtx); if (status != CAM_REQ_CMP) return (NULL); @@ -4534,19 +4696,12 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) if (device == NULL) return (NULL); - cam_init_pinfo(&device->devq_entry.pinfo); - device->devq_entry.device = device; + cam_init_pinfo(&device->devq_entry); device->target = target; device->lun_id = lun_id; device->sim = bus->sim; - /* Initialize our queues */ - if (camq_init(&device->drvq, 0) != 0) { - free(device, M_CAMDEV); - return (NULL); - } if (cam_ccbq_init(&device->ccbq, bus->sim->max_dev_openings) != 0) { - camq_fini(&device->drvq); free(device, M_CAMDEV); return (NULL); } @@ -4557,7 +4712,14 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) device->tag_delay_count = 0; device->tag_saved_openings = 0; device->refcount = 1; - callout_init_mtx(&device->callout, bus->sim->mtx, 0); + mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); + callout_init_mtx(&device->callout, &devq->send_mtx, 0); + TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); + /* + * Hold a reference to our parent bus so it + * will not go away before we do. 
+ */ + target->refcount++; cur_device = TAILQ_FIRST(&target->ed_entries); while (cur_device != NULL && cur_device->lun_id < lun_id) @@ -4566,7 +4728,6 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) TAILQ_INSERT_BEFORE(cur_device, device, links); else TAILQ_INSERT_TAIL(&target->ed_entries, device, links); - target->refcount++; target->generation++; return (device); } @@ -4574,35 +4735,45 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) void xpt_acquire_device(struct cam_ed *device) { + struct cam_eb *bus = device->target->bus; - mtx_assert(device->sim->mtx, MA_OWNED); + mtx_lock(&bus->eb_mtx); device->refcount++; + mtx_unlock(&bus->eb_mtx); } void xpt_release_device(struct cam_ed *device) { + struct cam_eb *bus = device->target->bus; struct cam_devq *devq; - mtx_assert(device->sim->mtx, MA_OWNED); - if (--device->refcount > 0) + mtx_lock(&bus->eb_mtx); + if (--device->refcount > 0) { + mtx_unlock(&bus->eb_mtx); return; + } + + TAILQ_REMOVE(&device->target->ed_entries, device,links); + device->target->generation++; + mtx_unlock(&bus->eb_mtx); + + /* Release our slot in the devq */ + devq = bus->sim->devq; + mtx_lock(&devq->send_mtx); + cam_devq_resize(devq, devq->send_queue.array_size - 1); + mtx_unlock(&devq->send_mtx); KASSERT(SLIST_EMPTY(&device->periphs), - ("refcount is zero, but periphs list is not empty")); - if (device->devq_entry.pinfo.index != CAM_UNQUEUED_INDEX) - panic("Removing device while still queued for ccbs"); + ("destroying device, but periphs list is not empty")); + KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, + ("destroying device while still queued for ccbs")); if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) callout_stop(&device->callout); - TAILQ_REMOVE(&device->target->ed_entries, device,links); - device->target->generation++; - device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings; - /* Release our slot in the devq */ - devq = 
device->target->bus->sim->devq; - cam_devq_resize(devq, devq->send_queue.array_size - 1); - camq_fini(&device->drvq); + xpt_release_target(device->target); + cam_ccbq_fini(&device->ccbq); /* * Free allocated memory. free(9) does nothing if the @@ -4614,27 +4785,22 @@ xpt_release_device(struct cam_ed *device) free(device->physpath, M_CAMXPT); free(device->rcap_buf, M_CAMXPT); free(device->serial_num, M_CAMXPT); - - xpt_release_target(device->target); - free(device, M_CAMDEV); + taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); } u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) { - int diff; int result; struct cam_ed *dev; dev = path->device; - - diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); + mtx_lock(&dev->sim->devq->send_mtx); result = cam_ccbq_resize(&dev->ccbq, newopenings); + mtx_unlock(&dev->sim->devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (dev->inq_flags & SID_CmdQue) != 0) dev->tag_saved_openings = newopenings; - /* Adjust the global limit */ - dev->sim->max_ccbs += diff; return (result); } @@ -4661,7 +4827,7 @@ xpt_find_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *target; - mtx_assert(bus->sim->mtx, MA_OWNED); + mtx_assert(&bus->eb_mtx, MA_OWNED); for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = TAILQ_NEXT(target, links)) { @@ -4678,7 +4844,7 @@ xpt_find_device(struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; - mtx_assert(target->bus->sim->mtx, MA_OWNED); + mtx_assert(&target->bus->eb_mtx, MA_OWNED); for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = TAILQ_NEXT(device, links)) { @@ -4758,10 +4924,12 @@ xpt_config(void *arg) /* * Now that interrupts are enabled, go find our devices */ + if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) + printf("xpt_config: failed to create taskqueue thread.\n"); /* Setup debugging path */ if (cam_dflags != CAM_DEBUG_NONE) { - if 
(xpt_create_path_unlocked(&cam_dpath, NULL, + if (xpt_create_path(&cam_dpath, NULL, CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN) != CAM_REQ_CMP) { printf("xpt_config: xpt_create_path() failed for debug" @@ -4778,7 +4946,8 @@ xpt_config(void *arg) callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000, xpt_boot_delay, NULL); /* Fire up rescan thread. */ - if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) { + if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, + "cam", "scanner")) { printf("xpt_config: failed to create rescan thread.\n"); } } @@ -4863,13 +5032,11 @@ xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, int xptpath = 0; if (path == NULL) { - mtx_lock(&xsoftc.xpt_lock); status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); - if (status != CAM_REQ_CMP) { - mtx_unlock(&xsoftc.xpt_lock); + if (status != CAM_REQ_CMP) return (status); - } + xpt_path_lock(path); xptpath = 1; } @@ -4882,8 +5049,8 @@ xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, status = csa.ccb_h.status; if (xptpath) { + xpt_path_unlock(path); xpt_free_path(path); - mtx_unlock(&xsoftc.xpt_lock); } if ((status == CAM_REQ_CMP) && @@ -4968,122 +5135,171 @@ xpt_unlock_buses(void) mtx_unlock(&xsoftc.xpt_topo_lock); } -static void -camisr(void *dummy) +struct mtx * +xpt_path_mtx(struct cam_path *path) { - cam_simq_t queue; - struct cam_sim *sim; - - mtx_lock(&cam_simq_lock); - TAILQ_INIT(&queue); - while (!TAILQ_EMPTY(&cam_simq)) { - TAILQ_CONCAT(&queue, &cam_simq, links); - mtx_unlock(&cam_simq_lock); - while ((sim = TAILQ_FIRST(&queue)) != NULL) { - TAILQ_REMOVE(&queue, sim, links); - CAM_SIM_LOCK(sim); - camisr_runqueue(sim); - sim->flags &= ~CAM_SIM_ON_DONEQ; - CAM_SIM_UNLOCK(sim); - } - mtx_lock(&cam_simq_lock); - } - mtx_unlock(&cam_simq_lock); + return (&path->device->device_mtx); } static void -camisr_runqueue(struct cam_sim *sim) +xpt_done_process(struct 
ccb_hdr *ccb_h) { - struct ccb_hdr *ccb_h; + struct cam_sim *sim; + struct cam_devq *devq; + struct mtx *mtx = NULL; - while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) { - int runq; + if (ccb_h->flags & CAM_HIGH_POWER) { + struct highpowerlist *hphead; + struct cam_ed *device; - TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe); - ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; + mtx_lock(&xsoftc.xpt_highpower_lock); + hphead = &xsoftc.highpowerq; - CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE, - ("camisr\n")); + device = STAILQ_FIRST(hphead); - runq = FALSE; + /* + * Increment the count since this command is done. + */ + xsoftc.num_highpower++; - if (ccb_h->flags & CAM_HIGH_POWER) { - struct highpowerlist *hphead; - struct cam_ed *device; + /* + * Any high powered commands queued up? + */ + if (device != NULL) { - mtx_lock(&xsoftc.xpt_lock); - hphead = &xsoftc.highpowerq; + STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); + mtx_unlock(&xsoftc.xpt_highpower_lock); - device = STAILQ_FIRST(hphead); + mtx_lock(&device->sim->devq->send_mtx); + xpt_release_devq_device(device, + /*count*/1, /*runqueue*/TRUE); + mtx_unlock(&device->sim->devq->send_mtx); + } else + mtx_unlock(&xsoftc.xpt_highpower_lock); + } - /* - * Increment the count since this command is done. - */ - xsoftc.num_highpower++; + sim = ccb_h->path->bus->sim; - /* - * Any high powered commands queued up? 
- */ - if (device != NULL) { + if (ccb_h->status & CAM_RELEASE_SIMQ) { + xpt_release_simq(sim, /*run_queue*/FALSE); + ccb_h->status &= ~CAM_RELEASE_SIMQ; + } - STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); - mtx_unlock(&xsoftc.xpt_lock); + if ((ccb_h->flags & CAM_DEV_QFRZDIS) + && (ccb_h->status & CAM_DEV_QFRZN)) { + xpt_release_devq(ccb_h->path, /*count*/1, + /*run_queue*/FALSE); + ccb_h->status &= ~CAM_DEV_QFRZN; + } - xpt_release_devq_device(device, - /*count*/1, /*runqueue*/TRUE); - } else - mtx_unlock(&xsoftc.xpt_lock); + devq = sim->devq; + if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { + struct cam_ed *dev = ccb_h->path->device; + + mtx_lock(&devq->send_mtx); + devq->send_active--; + devq->send_openings++; + cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); + + if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 + && (dev->ccbq.dev_active == 0))) { + dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; + xpt_release_devq_device(dev, /*count*/1, + /*run_queue*/FALSE); } - if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { - struct cam_ed *dev; - - dev = ccb_h->path->device; + if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 + && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { + dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; + xpt_release_devq_device(dev, /*count*/1, + /*run_queue*/FALSE); + } - cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); - sim->devq->send_active--; - sim->devq->send_openings++; - runq = TRUE; + if (!device_is_queued(dev)) + (void)xpt_schedule_devq(devq, dev); + mtx_unlock(&devq->send_mtx); - if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 - && (dev->ccbq.dev_active == 0))) { - dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; - xpt_release_devq(ccb_h->path, /*count*/1, - /*run_queue*/FALSE); - } - - if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 - && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { - dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; - xpt_release_devq(ccb_h->path, /*count*/1, - /*run_queue*/FALSE); - } + if ((dev->flags & 
CAM_DEV_TAG_AFTER_COUNT) != 0) { + mtx = xpt_path_mtx(ccb_h->path); + mtx_lock(mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 && (--dev->tag_delay_count == 0)) xpt_start_tags(ccb_h->path); - if (!device_is_queued(dev)) { - (void)xpt_schedule_devq(sim->devq, dev); - } } + } - if (ccb_h->status & CAM_RELEASE_SIMQ) { - xpt_release_simq(sim, /*run_queue*/TRUE); - ccb_h->status &= ~CAM_RELEASE_SIMQ; - runq = FALSE; + if ((ccb_h->flags & CAM_UNLOCKED) == 0) { + if (mtx == NULL) { + mtx = xpt_path_mtx(ccb_h->path); + mtx_lock(mtx); + } + } else { + if (mtx != NULL) { + mtx_unlock(mtx); + mtx = NULL; } + } - if ((ccb_h->flags & CAM_DEV_QFRZDIS) - && (ccb_h->status & CAM_DEV_QFRZN)) { - xpt_release_devq(ccb_h->path, /*count*/1, - /*run_queue*/TRUE); - ccb_h->status &= ~CAM_DEV_QFRZN; - } else if (runq) { - xpt_run_devq(sim->devq); + /* Call the peripheral driver's callback */ + ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; + (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); + if (mtx != NULL) + mtx_unlock(mtx); + + mtx_lock(&devq->send_mtx); + xpt_run_devq(devq); + mtx_unlock(&devq->send_mtx); +} + +void +xpt_done_td(void *arg) +{ + struct cam_doneq *queue = arg; + struct ccb_hdr *ccb_h; + STAILQ_HEAD(, ccb_hdr) doneq; + + STAILQ_INIT(&doneq); + mtx_lock(&queue->cam_doneq_mtx); + while (1) { + while (STAILQ_EMPTY(&queue->cam_doneq)) { + queue->cam_doneq_sleep = 1; + msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, + PRIBIO, "-", 0); + queue->cam_doneq_sleep = 0; + } + STAILQ_CONCAT(&doneq, &queue->cam_doneq); + mtx_unlock(&queue->cam_doneq_mtx); + + THREAD_NO_SLEEPING(); + while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { + STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); + xpt_done_process(ccb_h); } + THREAD_SLEEPING_OK(); - /* Call the peripheral driver's callback */ - (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); + mtx_lock(&queue->cam_doneq_mtx); + } +} + +static void +camisr_runqueue(void) +{ + struct ccb_hdr *ccb_h; + struct cam_doneq *queue; + int 
i; + + /* Process global queues. */ + for (i = 0; i < cam_num_doneqs; i++) { + queue = &cam_doneqs[i]; + mtx_lock(&queue->cam_doneq_mtx); + while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { + STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); + mtx_unlock(&queue->cam_doneq_mtx); + xpt_done_process(ccb_h); + mtx_lock(&queue->cam_doneq_mtx); + } + mtx_unlock(&queue->cam_doneq_mtx); } } diff --git a/sys/cam/cam_xpt.h b/sys/cam/cam_xpt.h index 97933b9..1d983c9 100644 --- a/sys/cam/cam_xpt.h +++ b/sys/cam/cam_xpt.h @@ -56,6 +56,7 @@ struct cam_path; struct async_node { SLIST_ENTRY(async_node) links; u_int32_t event_enable; /* Async Event enables */ + u_int32_t event_lock; /* Take SIM lock for handlers. */ void (*callback)(void *arg, u_int32_t code, struct cam_path *path, void *args); void *callback_arg; @@ -110,6 +111,13 @@ void xpt_hold_boot(void); void xpt_release_boot(void); void xpt_lock_buses(void); void xpt_unlock_buses(void); +struct mtx * xpt_path_mtx(struct cam_path *path); +#define xpt_path_lock(path) mtx_lock(xpt_path_mtx(path)) +#define xpt_path_unlock(path) mtx_unlock(xpt_path_mtx(path)) +#define xpt_path_assert(path, what) mtx_assert(xpt_path_mtx(path), (what)) +#define xpt_path_owned(path) mtx_owned(xpt_path_mtx(path)) +#define xpt_path_sleep(path, chan, priority, wmesg, timo) \ + msleep((chan), xpt_path_mtx(path), (priority), (wmesg), (timo)) cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path); cam_status xpt_compile_path(struct cam_path *new_path, @@ -117,6 +125,10 @@ cam_status xpt_compile_path(struct cam_path *new_path, path_id_t path_id, target_id_t target_id, lun_id_t lun_id); +cam_status xpt_clone_path(struct cam_path **new_path, + struct cam_path *path); +void xpt_copy_path(struct cam_path *new_path, + struct cam_path *path); void xpt_release_path(struct cam_path *path); diff --git a/sys/cam/cam_xpt_internal.h b/sys/cam/cam_xpt_internal.h index 70808ba..f8c6498 100644 --- 
a/sys/cam/cam_xpt_internal.h +++ b/sys/cam/cam_xpt_internal.h @@ -29,6 +29,8 @@ #ifndef _CAM_CAM_XPT_INTERNAL_H #define _CAM_CAM_XPT_INTERNAL_H 1 +#include <sys/taskqueue.h> + /* Forward Declarations */ struct cam_eb; struct cam_et; @@ -55,30 +57,16 @@ struct xpt_xport { }; /* - * Structure for queueing a device in a run queue. - * There is one run queue for allocating new ccbs, - * and another for sending ccbs to the controller. - */ -struct cam_ed_qinfo { - cam_pinfo pinfo; - struct cam_ed *device; -}; - -/* * The CAM EDT (Existing Device Table) contains the device information for * all devices for all busses in the system. The table contains a * cam_ed structure for each device on the bus. */ struct cam_ed { + cam_pinfo devq_entry; TAILQ_ENTRY(cam_ed) links; - struct cam_ed_qinfo devq_entry; struct cam_et *target; struct cam_sim *sim; lun_id_t lun_id; - struct camq drvq; /* - * Queue of type drivers wanting to do - * work on this device. - */ struct cam_ccbq ccbq; /* Queue of pending ccbs */ struct async_list asyncs; /* Async callback info for this B/T/L */ struct periph_list periphs; /* All attached devices */ @@ -125,6 +113,8 @@ struct cam_ed { u_int32_t refcount; struct callout callout; STAILQ_ENTRY(cam_ed) highpowerq_entry; + struct mtx device_mtx; + struct task device_destroy_task; }; /* @@ -143,6 +133,7 @@ struct cam_et { struct timeval last_reset; u_int rpl_size; struct scsi_report_luns_data *luns; + struct mtx luns_mtx; /* Protection for luns field. */ }; /* @@ -162,6 +153,7 @@ struct cam_eb { u_int generation; device_t parent_dev; struct xpt_xport *xport; + struct mtx eb_mtx; /* Bus topology mutex. 
*/ }; struct cam_path { @@ -179,8 +171,6 @@ struct cam_ed * xpt_alloc_device(struct cam_eb *bus, lun_id_t lun_id); void xpt_acquire_device(struct cam_ed *device); void xpt_release_device(struct cam_ed *device); -int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, - u_int32_t new_priority); u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings); void xpt_start_tags(struct cam_path *path); void xpt_stop_tags(struct cam_path *path); diff --git a/sys/cam/cam_xpt_sim.h b/sys/cam/cam_xpt_sim.h index 09126cf..1543645 100644 --- a/sys/cam/cam_xpt_sim.h +++ b/sys/cam/cam_xpt_sim.h @@ -46,8 +46,7 @@ u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count); void xpt_release_devq(struct cam_path *path, u_int count, int run_queue); void xpt_done(union ccb *done_ccb); -void xpt_batch_start(struct cam_sim *sim); -void xpt_batch_done(struct cam_sim *sim); +void xpt_done_direct(union ccb *done_ccb); #endif #endif /* _CAM_CAM_XPT_SIM_H */ diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c index 2e60b64..f381a6b 100644 --- a/sys/cam/ctl/ctl_frontend_cam_sim.c +++ b/sys/cam/ctl/ctl_frontend_cam_sim.c @@ -504,14 +504,9 @@ static void cfcs_done(union ctl_io *io) { union ccb *ccb; - struct cfcs_softc *softc; - struct cam_sim *sim; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; - sim = xpt_path_sim(ccb->ccb_h.path); - softc = (struct cfcs_softc *)cam_sim_softc(sim); - /* * At this point we should have status. If we don't, that's a bug. 
*/ @@ -550,10 +545,7 @@ cfcs_done(union ctl_io *io) break; } - mtx_lock(sim->mtx); xpt_done(ccb); - mtx_unlock(sim->mtx); - ctl_free_io(io); } diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c index 45acdba..3f50269 100644 --- a/sys/cam/ctl/scsi_ctl.c +++ b/sys/cam/ctl/scsi_ctl.c @@ -73,8 +73,7 @@ __FBSDID("$FreeBSD$"); #include <cam/ctl/ctl_error.h> typedef enum { - CTLFE_CCB_DEFAULT = 0x00, - CTLFE_CCB_WAITING = 0x01 + CTLFE_CCB_DEFAULT = 0x00 } ctlfe_ccb_types; struct ctlfe_softc { @@ -82,6 +81,7 @@ struct ctlfe_softc { path_id_t path_id; struct cam_sim *sim; char port_name[DEV_IDLEN]; + struct mtx lun_softc_mtx; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; @@ -320,7 +320,6 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) #ifdef CTLFE_INIT_ENABLE if (ctlfe_num_targets >= ctlfe_max_targets) { union ccb *ccb; - struct cam_sim *sim; ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO); @@ -328,18 +327,12 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) printf("%s: unable to malloc CCB!\n", __func__); return; } - xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path, - CAM_PRIORITY_NONE); - - sim = xpt_path_sim(cpi->ccb_h.path); + xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; - /* We should hold the SIM lock here */ - mtx_assert(sim->mtx, MA_OWNED); - xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != @@ -381,7 +374,9 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) } bus_softc->path_id = cpi->ccb_h.path_id; - bus_softc->sim = xpt_path_sim(cpi->ccb_h.path); + bus_softc->sim = xpt_path_sim(path); + mtx_init(&bus_softc->lun_softc_mtx, "LUN softc mtx", NULL, + MTX_DEF); STAILQ_INIT(&bus_softc->lun_softc_list); fe = &bus_softc->fe; @@ -435,6 +430,7 @@ 
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) if (retval != 0) { printf("%s: ctl_frontend_register() failed with " "error %d!\n", __func__, retval); + mtx_destroy(&bus_softc->lun_softc_mtx); free(bus_softc, M_CTLFE); break; } else { @@ -464,6 +460,7 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) * are no outstanding commands for this frontend? */ ctl_frontend_deregister(&softc->fe); + mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); } break; @@ -538,19 +535,18 @@ ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; - struct cam_sim *sim; union ccb en_lun_ccb; cam_status status; int i; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; - sim = xpt_path_sim(periph->path); TAILQ_INIT(&softc->work_queue); softc->periph = periph; - callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0); + callout_init_mtx(&softc->dma_callout, xpt_path_mtx(periph->path), + /*flags*/ 0); periph->softc = softc; xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); @@ -580,6 +576,7 @@ ctlferegister(struct cam_periph *periph, void *arg) xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; + new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); softc->atios_sent++; status = new_ccb->ccb_h.status; @@ -615,6 +612,7 @@ ctlferegister(struct cam_periph *periph, void *arg) xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; + new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); softc->inots_sent++; status = new_ccb->ccb_h.status; @@ -646,6 +644,7 @@ ctlfeoninvalidate(struct cam_periph *periph) { union ccb en_lun_ccb; cam_status status; + struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; softc = (struct 
ctlfe_lun_softc *)periph->softc; @@ -668,21 +667,22 @@ ctlfeoninvalidate(struct cam_periph *periph) "INOTs outstanding, %d refs\n", softc->atios_sent - softc->atios_returned, softc->inots_sent - softc->inots_returned, periph->refcount); + + bus_softc = softc->parent_softc; + mtx_lock(&bus_softc->lun_softc_mtx); + STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); + mtx_unlock(&bus_softc->lun_softc_mtx); } static void ctlfecleanup(struct cam_periph *periph) { struct ctlfe_lun_softc *softc; - struct ctlfe_softc *bus_softc; xpt_print(periph->path, "%s: Called\n", __func__); softc = (struct ctlfe_lun_softc *)periph->softc; - bus_softc = softc->parent_softc; - STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); - /* * XXX KDM is there anything else that needs to be done here? */ @@ -705,14 +705,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb) start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT; ccb_h = TAILQ_FIRST(&softc->work_queue); - if (periph->immediate_priority <= periph->pinfo.priority) { - panic("shouldn't get to the CCB waiting case!"); - start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING; - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - } else if (ccb_h == NULL) { + if (ccb_h == NULL) { softc->ccbs_freed++; xpt_release_ccb(start_ccb); } else { @@ -791,7 +784,6 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb) } start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; - start_ccb->ccb_h.cbfcnp = ctlfedone; /* Tell the SIM that we've aborted this ATIO */ xpt_action(start_ccb); @@ -1004,6 +996,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb) /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ 5 * 1000); + start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.ccb_atio = atio; if (((flags & CAM_SEND_STATUS) == 0) && (io != NULL)) @@ 
-1011,7 +1004,9 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb) softc->ctios_sent++; + cam_periph_unlock(periph); xpt_action(start_ccb); + cam_periph_lock(periph); if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, @@ -1148,7 +1143,10 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) struct ctlfe_softc *bus_softc; struct ccb_accept_tio *atio = NULL; union ctl_io *io = NULL; + struct mtx *mtx; + KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, + ("CCB in ctlfedone() without CAM_UNLOCKED flag")); #ifdef CTLFE_DEBUG printf("%s: entered, func_code = %#x, type = %#lx\n", __func__, done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type); @@ -1156,12 +1154,8 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; - - if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) { - panic("shouldn't get to the CCB waiting case!"); - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); /* * If the peripheral is invalid, ATIOs and immediate notify CCBs @@ -1177,7 +1171,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: ctlfe_free_ccb(periph, done_ccb); - return; + goto out; default: break; } @@ -1215,6 +1209,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) xpt_schedule(periph, /*priority*/ 1); break; } + mtx_unlock(mtx); ctl_zero_io(io); /* Save pointers on both sides */ @@ -1271,7 +1266,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) #endif ctl_queue(io); - break; + return; } case XPT_CONT_TARGET_IO: { int srr = 0; @@ -1333,7 +1328,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.tqe); xpt_schedule(periph, /*priority*/ 1); - return; + break; } /* @@ -1359,10 +1354,11 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) } 
if (periph->flags & CAM_PERIPH_INVALID) { ctlfe_free_ccb(periph, (union ccb *)atio); - return; } else { - xpt_action((union ccb *)atio); softc->atios_sent++; + mtx_unlock(mtx); + xpt_action((union ccb *)atio); + return; } } else { struct ctlfe_lun_cmd_info *cmd_info; @@ -1478,10 +1474,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) /*dxfer_len*/ dxfer_len, /*timeout*/ 5 * 1000); + csio->ccb_h.flags |= CAM_UNLOCKED; csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; + mtx_unlock(mtx); xpt_action((union ccb *)csio); } else { /* @@ -1490,10 +1488,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) */ softc->ccbs_freed++; xpt_release_ccb(done_ccb); + mtx_unlock(mtx); /* Call the backend move done callback */ io->scsiio.be_move_done(io); } + return; } break; } @@ -1614,7 +1614,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) ctl_free_io(io); ctlfe_free_ccb(periph, done_ccb); - return; + goto out; } if (send_ctl_io != 0) { ctl_queue(io); @@ -1651,12 +1651,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) xpt_action(done_ccb); softc->inots_sent++; break; - case XPT_ABORT: - /* - * XPT_ABORT is an immediate CCB, we shouldn't get here. 
- */ - panic("%s: XPT_ABORT CCB returned!", __func__); - break; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: break; @@ -1665,6 +1659,9 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb) done_ccb->ccb_h.func_code); break; } + +out: + mtx_unlock(mtx); } static void @@ -1674,17 +1671,12 @@ ctlfe_onoffline(void *arg, int online) union ccb *ccb; cam_status status; struct cam_path *path; - struct cam_sim *sim; int set_wwnn; bus_softc = (struct ctlfe_softc *)arg; set_wwnn = 0; - sim = bus_softc->sim; - - mtx_assert(sim->mtx, MA_OWNED); - status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { @@ -1844,12 +1836,8 @@ ctlfe_online(void *arg) struct cam_path *path; cam_status status; struct ctlfe_lun_softc *lun_softc; - struct cam_sim *sim; bus_softc = (struct ctlfe_softc *)arg; - sim = bus_softc->sim; - - CAM_SIM_LOCK(sim); /* * Create the wildcard LUN before bringing the port online. @@ -1860,7 +1848,6 @@ ctlfe_online(void *arg) if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); - CAM_SIM_UNLOCK(sim); return; } @@ -1870,15 +1857,16 @@ ctlfe_online(void *arg) xpt_print(path, "%s: unable to allocate softc for " "wildcard periph\n", __func__); xpt_free_path(path); - CAM_SIM_UNLOCK(sim); return; } + xpt_path_lock(path); lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; + mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links); - + mtx_unlock(&bus_softc->lun_softc_mtx); status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, @@ -1901,11 +1889,10 @@ ctlfe_online(void *arg) entry->status_text : "Unknown", status); } - xpt_free_path(path); - ctlfe_onoffline(arg, /*online*/ 1); - CAM_SIM_UNLOCK(sim); + xpt_path_unlock(path); + xpt_free_path(path); } static void @@ -1915,14 +1902,8 @@ ctlfe_offline(void *arg) struct cam_path *path; cam_status status; struct cam_periph 
*periph; - struct cam_sim *sim; bus_softc = (struct ctlfe_softc *)arg; - sim = bus_softc->sim; - - CAM_SIM_LOCK(sim); - - ctlfe_onoffline(arg, /*online*/ 0); /* * Disable the wildcard LUN for this port now that we have taken @@ -1932,19 +1913,20 @@ ctlfe_offline(void *arg) bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { - CAM_SIM_UNLOCK(sim); printf("%s: unable to create path for wildcard periph\n", __func__); return; } + xpt_path_lock(path); + + ctlfe_onoffline(arg, /*online*/ 0); if ((periph = cam_periph_find(path, "ctl")) != NULL) cam_periph_invalidate(periph); + xpt_path_unlock(path); xpt_free_path(path); - - CAM_SIM_UNLOCK(sim); } static int @@ -1970,15 +1952,13 @@ ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; - struct cam_sim *sim; cam_status status; bus_softc = (struct ctlfe_softc *)arg; - sim = bus_softc->sim; - status = xpt_create_path_unlocked(&path, /*periph*/ NULL, - bus_softc->path_id, - targ_id.id, lun_id); + status = xpt_create_path(&path, /*periph*/ NULL, + bus_softc->path_id, + targ_id.id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, @@ -1987,18 +1967,20 @@ ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); - CAM_SIM_LOCK(sim); + xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. 
*/ + xpt_path_unlock(path); xpt_free_path(path); free(softc, M_CTLFE); - CAM_SIM_UNLOCK(sim); return (0); } softc->parent_softc = bus_softc; + mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); + mtx_unlock(&bus_softc->lun_softc_mtx); status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, @@ -2011,10 +1993,8 @@ ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 0, softc); + xpt_path_unlock(path); xpt_free_path(path); - - CAM_SIM_UNLOCK(sim); - return (0); } @@ -2027,12 +2007,10 @@ ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) { struct ctlfe_softc *softc; struct ctlfe_lun_softc *lun_softc; - struct cam_sim *sim; softc = (struct ctlfe_softc *)arg; - sim = softc->sim; - CAM_SIM_LOCK(sim); + mtx_lock(&softc->lun_softc_mtx); STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { struct cam_path *path; @@ -2044,16 +2022,18 @@ ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) } } if (lun_softc == NULL) { - CAM_SIM_UNLOCK(sim); + mtx_unlock(&softc->lun_softc_mtx); printf("%s: can't find target %d lun %d\n", __func__, targ_id.id, lun_id); return (1); } + cam_periph_acquire(lun_softc->periph); + mtx_unlock(&softc->lun_softc_mtx); + cam_periph_lock(lun_softc->periph); cam_periph_invalidate(lun_softc->periph); - - CAM_SIM_UNLOCK(sim); - + cam_periph_unlock(lun_softc->periph); + cam_periph_release(lun_softc->periph); return (0); } @@ -2064,12 +2044,6 @@ ctlfe_dump_sim(struct cam_sim *sim) printf("%s%d: max tagged openings: %d, max dev openings: %d\n", sim->sim_name, sim->unit_number, sim->max_tagged_dev_openings, sim->max_dev_openings); - printf("%s%d: max_ccbs: %u, ccb_count: %u\n", - sim->sim_name, sim->unit_number, - sim->max_ccbs, sim->ccb_count); - printf("%s%d: ccb_freeq is %sempty\n", - sim->sim_name, sim->unit_number, - (SLIST_FIRST(&sim->ccb_freeq) == NULL) ? 
"" : "NOT "); printf("\n"); } @@ -2200,17 +2174,13 @@ static void ctlfe_datamove_done(union ctl_io *io) { union ccb *ccb; - struct cam_sim *sim; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; - sim = xpt_path_sim(ccb->ccb_h.path); - - CAM_SIM_LOCK(sim); - periph = xpt_path_periph(ccb->ccb_h.path); + cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; @@ -2255,7 +2225,7 @@ ctlfe_datamove_done(union ctl_io *io) xpt_schedule(periph, /*priority*/ 1); } - CAM_SIM_UNLOCK(sim); + cam_periph_unlock(periph); } static void diff --git a/sys/cam/scsi/scsi_cd.c b/sys/cam/scsi/scsi_cd.c index 1b59eca..354fd5c 100644 --- a/sys/cam/scsi/scsi_cd.c +++ b/sys/cam/scsi/scsi_cd.c @@ -118,7 +118,6 @@ typedef enum { typedef enum { CD_CCB_PROBE = 0x01, CD_CCB_BUFFER_IO = 0x02, - CD_CCB_WAITING = 0x03, CD_CCB_TUR = 0x04, CD_CCB_TYPE_MASK = 0x0F, CD_CCB_RETRY_UA = 0x10 @@ -549,7 +548,7 @@ cdasync(void *callback_arg, u_int32_t code, status = cam_periph_alloc(cdregister, cdoninvalidate, cdcleanup, cdstart, "cd", CAM_PERIPH_BIO, - cgd->ccb_h.path, cdasync, + path, cdasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -980,9 +979,9 @@ cdregister(struct cam_periph *periph, void *arg) STAILQ_INIT(&nchanger->chluns); callout_init_mtx(&nchanger->long_handle, - periph->sim->mtx, 0); + cam_periph_mtx(periph), 0); callout_init_mtx(&nchanger->short_handle, - periph->sim->mtx, 0); + cam_periph_mtx(periph), 0); mtx_lock(&changerq_mtx); num_changers++; @@ -1051,7 +1050,7 @@ cdregister(struct cam_periph *periph, void *arg) /* * Schedule a periodic media polling events. 
*/ - callout_init_mtx(&softc->mediapoll_c, periph->sim->mtx, 0); + callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) && (softc->flags & CD_FLAG_CHANGER) == 0 && (cgd->inq_flags & SID_AEN) == 0 && @@ -1535,14 +1534,7 @@ cdstart(struct cam_periph *periph, union ccb *start_ccb) case CD_STATE_NORMAL: { bp = bioq_first(&softc->bio_queue); - if (periph->immediate_priority <= periph->pinfo.priority) { - start_ccb->ccb_h.ccb_state = CD_CCB_WAITING; - - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - } else if (bp == NULL) { + if (bp == NULL) { if (softc->tur) { softc->tur = 0; csio = &start_ccb->csio; @@ -1606,11 +1598,9 @@ cdstart(struct cam_periph *periph, union ccb *start_ccb) xpt_action(start_ccb); } - if (bp != NULL || softc->tur || - periph->immediate_priority != CAM_PRIORITY_NONE) { + if (bp != NULL || softc->tur) { /* Have more work to do, so ensure we stay scheduled */ - xpt_schedule(periph, min(CAM_PRIORITY_NORMAL, - periph->immediate_priority)); + xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } @@ -1895,15 +1885,6 @@ cddone(struct cam_periph *periph, union ccb *done_ccb) cam_periph_unhold(periph); return; } - case CD_CCB_WAITING: - { - /* Caller will release the CCB */ - CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, - ("trying to wakeup ccbwait\n")); - - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } case CD_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { diff --git a/sys/cam/scsi/scsi_ch.c b/sys/cam/scsi/scsi_ch.c index fb208ed..b83bc53 100644 --- a/sys/cam/scsi/scsi_ch.c +++ b/sys/cam/scsi/scsi_ch.c @@ -116,8 +116,7 @@ typedef enum { } ch_state; typedef enum { - CH_CCB_PROBE, - CH_CCB_WAITING + CH_CCB_PROBE } ch_ccb_types; typedef enum { @@ -248,20 +247,19 @@ chinit(void) static void chdevgonecb(void *arg) { - struct cam_sim *sim; struct ch_softc *softc; struct 
cam_periph *periph; + struct mtx *mtx; int i; periph = (struct cam_periph *)arg; - sim = periph->sim; - softc = (struct ch_softc *)periph->softc; + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); + softc = (struct ch_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); - mtx_lock(sim->mtx); - /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the @@ -278,13 +276,13 @@ chdevgonecb(void *arg) cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); } static void @@ -350,7 +348,7 @@ chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) */ status = cam_periph_alloc(chregister, choninvalidate, chcleanup, chstart, "ch", - CAM_PERIPH_BIO, cgd->ccb_h.path, + CAM_PERIPH_BIO, path, chasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -503,25 +501,23 @@ chopen(struct cdev *dev, int flags, int fmt, struct thread *td) static int chclose(struct cdev *dev, int flag, int fmt, struct thread *td) { - struct cam_sim *sim; struct cam_periph *periph; struct ch_softc *softc; + struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return(ENXIO); + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); - sim = periph->sim; softc = (struct ch_softc *)periph->softc; - - mtx_lock(sim->mtx); - softc->open_count--; cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, instead of using * cam_periph_unlock(). 
The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph @@ -532,7 +528,7 @@ chclose(struct cdev *dev, int flag, int fmt, struct thread *td) * protect the open count and avoid another lock acquisition and * release. */ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); return(0); } @@ -547,14 +543,7 @@ chstart(struct cam_periph *periph, union ccb *start_ccb) switch (softc->state) { case CH_STATE_NORMAL: { - if (periph->immediate_priority <= periph->pinfo.priority){ - start_ccb->ccb_h.ccb_state = CH_CCB_WAITING; - - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - } + xpt_release_ccb(start_ccb); break; } case CH_STATE_PROBE: @@ -734,12 +723,6 @@ chdone(struct cam_periph *periph, union ccb *done_ccb) cam_periph_unhold(periph); return; } - case CH_CCB_WAITING: - { - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } default: break; } @@ -1724,10 +1707,8 @@ chscsiversion(struct cam_periph *periph) struct scsi_inquiry_data *inq_data; struct ccb_getdev *cgd; int dev_scsi_version; - struct cam_sim *sim; - sim = xpt_path_sim(periph->path); - mtx_assert(sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) == NULL) return (-1); /* diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c index 59a332c..f1ea06a 100644 --- a/sys/cam/scsi/scsi_da.c +++ b/sys/cam/scsi/scsi_da.c @@ -84,7 +84,7 @@ typedef enum { DA_FLAG_PACK_LOCKED = 0x004, DA_FLAG_PACK_REMOVABLE = 0x008, DA_FLAG_NEED_OTAG = 0x020, - DA_FLAG_WENT_IDLE = 0x040, + DA_FLAG_WAS_OTAG = 0x040, DA_FLAG_RETRY_UA = 0x080, DA_FLAG_OPEN = 0x100, DA_FLAG_SCTX_INIT = 0x200, @@ -118,7 +118,6 @@ typedef enum { DA_CCB_PROBE_BDC = 0x05, DA_CCB_PROBE_ATA = 0x06, DA_CCB_BUFFER_IO = 0x07, - DA_CCB_WAITING = 0x08, DA_CCB_DUMP = 0x0A, 
DA_CCB_DELETE = 0x0B, DA_CCB_TUR = 0x0C, @@ -199,19 +198,17 @@ struct da_softc { struct bio_queue_head bio_queue; struct bio_queue_head delete_queue; struct bio_queue_head delete_run_queue; - SLIST_ENTRY(da_softc) links; LIST_HEAD(, ccb_hdr) pending_ccbs; + int tur; /* TEST UNIT READY should be sent */ + int refcount; /* Active xpt_action() calls */ da_state state; da_flags flags; da_quirks quirks; int sort_io_queue; int minimum_cmd_size; int error_inject; - int ordered_tag_count; - int outstanding_cmds; int trim_max_ranges; int delete_running; - int tur; int delete_available; /* Delete methods possibly available */ uint32_t unmap_max_ranges; uint32_t unmap_max_lba; @@ -1269,86 +1266,72 @@ daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; + union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; - cam_periph_lock(periph); - if (cam_periph_hold(periph, PRIBIO) != 0) { - cam_periph_unlock(periph); - cam_periph_release(periph); - return (0); - } - softc = (struct da_softc *)periph->softc; - + cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); - if ((softc->flags & DA_FLAG_DIRTY) != 0 && - (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && - (softc->flags & DA_FLAG_PACK_INVALID) == 0) { - union ccb *ccb; - - ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); - - scsi_synchronize_cache(&ccb->csio, - /*retries*/1, - /*cbfcnp*/dadone, - MSG_SIMPLE_Q_TAG, - /*begin_lba*/0,/* Cover the whole disk */ - /*lb_count*/0, - SSD_FULL_SIZE, - 5 * 60 * 1000); + if (cam_periph_hold(periph, PRIBIO) == 0) { + + /* Flush disk cache. 
*/ + if ((softc->flags & DA_FLAG_DIRTY) != 0 && + (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && + (softc->flags & DA_FLAG_PACK_INVALID) == 0) { + ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); + scsi_synchronize_cache(&ccb->csio, /*retries*/1, + /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, + /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, + 5 * 60 * 1000); + error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, + /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, + softc->disk->d_devstat); + if (error == 0) + softc->flags &= ~DA_FLAG_DIRTY; + xpt_release_ccb(ccb); + } - error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, - /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, - softc->disk->d_devstat); - if (error == 0) - softc->flags &= ~DA_FLAG_DIRTY; - xpt_release_ccb(ccb); + /* Allow medium removal. */ + if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && + (softc->quirks & DA_Q_NO_PREVENT) == 0) + daprevent(periph, PR_ALLOW); + cam_periph_unhold(periph); } - if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) { - if ((softc->quirks & DA_Q_NO_PREVENT) == 0) - daprevent(periph, PR_ALLOW); - /* - * If we've got removeable media, mark the blocksize as - * unavailable, since it could change when new media is - * inserted. - */ + /* + * If we've got removeable media, mark the blocksize as + * unavailable, since it could change when new media is + * inserted. + */ + if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; - } softc->flags &= ~DA_FLAG_OPEN; - cam_periph_unhold(periph); + while (softc->refcount != 0) + cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); - return (0); + return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; - uint32_t prio; if (softc->state != DA_STATE_NORMAL) return; - /* Check if cam_periph_getccb() was called. 
*/ - prio = periph->immediate_priority; - /* Check if we have more work to do. */ if (bioq_first(&softc->bio_queue) || (!softc->delete_running && bioq_first(&softc->delete_queue)) || softc->tur) { - prio = CAM_PRIORITY_NORMAL; + xpt_schedule(periph, CAM_PRIORITY_NORMAL); } - - /* Schedule CCB if any of above is true. */ - if (prio != CAM_PRIORITY_NONE) - xpt_schedule(periph, prio); } /* @@ -1382,9 +1365,7 @@ dastrategy(struct bio *bp) * Place it in the queue of disk activities for this disk */ if (bp->bio_cmd == BIO_DELETE) { - if (bp->bio_bcount == 0) - biodone(bp); - else if (DA_SIO) + if (DA_SIO) bioq_disksort(&softc->delete_queue, bp); else bioq_insert_tail(&softc->delete_queue, bp); @@ -1621,7 +1602,7 @@ daasync(void *callback_arg, u_int32_t code, status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, - cgd->ccb_h.path, daasync, + path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -2066,7 +2047,7 @@ daregister(struct cam_periph *periph, void *arg) * Schedule a periodic event to occasionally send an * ordered tag to a device. */ - callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0); + callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); @@ -2186,7 +2167,7 @@ daregister(struct cam_periph *periph, void *arg) /* * Schedule a periodic media polling events. */ - callout_init_mtx(&softc->mediapoll_c, periph->sim->mtx, 0); + callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) @@ -2214,20 +2195,6 @@ skipstate: struct bio *bp; uint8_t tag_code; - /* Execute immediate CCB if waiting. 
*/ - if (periph->immediate_priority <= periph->pinfo.priority) { - CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, - ("queuing for immediate ccb\n")); - start_ccb->ccb_h.ccb_state = DA_CCB_WAITING; - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - /* May have more work to do, so ensure we stay scheduled */ - daschedule(periph); - break; - } - /* Run BIO_DELETE if not running yet. */ if (!softc->delete_running && (bp = bioq_first(&softc->delete_queue)) != NULL) { @@ -2266,7 +2233,7 @@ skipstate: if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; - softc->ordered_tag_count++; + softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; @@ -2316,15 +2283,11 @@ skipstate: break; } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; + start_ccb->ccb_h.flags |= CAM_UNLOCKED; out: - /* - * Block out any asynchronous callbacks - * while we touch the pending ccb list. 
- */ LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); - softc->outstanding_cmds++; /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { @@ -2333,7 +2296,11 @@ out: } start_ccb->ccb_h.ccb_bp = bp; + softc->refcount++; + cam_periph_unlock(periph); xpt_action(start_ccb); + cam_periph_lock(periph); + softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); @@ -2628,6 +2595,7 @@ da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; + ccb->ccb_h.flags |= CAM_UNLOCKED; } static void @@ -2708,6 +2676,7 @@ da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; + ccb->ccb_h.flags |= CAM_UNLOCKED; } /* @@ -2764,6 +2733,7 @@ da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; + ccb->ccb_h.flags |= CAM_UNLOCKED; } static int @@ -2898,6 +2868,7 @@ dadone(struct cam_periph *periph, union ccb *done_ccb) { struct bio *bp, *bp1; + cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; @@ -2914,6 +2885,7 @@ dadone(struct cam_periph *periph, union ccb *done_ccb) * A retry was scheduled, so * just return. */ + cam_periph_unlock(periph); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; @@ -2981,18 +2953,22 @@ dadone(struct cam_periph *periph, union ccb *done_ccb) } } - /* - * Block out any asynchronous callbacks - * while we touch the pending ccb list. 
- */ LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); - softc->outstanding_cmds--; - if (softc->outstanding_cmds == 0) - softc->flags |= DA_FLAG_WENT_IDLE; + if (LIST_EMPTY(&softc->pending_ccbs)) + softc->flags |= DA_FLAG_WAS_OTAG; + xpt_release_ccb(done_ccb); if (state == DA_CCB_DELETE) { - while ((bp1 = bioq_takefirst(&softc->delete_run_queue)) - != NULL) { + TAILQ_HEAD(, bio) queue; + + TAILQ_INIT(&queue); + TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); + softc->delete_run_queue.insert_point = NULL; + softc->delete_running = 0; + daschedule(periph); + cam_periph_unlock(periph); + while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { + TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = bp->bio_error; if (bp->bio_flags & BIO_ERROR) { bp1->bio_flags |= BIO_ERROR; @@ -3001,13 +2977,11 @@ dadone(struct cam_periph *periph, union ccb *done_ccb) bp1->bio_resid = 0; biodone(bp1); } - softc->delete_running = 0; - if (bp != NULL) - biodone(bp); - daschedule(periph); - } else if (bp != NULL) + } else + cam_periph_unlock(periph); + if (bp != NULL) biodone(bp); - break; + return; } case DA_CCB_PROBE_RC: case DA_CCB_PROBE_RC16: @@ -3457,12 +3431,6 @@ dadone(struct cam_periph *periph, union ccb *done_ccb) daprobedone(periph, done_ccb); return; } - case DA_CCB_WAITING: - { - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } case DA_CCB_DUMP: /* No-op. 
We're polling */ return; @@ -3573,7 +3541,7 @@ damediapoll(void *arg) struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; - if (!softc->tur && softc->outstanding_cmds == 0) { + if (!softc->tur && LIST_EMPTY(&softc->pending_ccbs)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { softc->tur = 1; daschedule(periph); @@ -3745,14 +3713,11 @@ dasendorderedtag(void *arg) struct da_softc *softc = arg; if (da_send_ordered) { - if ((softc->ordered_tag_count == 0) - && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) { - softc->flags |= DA_FLAG_NEED_OTAG; + if (!LIST_EMPTY(&softc->pending_ccbs)) { + if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) + softc->flags |= DA_FLAG_NEED_OTAG; + softc->flags &= ~DA_FLAG_WAS_OTAG; } - if (softc->outstanding_cmds > 0) - softc->flags &= ~DA_FLAG_WENT_IDLE; - - softc->ordered_tag_count = 0; } /* Queue us up again */ callout_reset(&softc->sendordered_c, diff --git a/sys/cam/scsi/scsi_enc.c b/sys/cam/scsi/scsi_enc.c index c39061f..2bf6b50 100644 --- a/sys/cam/scsi/scsi_enc.c +++ b/sys/cam/scsi/scsi_enc.c @@ -69,7 +69,6 @@ static periph_init_t enc_init; static periph_ctor_t enc_ctor; static periph_oninv_t enc_oninvalidate; static periph_dtor_t enc_dtor; -static periph_start_t enc_start; static void enc_async(void *, uint32_t, struct cam_path *, void *); static enctyp enc_type(struct ccb_getdev *); @@ -113,17 +112,16 @@ enc_init(void) static void enc_devgonecb(void *arg) { - struct cam_sim *sim; struct cam_periph *periph; struct enc_softc *enc; + struct mtx *mtx; int i; periph = (struct cam_periph *)arg; - sim = periph->sim; + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); enc = (struct enc_softc *)periph->softc; - mtx_lock(sim->mtx); - /* * When we get this callback, we will get no more close calls from * devfs. 
So if we have any dangling opens, we need to release the @@ -140,13 +138,13 @@ enc_devgonecb(void *arg) cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); } static void @@ -243,8 +241,8 @@ enc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) } status = cam_periph_alloc(enc_ctor, enc_oninvalidate, - enc_dtor, enc_start, "ses", CAM_PERIPH_BIO, - cgd->ccb_h.path, enc_async, AC_FOUND_DEVICE, cgd); + enc_dtor, NULL, "ses", CAM_PERIPH_BIO, + path, enc_async, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { printf("enc_async: Unable to probe new device due to " @@ -299,25 +297,23 @@ out: static int enc_close(struct cdev *dev, int flag, int fmt, struct thread *td) { - struct cam_sim *sim; struct cam_periph *periph; struct enc_softc *enc; + struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); - sim = periph->sim; enc = periph->softc; - - mtx_lock(sim->mtx); - enc->open_count--; cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph @@ -328,34 +324,11 @@ enc_close(struct cdev *dev, int flag, int fmt, struct thread *td) * protect the open count and avoid another lock acquisition and * release. 
*/ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); return (0); } -static void -enc_start(struct cam_periph *p, union ccb *sccb) -{ - struct enc_softc *enc; - - enc = p->softc; - ENC_DLOG(enc, "%s enter imm=%d prio=%d\n", - __func__, p->immediate_priority, p->pinfo.priority); - if (p->immediate_priority <= p->pinfo.priority) { - SLIST_INSERT_HEAD(&p->ccb_list, &sccb->ccb_h, periph_links.sle); - p->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&p->ccb_list); - } else - xpt_release_ccb(sccb); - ENC_DLOG(enc, "%s exit\n", __func__); -} - -void -enc_done(struct cam_periph *periph, union ccb *dccb) -{ - wakeup(&dccb->ccb_h.cbfcnp); -} - int enc_error(union ccb *ccb, uint32_t cflags, uint32_t sflags) { @@ -614,7 +587,7 @@ enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp) if (enc->enc_type == ENC_SEMB_SES || enc->enc_type == ENC_SEMB_SAFT) { tdlen = min(dlen, 1020); tdlen = (tdlen + 3) & ~3; - cam_fill_ataio(&ccb->ataio, 0, enc_done, ddf, 0, dptr, tdlen, + cam_fill_ataio(&ccb->ataio, 0, NULL, ddf, 0, dptr, tdlen, 30 * 1000); if (cdb[0] == RECEIVE_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, @@ -632,7 +605,7 @@ enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp) 0x80, tdlen / 4); } else { tdlen = dlen; - cam_fill_csio(&ccb->csio, 0, enc_done, ddf, MSG_SIMPLE_Q_TAG, + cam_fill_csio(&ccb->csio, 0, NULL, ddf, MSG_SIMPLE_Q_TAG, dptr, dlen, sizeof (struct scsi_sense_data), cdbl, 60 * 1000); bcopy(cdb, ccb->csio.cdb_io.cdb_bytes, cdbl); @@ -886,7 +859,7 @@ enc_kproc_init(enc_softc_t *enc) { int result; - callout_init_mtx(&enc->status_updater, enc->periph->sim->mtx, 0); + callout_init_mtx(&enc->status_updater, cam_periph_mtx(enc->periph), 0); if (cam_periph_acquire(enc->periph) != CAM_REQ_CMP) return (ENXIO); diff --git a/sys/cam/scsi/scsi_enc_internal.h b/sys/cam/scsi/scsi_enc_internal.h index f1b8582..3bf8cfb 100644 --- a/sys/cam/scsi/scsi_enc_internal.h +++ b/sys/cam/scsi/scsi_enc_internal.h @@ -192,7 +192,6 @@ struct 
ses_mgmt_mode_page { /* Enclosure core interface for sub-drivers */ int enc_runcmd(struct enc_softc *, char *, int, char *, int *); void enc_log(struct enc_softc *, const char *, ...); -void enc_done(struct cam_periph *, union ccb *); int enc_error(union ccb *, uint32_t, uint32_t); void enc_update_request(enc_softc_t *, uint32_t); diff --git a/sys/cam/scsi/scsi_enc_safte.c b/sys/cam/scsi/scsi_enc_safte.c index 3c98ab8..8282d01 100644 --- a/sys/cam/scsi/scsi_enc_safte.c +++ b/sys/cam/scsi/scsi_enc_safte.c @@ -243,12 +243,12 @@ safte_fill_read_buf_io(enc_softc_t *enc, struct enc_fsm_state *state, if (enc->enc_type == ENC_SEMB_SAFT) { semb_read_buffer(&ccb->ataio, /*retries*/5, - enc_done, MSG_SIMPLE_Q_TAG, + NULL, MSG_SIMPLE_Q_TAG, state->page_code, buf, state->buf_size, state->timeout); } else { scsi_read_buffer(&ccb->csio, /*retries*/5, - enc_done, MSG_SIMPLE_Q_TAG, 1, + NULL, MSG_SIMPLE_Q_TAG, 1, state->page_code, 0, buf, state->buf_size, SSD_FULL_SIZE, state->timeout); } @@ -942,11 +942,11 @@ safte_fill_control_request(enc_softc_t *enc, struct enc_fsm_state *state, if (enc->enc_type == ENC_SEMB_SAFT) { semb_write_buffer(&ccb->ataio, /*retries*/5, - enc_done, MSG_SIMPLE_Q_TAG, + NULL, MSG_SIMPLE_Q_TAG, buf, xfer_len, state->timeout); } else { scsi_write_buffer(&ccb->csio, /*retries*/5, - enc_done, MSG_SIMPLE_Q_TAG, 1, + NULL, MSG_SIMPLE_Q_TAG, 1, 0, 0, buf, xfer_len, SSD_FULL_SIZE, state->timeout); } diff --git a/sys/cam/scsi/scsi_enc_ses.c b/sys/cam/scsi/scsi_enc_ses.c index 6917fff..5393244 100644 --- a/sys/cam/scsi/scsi_enc_ses.c +++ b/sys/cam/scsi/scsi_enc_ses.c @@ -888,7 +888,6 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem, struct device_match_result *device_match; struct device_match_pattern *device_pattern; ses_path_iter_args_t *args; - struct cam_sim *sim; args = (ses_path_iter_args_t *)arg; match_pattern.type = DEV_MATCH_DEVICE; @@ -901,10 +900,10 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem, 
device_pattern->data.devid_pat.id_len); memset(&cdm, 0, sizeof(cdm)); - if (xpt_create_path_unlocked(&cdm.ccb_h.path, /*periph*/NULL, - CAM_XPT_PATH_ID, - CAM_TARGET_WILDCARD, - CAM_LUN_WILDCARD) != CAM_REQ_CMP) + if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL, + CAM_XPT_PATH_ID, + CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD) != CAM_REQ_CMP) return; cdm.ccb_h.func_code = XPT_DEV_MATCH; @@ -914,11 +913,8 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem, cdm.match_buf_len = sizeof(match_result); cdm.matches = &match_result; - sim = xpt_path_sim(cdm.ccb_h.path); - CAM_SIM_LOCK(sim); xpt_action((union ccb *)&cdm); xpt_free_path(cdm.ccb_h.path); - CAM_SIM_UNLOCK(sim); if ((cdm.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP || (cdm.status != CAM_DEV_MATCH_LAST @@ -927,18 +923,15 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem, return; device_match = &match_result.result.device_result; - if (xpt_create_path_unlocked(&cdm.ccb_h.path, /*periph*/NULL, - device_match->path_id, - device_match->target_id, - device_match->target_lun) != CAM_REQ_CMP) + if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL, + device_match->path_id, + device_match->target_id, + device_match->target_lun) != CAM_REQ_CMP) return; args->callback(enc, elem, cdm.ccb_h.path, args->callback_arg); - sim = xpt_path_sim(cdm.ccb_h.path); - CAM_SIM_LOCK(sim); xpt_free_path(cdm.ccb_h.path); - CAM_SIM_UNLOCK(sim); } /** @@ -1186,7 +1179,7 @@ ses_set_timed_completion(enc_softc_t *enc, uint8_t tc_en) if (mode_buf == NULL) goto out; - scsi_mode_sense(&ccb->csio, /*retries*/4, enc_done, MSG_SIMPLE_Q_TAG, + scsi_mode_sense(&ccb->csio, /*retries*/4, NULL, MSG_SIMPLE_Q_TAG, /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SES_MGMT_MODE_PAGE_CODE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000); @@ -1214,7 +1207,7 @@ ses_set_timed_completion(enc_softc_t *enc, uint8_t tc_en) /* SES2r20: a completion time of zero means as long as possible */ bzero(&mgmt->max_comp_time, 
sizeof(mgmt->max_comp_time)); - scsi_mode_select(&ccb->csio, 5, enc_done, MSG_SIMPLE_Q_TAG, + scsi_mode_select(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG, /*page_fmt*/FALSE, /*save_pages*/TRUE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000); @@ -2030,12 +2023,12 @@ ses_fill_rcv_diag_io(enc_softc_t *enc, struct enc_fsm_state *state, if (enc->enc_type == ENC_SEMB_SES) { semb_receive_diagnostic_results(&ccb->ataio, /*retries*/5, - enc_done, MSG_SIMPLE_Q_TAG, /*pcv*/1, + NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1, state->page_code, buf, state->buf_size, state->timeout); } else { scsi_receive_diagnostic_results(&ccb->csio, /*retries*/5, - enc_done, MSG_SIMPLE_Q_TAG, /*pcv*/1, + NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1, state->page_code, buf, state->buf_size, SSD_FULL_SIZE, state->timeout); } @@ -2153,12 +2146,12 @@ ses_fill_control_request(enc_softc_t *enc, struct enc_fsm_state *state, /* Fill out the ccb */ if (enc->enc_type == ENC_SEMB_SES) { - semb_send_diagnostic(&ccb->ataio, /*retries*/5, enc_done, + semb_send_diagnostic(&ccb->ataio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, buf, ses_page_length(&ses_cache->status_page->hdr), state->timeout); } else { - scsi_send_diagnostic(&ccb->csio, /*retries*/5, enc_done, + scsi_send_diagnostic(&ccb->csio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, /*unit_offline*/0, /*device_offline*/0, /*self_test*/0, /*page_format*/1, /*self_test_code*/0, diff --git a/sys/cam/scsi/scsi_pass.c b/sys/cam/scsi/scsi_pass.c index c135e68..158e90f 100644 --- a/sys/cam/scsi/scsi_pass.c +++ b/sys/cam/scsi/scsi_pass.c @@ -65,8 +65,7 @@ typedef enum { } pass_state; typedef enum { - PASS_CCB_BUFFER_IO, - PASS_CCB_WAITING + PASS_CCB_BUFFER_IO } pass_ccb_types; #define ccb_type ppriv_field0 @@ -94,12 +93,9 @@ static periph_init_t passinit; static periph_ctor_t passregister; static periph_oninv_t passoninvalidate; static periph_dtor_t passcleanup; -static periph_start_t passstart; static void pass_add_physpath(void *context, int pending); static void passasync(void 
*callback_arg, u_int32_t code, struct cam_path *path, void *arg); -static void passdone(struct cam_periph *periph, - union ccb *done_ccb); static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int passsendccb(struct cam_periph *periph, union ccb *ccb, @@ -143,20 +139,19 @@ passinit(void) static void passdevgonecb(void *arg) { - struct cam_sim *sim; struct cam_periph *periph; + struct mtx *mtx; struct pass_softc *softc; int i; periph = (struct cam_periph *)arg; - sim = periph->sim; - softc = (struct pass_softc *)periph->softc; + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); + softc = (struct pass_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); - mtx_lock(sim->mtx); - /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the @@ -173,13 +168,13 @@ passdevgonecb(void *arg) cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); } static void @@ -295,8 +290,8 @@ passasync(void *callback_arg, u_int32_t code, * process. 
*/ status = cam_periph_alloc(passregister, passoninvalidate, - passcleanup, passstart, "pass", - CAM_PERIPH_BIO, cgd->ccb_h.path, + passcleanup, NULL, "pass", + CAM_PERIPH_BIO, path, passasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -498,25 +493,23 @@ passopen(struct cdev *dev, int flags, int fmt, struct thread *td) static int passclose(struct cdev *dev, int flag, int fmt, struct thread *td) { - struct cam_sim *sim; struct cam_periph *periph; struct pass_softc *softc; + struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); - sim = periph->sim; softc = periph->softc; - - mtx_lock(sim->mtx); - softc->open_count--; cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph @@ -527,46 +520,11 @@ passclose(struct cdev *dev, int flag, int fmt, struct thread *td) * protect the open count and avoid another lock acquisition and * release. 
*/ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); return (0); } -static void -passstart(struct cam_periph *periph, union ccb *start_ccb) -{ - struct pass_softc *softc; - - softc = (struct pass_softc *)periph->softc; - - switch (softc->state) { - case PASS_STATE_NORMAL: - start_ccb->ccb_h.ccb_type = PASS_CCB_WAITING; - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - break; - } -} - -static void -passdone(struct cam_periph *periph, union ccb *done_ccb) -{ - struct pass_softc *softc; - struct ccb_scsiio *csio; - - softc = (struct pass_softc *)periph->softc; - csio = &done_ccb->csio; - switch (csio->ccb_h.ccb_type) { - case PASS_CCB_WAITING: - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } - xpt_release_ccb(done_ccb); -} - static int passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { @@ -686,12 +644,6 @@ passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb) xpt_merge_ccb(ccb, inccb); /* - * There's no way for the user to have a completion - * function, so we put our own completion function in here. - */ - ccb->ccb_h.cbfcnp = passdone; - - /* * Let cam_periph_mapmem do a sanity check on the data pointer format. * Even if no data transfer is needed, it's a cheap check and it * simplifies the code. 
diff --git a/sys/cam/scsi/scsi_pt.c b/sys/cam/scsi/scsi_pt.c index 8d8feb2..f34748c 100644 --- a/sys/cam/scsi/scsi_pt.c +++ b/sys/cam/scsi/scsi_pt.c @@ -66,7 +66,6 @@ typedef enum { typedef enum { PT_CCB_BUFFER_IO = 0x01, - PT_CCB_WAITING = 0x02, PT_CCB_RETRY_UA = 0x04, PT_CCB_BUFFER_IO_UA = PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA } pt_ccb_state; @@ -378,7 +377,7 @@ ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) */ status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor, ptstart, "pt", CAM_PERIPH_BIO, - cgd->ccb_h.path, ptasync, + path, ptasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -423,15 +422,7 @@ ptstart(struct cam_periph *periph, union ccb *start_ccb) * See if there is a buf with work for us to do.. */ bp = bioq_first(&softc->bio_queue); - if (periph->immediate_priority <= periph->pinfo.priority) { - CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, - ("queuing for immediate ccb\n")); - start_ccb->ccb_h.ccb_state = PT_CCB_WAITING; - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - } else if (bp == NULL) { + if (bp == NULL) { xpt_release_ccb(start_ccb); } else { bioq_remove(&softc->bio_queue, bp); @@ -554,10 +545,6 @@ ptdone(struct cam_periph *periph, union ccb *done_ccb) biofinish(bp, softc->device_stats, 0); break; } - case PT_CCB_WAITING: - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; } xpt_release_ccb(done_ccb); } diff --git a/sys/cam/scsi/scsi_sa.c b/sys/cam/scsi/scsi_sa.c index d91246b..c1cd0f0 100644 --- a/sys/cam/scsi/scsi_sa.c +++ b/sys/cam/scsi/scsi_sa.c @@ -114,7 +114,6 @@ typedef enum { #define ccb_bp ppriv_ptr1 #define SA_CCB_BUFFER_IO 0x0 -#define SA_CCB_WAITING 0x1 #define SA_CCB_TYPEMASK 0x1 #define SA_POSITION_UPDATED 0x2 @@ -1453,7 +1452,7 @@ saasync(void *callback_arg, u_int32_t code, */ status = cam_periph_alloc(saregister, saoninvalidate, sacleanup, sastart, - "sa", 
CAM_PERIPH_BIO, cgd->ccb_h.path, + "sa", CAM_PERIPH_BIO, path, saasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP @@ -1722,15 +1721,7 @@ sastart(struct cam_periph *periph, union ccb *start_ccb) * See if there is a buf with work for us to do.. */ bp = bioq_first(&softc->bio_queue); - if (periph->immediate_priority <= periph->pinfo.priority) { - CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, - ("queuing for immediate ccb\n")); - Set_CCB_Type(start_ccb, SA_CCB_WAITING); - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - } else if (bp == NULL) { + if (bp == NULL) { xpt_release_ccb(start_ccb); } else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) { struct bio *done_bp; @@ -1953,12 +1944,6 @@ sadone(struct cam_periph *periph, union ccb *done_ccb) biofinish(bp, softc->device_stats, 0); break; } - case SA_CCB_WAITING: - { - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } } xpt_release_ccb(done_ccb); } @@ -2545,7 +2530,8 @@ saerror(union ccb *ccb, u_int32_t cflgs, u_int32_t sflgs) /* * If a read/write command, we handle it here. 
*/ - if (CCB_Type(csio) != SA_CCB_WAITING) { + if (csio->cdb_io.cdb_bytes[0] == SA_READ || + csio->cdb_io.cdb_bytes[0] == SA_WRITE) { break; } /* diff --git a/sys/cam/scsi/scsi_sg.c b/sys/cam/scsi/scsi_sg.c index b597b64..f1cb75b 100644 --- a/sys/cam/scsi/scsi_sg.c +++ b/sys/cam/scsi/scsi_sg.c @@ -76,8 +76,7 @@ typedef enum { } sg_rdwr_state; typedef enum { - SG_CCB_RDWR_IO, - SG_CCB_WAITING + SG_CCB_RDWR_IO } sg_ccb_types; #define ccb_type ppriv_field0 @@ -119,7 +118,6 @@ static periph_init_t sginit; static periph_ctor_t sgregister; static periph_oninv_t sgoninvalidate; static periph_dtor_t sgcleanup; -static periph_start_t sgstart; static void sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static void sgdone(struct cam_periph *periph, union ccb *done_ccb); @@ -172,20 +170,19 @@ sginit(void) static void sgdevgonecb(void *arg) { - struct cam_sim *sim; struct cam_periph *periph; struct sg_softc *softc; + struct mtx *mtx; int i; periph = (struct cam_periph *)arg; - sim = periph->sim; - softc = (struct sg_softc *)periph->softc; + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); + softc = (struct sg_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); - mtx_lock(sim->mtx); - /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the @@ -202,13 +199,13 @@ sgdevgonecb(void *arg) cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. 
*/ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); } @@ -277,8 +274,8 @@ sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) * start the probe process. */ status = cam_periph_alloc(sgregister, sgoninvalidate, - sgcleanup, sgstart, "sg", - CAM_PERIPH_BIO, cgd->ccb_h.path, + sgcleanup, NULL, "sg", + CAM_PERIPH_BIO, path, sgasync, AC_FOUND_DEVICE, cgd); if ((status != CAM_REQ_CMP) && (status != CAM_REQ_INPROG)) { const struct cam_status_entry *entry; @@ -383,24 +380,6 @@ sgregister(struct cam_periph *periph, void *arg) } static void -sgstart(struct cam_periph *periph, union ccb *start_ccb) -{ - struct sg_softc *softc; - - softc = (struct sg_softc *)periph->softc; - - switch (softc->state) { - case SG_STATE_NORMAL: - start_ccb->ccb_h.ccb_type = SG_CCB_WAITING; - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - break; - } -} - -static void sgdone(struct cam_periph *periph, union ccb *done_ccb) { struct sg_softc *softc; @@ -409,10 +388,6 @@ sgdone(struct cam_periph *periph, union ccb *done_ccb) softc = (struct sg_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { - case SG_CCB_WAITING: - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; case SG_CCB_RDWR_IO: { struct sg_rdwr *rdwr; @@ -480,25 +455,23 @@ sgopen(struct cdev *dev, int flags, int fmt, struct thread *td) static int sgclose(struct cdev *dev, int flag, int fmt, struct thread *td) { - struct cam_sim *sim; struct cam_periph *periph; struct sg_softc *softc; + struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); + mtx = cam_periph_mtx(periph); + mtx_lock(mtx); - sim = periph->sim; softc = periph->softc; - - mtx_lock(sim->mtx); - softc->open_count--; cam_periph_release_locked(periph); /* - * We reference the SIM lock directly here, instead of using + * We reference the lock directly here, 
instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph @@ -509,7 +482,7 @@ sgclose(struct cdev *dev, int flag, int fmt, struct thread *td) * protect the open count and avoid another lock acquisition and * release. */ - mtx_unlock(sim->mtx); + mtx_unlock(mtx); return (0); } @@ -879,7 +852,7 @@ search: break; } if ((rdwr == NULL) || (rdwr->state != SG_RDWR_DONE)) { - if (msleep(rdwr, periph->sim->mtx, PCATCH, "sgread", 0) == ERESTART) + if (cam_periph_sleep(periph, rdwr, PCATCH, "sgread", 0) == ERESTART) return (EAGAIN); goto search; } diff --git a/sys/cam/scsi/scsi_targ_bh.c b/sys/cam/scsi/scsi_targ_bh.c index bcf4eea..5a4a39b 100644 --- a/sys/cam/scsi/scsi_targ_bh.c +++ b/sys/cam/scsi/scsi_targ_bh.c @@ -65,8 +65,7 @@ typedef enum { } targbh_flags; typedef enum { - TARGBH_CCB_WORKQ, - TARGBH_CCB_WAITING + TARGBH_CCB_WORKQ } targbh_ccb_types; #define MAX_ACCEPT 8 @@ -431,7 +430,7 @@ targbhdtor(struct cam_periph *periph) /* FALLTHROUGH */ default: /* XXX Wait for callback of targbhdislun() */ - msleep(softc, periph->sim->mtx, PRIBIO, "targbh", hz/2); + cam_periph_sleep(periph, softc, PRIBIO, "targbh", hz/2); free(softc, M_SCSIBH); break; } @@ -450,13 +449,7 @@ targbhstart(struct cam_periph *periph, union ccb *start_ccb) softc = (struct targbh_softc *)periph->softc; ccbh = TAILQ_FIRST(&softc->work_queue); - if (periph->immediate_priority <= periph->pinfo.priority) { - start_ccb->ccb_h.ccb_type = TARGBH_CCB_WAITING; - SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, - periph_links.sle); - periph->immediate_priority = CAM_PRIORITY_NONE; - wakeup(&periph->ccb_list); - } else if (ccbh == NULL) { + if (ccbh == NULL) { xpt_release_ccb(start_ccb); } else { TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe); @@ -535,12 +528,6 @@ targbhdone(struct cam_periph *periph, union ccb *done_ccb) softc = (struct targbh_softc 
*)periph->softc; - if (done_ccb->ccb_h.ccb_type == TARGBH_CCB_WAITING) { - /* Caller will release the CCB */ - wakeup(&done_ccb->ccb_h.cbfcnp); - return; - } - switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { diff --git a/sys/cam/scsi/scsi_target.c b/sys/cam/scsi/scsi_target.c index 4b4ad78..42dc152 100644 --- a/sys/cam/scsi/scsi_target.c +++ b/sys/cam/scsi/scsi_target.c @@ -236,23 +236,21 @@ targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t { struct ioc_enable_lun *new_lun; struct cam_path *path; - struct cam_sim *sim; new_lun = (struct ioc_enable_lun *)addr; - status = xpt_create_path_unlocked(&path, /*periph*/NULL, - new_lun->path_id, - new_lun->target_id, - new_lun->lun_id); + status = xpt_create_path(&path, /*periph*/NULL, + new_lun->path_id, + new_lun->target_id, + new_lun->lun_id); if (status != CAM_REQ_CMP) { printf("Couldn't create path, status %#x\n", status); break; } - sim = xpt_path_sim(path); - mtx_lock(sim->mtx); + xpt_path_lock(path); status = targenable(softc, path, new_lun->grp6_len, new_lun->grp7_len); + xpt_path_unlock(path); xpt_free_path(path); - mtx_unlock(sim->mtx); break; } case TARGIOCDISABLE: @@ -278,13 +276,10 @@ targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t cdbg.flags = CAM_DEBUG_PERIPH; else cdbg.flags = CAM_DEBUG_NONE; - cam_periph_lock(softc->periph); xpt_setup_ccb(&cdbg.ccb_h, softc->path, CAM_PRIORITY_NORMAL); cdbg.ccb_h.func_code = XPT_DEBUG; cdbg.ccb_h.cbfcnp = targdone; - xpt_action((union ccb *)&cdbg); - cam_periph_unlock(softc->periph); status = cdbg.ccb_h.status & CAM_STATUS_MASK; break; } @@ -823,7 +818,7 @@ targread(struct cdev *dev, struct uio *uio, int ioflag) user_descr = TAILQ_FIRST(abort_queue); while (ccb_h == NULL && user_descr == NULL) { if ((ioflag & IO_NDELAY) == 0) { - error = msleep(user_queue, softc->periph->sim->mtx, + error = cam_periph_sleep(softc->periph, user_queue, PRIBIO | PCATCH, "targrd", 0); ccb_h = 
TAILQ_FIRST(user_queue); user_descr = TAILQ_FIRST(abort_queue); @@ -1019,7 +1014,6 @@ abort_all_pending(struct targ_softc *softc) struct targ_cmd_descr *descr; struct ccb_abort cab; struct ccb_hdr *ccb_h; - struct cam_sim *sim; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n")); @@ -1052,8 +1046,7 @@ abort_all_pending(struct targ_softc *softc) /* If we aborted at least one pending CCB ok, wait for it. */ if (cab.ccb_h.status == CAM_REQ_CMP) { - sim = xpt_path_sim(softc->path); - msleep(&softc->pending_ccb_queue, sim->mtx, + cam_periph_sleep(softc->periph, &softc->pending_ccb_queue, PRIBIO | PCATCH, "tgabrt", 0); } diff --git a/sys/cam/scsi/scsi_xpt.c b/sys/cam/scsi/scsi_xpt.c index af0032e..69752e1 100644 --- a/sys/cam/scsi/scsi_xpt.c +++ b/sys/cam/scsi/scsi_xpt.c @@ -583,7 +583,7 @@ static struct cam_ed * lun_id_t lun_id); static void scsi_devise_transport(struct cam_path *path); static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, - struct cam_ed *device, + struct cam_path *path, int async_update); static void scsi_toggle_tags(struct cam_path *path); static void scsi_dev_async(u_int32_t async_code, @@ -645,6 +645,7 @@ proberegister(struct cam_periph *periph, void *arg) return (status); } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); + scsi_devise_transport(periph->path); /* * Ensure we've waited at least a bus settle @@ -1719,11 +1720,12 @@ probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, if (path->target == NULL) { return; } - if (path->target->luns == NULL) { - path->target->luns = new; - return; - } + mtx_lock(&path->target->luns_mtx); old = path->target->luns; + path->target->luns = new; + mtx_unlock(&path->target->luns_mtx); + if (old == NULL) + return; nlun_old = scsi_4btoul(old->length) / 8; nlun_new = scsi_4btoul(new->length) / 8; @@ -1774,7 +1776,6 @@ probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, } } free(old, M_CAMXPT); - path->target->luns = new; } 
static void @@ -1836,6 +1837,8 @@ typedef struct { static void scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) { + struct mtx *mtx; + CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("scsi_scan_bus\n")); switch (request_ccb->ccb_h.func_code) { @@ -1903,6 +1906,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT); if (scan_info == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } @@ -1933,6 +1937,8 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) scan_info->counter--; } } + mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); + mtx_unlock(mtx); for (i = low_target; i <= max_target; i++) { cam_status status; @@ -1965,10 +1971,13 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = scsi_scan_bus; + work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = request_ccb->crcn.flags; xpt_action(work_ccb); } + + mtx_lock(mtx); break; } case XPT_SCAN_LUN: @@ -2001,6 +2010,9 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) target = request_ccb->ccb_h.path->target; next_target = 1; + mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); + mtx_lock(mtx); + mtx_lock(&target->luns_mtx); if (target->luns) { lun_id_t first; u_int nluns = scsi_4btoul(target->luns->length) / 8; @@ -2042,6 +2054,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) } if (scan_info->lunindex[target_id] < nluns) { + mtx_unlock(&target->luns_mtx); next_target = 0; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_PROBE, @@ -2050,6 +2063,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) (uintmax_t)lun_id)); scan_info->lunindex[target_id]++; } else { + mtx_unlock(&target->luns_mtx); /* * We're done with 
scanning all luns. * @@ -2068,7 +2082,9 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) } } } - } else if (request_ccb->ccb_h.status != CAM_REQ_CMP) { + } else { + mtx_unlock(&target->luns_mtx); + if (request_ccb->ccb_h.status != CAM_REQ_CMP) { int phl; /* @@ -2100,7 +2116,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) if (lun_id == request_ccb->ccb_h.target_lun || lun_id > scan_info->cpi->max_lun) next_target = 1; - } else { + } else { device = request_ccb->ccb_h.path->device; @@ -2116,6 +2132,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) if (lun_id == request_ccb->ccb_h.target_lun || lun_id > scan_info->cpi->max_lun) next_target = 1; + } } /* @@ -2149,6 +2166,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) } } if (done) { + mtx_unlock(mtx); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; @@ -2162,6 +2180,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) } if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) { + mtx_unlock(mtx); xpt_free_ccb(request_ccb); break; } @@ -2169,6 +2188,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { + mtx_unlock(mtx); printf("scsi_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); @@ -2184,6 +2204,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; + request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; @@ -2207,10 +2228,12 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = 
scsi_scan_bus; + request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } + mtx_unlock(mtx); xpt_action(request_ccb); break; } @@ -2227,6 +2250,7 @@ scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; + int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n")); @@ -2274,9 +2298,13 @@ scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; + request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->crcn.flags = flags; } + lock = (xpt_path_owned(path) == 0); + if (lock) + xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { probe_softc *softc; @@ -2302,6 +2330,8 @@ scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, xpt_done(request_ccb); } } + if (lock) + xpt_path_unlock(path); } static void @@ -2315,7 +2345,6 @@ xptscandone(struct cam_periph *periph, union ccb *done_ccb) static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { - struct cam_path path; struct scsi_quirk_entry *quirk; struct cam_ed *device; @@ -2340,22 +2369,6 @@ scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) device->device_id_len = 0; device->supported_vpds = NULL; device->supported_vpds_len = 0; - - /* - * XXX should be limited by number of CCBs this bus can - * do. 
- */ - bus->sim->max_ccbs += device->ccbq.devq_openings; - if (lun_id != CAM_LUN_WILDCARD) { - xpt_compile_path(&path, - NULL, - bus->path_id, - target->target_id, - lun_id); - scsi_devise_transport(&path); - xpt_release_path(&path); - } - return (device); } @@ -2534,15 +2547,8 @@ scsi_dev_advinfo(union ccb *start_ccb) start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { - int owned; - - owned = mtx_owned(start_ccb->ccb_h.path->bus->sim->mtx); - if (owned == 0) - mtx_lock(start_ccb->ccb_h.path->bus->sim->mtx); xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); - if (owned == 0) - mtx_unlock(start_ccb->ccb_h.path->bus->sim->mtx); } } @@ -2554,7 +2560,7 @@ scsi_action(union ccb *start_ccb) case XPT_SET_TRAN_SETTINGS: { scsi_set_transfer_settings(&start_ccb->cts, - start_ccb->ccb_h.path->device, + start_ccb->ccb_h.path, /*async_update*/FALSE); break; } @@ -2567,14 +2573,6 @@ scsi_action(union ccb *start_ccb) start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; - case XPT_GET_TRAN_SETTINGS: - { - struct cam_sim *sim; - - sim = start_ccb->ccb_h.path->bus->sim; - (*(sim->sim_action))(sim, start_ccb); - break; - } case XPT_DEV_ADVINFO: { scsi_dev_advinfo(start_ccb); @@ -2587,17 +2585,17 @@ scsi_action(union ccb *start_ccb) } static void -scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, +scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings cur_cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_scsi *cur_scsi; - struct cam_sim *sim; struct scsi_inquiry_data *inq_data; + struct cam_ed *device; - if (device == NULL) { + if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; @@ -2614,14 +2612,14 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device 
cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { - xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n", + xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { - xpt_print(cts->ccb_h.path, "Down reving Protocol " + xpt_print(path, "Down reving Protocol " "Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } @@ -2639,22 +2637,20 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device cts->transport_version = device->transport_version; if (cts->transport != device->transport) { - xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n", + xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { - xpt_print(cts->ccb_h.path, "Down reving Transport " + xpt_print(path, "Down reving Transport " "Version from %d to %d?\n", cts->transport_version, device->transport_version); } cts->transport_version = device->transport_version; } - sim = cts->ccb_h.path->bus->sim; - /* * Nothing more of interest to do unless * this is a device connected via the @@ -2662,13 +2658,13 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device */ if (cts->protocol != PROTO_SCSI) { if (async_update == FALSE) - (*(sim->sim_action))(sim, (union ccb *)cts); + xpt_action_default((union ccb *)cts); return; } inq_data = &device->inq_data; scsi = &cts->proto_specific.scsi; - xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE); + xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); @@ -2689,7 +2685,7 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device * Perform sanity checking 
against what the * controller and device can do. */ - xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE); + xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE); cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cur_cts.type = cts->type; xpt_action((union ccb *)&cur_cts); @@ -2810,7 +2806,7 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device && (spi->flags & (CTS_SPI_VALID_SYNC_RATE| CTS_SPI_VALID_SYNC_OFFSET| CTS_SPI_VALID_BUS_WIDTH)) != 0) - scsi_toggle_tags(cts->ccb_h.path); + scsi_toggle_tags(path); } if (cts->type == CTS_TYPE_CURRENT_SETTINGS @@ -2847,12 +2843,12 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else { - xpt_stop_tags(cts->ccb_h.path); + xpt_stop_tags(path); } } } if (async_update == FALSE) - (*(sim->sim_action))(sim, (union ccb *)cts); + xpt_action_default((union ccb *)cts); } static void @@ -2880,10 +2876,10 @@ scsi_toggle_tags(struct cam_path *path) cts.transport_version = XPORT_VERSION_UNSPECIFIED; cts.proto_specific.scsi.flags = 0; cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; - scsi_set_transfer_settings(&cts, path->device, + scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; - scsi_set_transfer_settings(&cts, path->device, + scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); } } @@ -2954,10 +2950,14 @@ scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; + struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; - scsi_set_transfer_settings(settings, device, + xpt_compile_path(&path, NULL, bus->path_id, target->target_id, + device->lun_id); + scsi_set_transfer_settings(settings, &path, /*async_update*/TRUE); + xpt_release_path(&path); } 
} @@ -2971,7 +2971,7 @@ scsi_announce_periph(struct cam_periph *periph) u_int freq; u_int mb; - mtx_assert(periph->sim->mtx, MA_OWNED); + cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c index d2ed691..e7efe59 100644 --- a/sys/dev/ahci/ahci.c +++ b/sys/dev/ahci/ahci.c @@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$"); static int ahci_setup_interrupt(device_t dev); static void ahci_intr(void *data); static void ahci_intr_one(void *data); +static void ahci_intr_one_edge(void *data); static int ahci_suspend(device_t dev); static int ahci_resume(device_t dev); static int ahci_ch_init(device_t dev); @@ -62,8 +63,9 @@ static int ahci_ch_deinit(device_t dev); static int ahci_ch_suspend(device_t dev); static int ahci_ch_resume(device_t dev); static void ahci_ch_pm(void *arg); -static void ahci_ch_intr_locked(void *data); -static void ahci_ch_intr(void *data); +static void ahci_ch_intr(void *arg); +static void ahci_ch_intr_direct(void *arg); +static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); static int ahci_ctlr_reset(device_t dev); static int ahci_ctlr_setup(device_t dev); static void ahci_begin_transaction(device_t dev, union ccb *ccb); @@ -430,6 +432,7 @@ ahci_attach(device_t dev) struct ahci_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, speed, i; + u_int u; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); u_int32_t version; @@ -529,6 +532,12 @@ ahci_attach(device_t dev) rman_fini(&ctlr->sc_iomem); return ENXIO; } + i = 0; + for (u = ctlr->ichannels; u != 0; u >>= 1) + i += (u & 1); + ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); + resource_int_value(device_get_name(dev), device_get_unit(dev), + "direct", &ctlr->direct); /* Announce HW capabilities. 
*/ speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; device_printf(dev, @@ -710,24 +719,26 @@ static int ahci_setup_interrupt(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); - int i, msi = 1; + int i; + ctlr->msi = 2; /* Process hints. */ if (ctlr->quirks & AHCI_Q_NOMSI) - msi = 0; + ctlr->msi = 0; resource_int_value(device_get_name(dev), - device_get_unit(dev), "msi", &msi); - if (msi < 0) - msi = 0; - else if (msi == 1) - msi = min(1, pci_msi_count(dev)); - else if (msi > 1) - msi = pci_msi_count(dev); + device_get_unit(dev), "msi", &ctlr->msi); + ctlr->numirqs = 1; + if (ctlr->msi < 0) + ctlr->msi = 0; + else if (ctlr->msi == 1) + ctlr->msi = min(1, pci_msi_count(dev)); + else if (ctlr->msi > 1) { + ctlr->msi = 2; + ctlr->numirqs = pci_msi_count(dev); + } /* Allocate MSI if needed/present. */ - if (msi && pci_alloc_msi(dev, &msi) == 0) { - ctlr->numirqs = msi; - } else { - msi = 0; + if (ctlr->msi && pci_alloc_msi(dev, &ctlr->numirqs) != 0) { + ctlr->msi = 0; ctlr->numirqs = 1; } /* Check for single MSI vector fallback. */ @@ -739,7 +750,7 @@ ahci_setup_interrupt(device_t dev) /* Allocate all IRQs. */ for (i = 0; i < ctlr->numirqs; i++) { ctlr->irqs[i].ctlr = ctlr; - ctlr->irqs[i].r_irq_rid = i + (msi ? 1 : 0); + ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0); if (ctlr->numirqs == 1 || i >= ctlr->channels || (ctlr->ccc && i == ctlr->cccv)) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; @@ -753,7 +764,9 @@ ahci_setup_interrupt(device_t dev) return ENXIO; } if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, - (ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE) ? ahci_intr_one : ahci_intr, + (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : + ((ctlr->quirks & AHCI_Q_EDGEIS) ? 
ahci_intr_one_edge : + ahci_intr_one), &ctlr->irqs[i], &ctlr->irqs[i].handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); @@ -822,14 +835,25 @@ ahci_intr_one(void *data) int unit; unit = irq->r_irq_rid - 1; - /* Some controllers have edge triggered IS. */ - if (ctlr->quirks & AHCI_Q_EDGEIS) - ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); /* AHCI declares level triggered IS. */ - if (!(ctlr->quirks & AHCI_Q_EDGEIS)) - ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); + ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); +} + +static void +ahci_intr_one_edge(void *data) +{ + struct ahci_controller_irq *irq = data; + struct ahci_controller *ctlr = irq->ctlr; + void *arg; + int unit; + + unit = irq->r_irq_rid - 1; + /* Some controllers have edge triggered IS. */ + ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); + if ((arg = ctlr->interrupt[unit].argument)) + ctlr->interrupt[unit].function(arg); } static struct resource * @@ -1033,6 +1057,7 @@ ahci_ch_attach(device_t dev) mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); + STAILQ_INIT(&ch->doneq); if (ch->pm_level > 3) callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); @@ -1078,7 +1103,8 @@ ahci_ch_attach(device_t dev) goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, - ahci_ch_intr_locked, dev, &ch->ih))) { + ctlr->direct ? 
ahci_ch_intr_direct : ahci_ch_intr, + dev, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; @@ -1501,16 +1527,58 @@ ahci_notify_events(device_t dev, u_int32_t status) } static void -ahci_ch_intr_locked(void *data) +ahci_done(struct ahci_channel *ch, union ccb *ccb) +{ + + mtx_assert(&ch->mtx, MA_OWNED); + if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || + ch->batch == 0) { + xpt_done(ccb); + return; + } + + STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); +} + +static void +ahci_ch_intr(void *arg) +{ + device_t dev = (device_t)arg; + struct ahci_channel *ch = device_get_softc(dev); + uint32_t istatus; + + /* Read interrupt statuses. */ + istatus = ATA_INL(ch->r_mem, AHCI_P_IS); + if (istatus == 0) + return; + + mtx_lock(&ch->mtx); + ahci_ch_intr_main(ch, istatus); + mtx_unlock(&ch->mtx); +} + +static void +ahci_ch_intr_direct(void *arg) { - device_t dev = (device_t)data; + device_t dev = (device_t)arg; struct ahci_channel *ch = device_get_softc(dev); + struct ccb_hdr *ccb_h; + uint32_t istatus; + + /* Read interrupt statuses. */ + istatus = ATA_INL(ch->r_mem, AHCI_P_IS); + if (istatus == 0) + return; mtx_lock(&ch->mtx); - xpt_batch_start(ch->sim); - ahci_ch_intr(data); - xpt_batch_done(ch->sim); + ch->batch = 1; + ahci_ch_intr_main(ch, istatus); + ch->batch = 0; mtx_unlock(&ch->mtx); + while ((ccb_h = STAILQ_FIRST(&ch->doneq)) != NULL) { + STAILQ_REMOVE_HEAD(&ch->doneq, sim_links.stqe); + xpt_done_direct((union ccb *)ccb_h); + } } static void @@ -1531,18 +1599,14 @@ ahci_ch_pm(void *arg) } static void -ahci_ch_intr(void *data) +ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) { - device_t dev = (device_t)data; - struct ahci_channel *ch = device_get_softc(dev); - uint32_t istatus, cstatus, serr = 0, sntf = 0, ok, err; + device_t dev = ch->dev; + uint32_t cstatus, serr = 0, sntf = 0, ok, err; enum ahci_err_type et; int i, ccs, port, reset = 0; - /* Read and clear interrupt statuses. 
*/ - istatus = ATA_INL(ch->r_mem, AHCI_P_IS); - if (istatus == 0) - return; + /* Clear interrupt statuses. */ ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); /* Read command statuses. */ if (ch->numtslots != 0) @@ -1634,7 +1698,7 @@ ahci_ch_intr(void *data) xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } - xpt_done(fccb); + ahci_done(ch, fccb); } for (i = 0; i < ch->numslots; i++) { /* XXX: reqests in loading state. */ @@ -2043,7 +2107,7 @@ ahci_timeout(struct ahci_slot *slot) xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } - xpt_done(fccb); + ahci_done(ch, fccb); } if (!ch->fbs_enabled && !ch->wrongccs) { /* Without FBS we know real timeout source. */ @@ -2249,7 +2313,7 @@ ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) ch->hold[slot->slot] = ccb; ch->numhslots++; } else - xpt_done(ccb); + ahci_done(ch, ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there was fatal error - reset port. 
*/ @@ -2309,7 +2373,7 @@ completeall: continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; - xpt_done(ch->hold[i]); + ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } @@ -2397,7 +2461,7 @@ ahci_process_read_log(device_t dev, union ccb *ccb) ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } - xpt_done(ch->hold[i]); + ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } @@ -2412,7 +2476,7 @@ ahci_process_read_log(device_t dev, union ccb *ccb) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; - xpt_done(ch->hold[i]); + ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } @@ -2437,7 +2501,7 @@ ahci_process_request_sense(device_t dev, union ccb *ccb) ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } - xpt_done(ch->hold[i]); + ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); @@ -2621,7 +2685,7 @@ ahci_reset(device_t dev) xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } - xpt_done(fccb); + ahci_done(ch, fccb); } /* Kill the engine and requeue all running commands. */ ahci_stop(dev); @@ -2635,7 +2699,7 @@ ahci_reset(device_t dev) for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; - xpt_done(ch->hold[i]); + ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } @@ -2831,12 +2895,12 @@ ahci_check_ids(device_t dev, union ccb *ccb) if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 
15 : 0)) { ccb->ccb_h.status = CAM_TID_INVALID; - xpt_done(ccb); + ahci_done(ch, ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; - xpt_done(ccb); + ahci_done(ch, ccb); return (-1); } return (0); @@ -3028,15 +3092,19 @@ ahciaction(struct cam_sim *sim, union ccb *ccb) ccb->ccb_h.status = CAM_REQ_INVALID; break; } - xpt_done(ccb); + ahci_done(ch, ccb); } static void ahcipoll(struct cam_sim *sim) { struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); + uint32_t istatus; - ahci_ch_intr(ch->dev); + /* Read interrupt statuses and process if any. */ + istatus = ATA_INL(ch->r_mem, AHCI_P_IS); + if (istatus != 0) + ahci_ch_intr_main(ch, istatus); if (ch->resetting != 0 && (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { ch->resetpolldiv = 1000; diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h index 0c60c7d..e1895e4 100644 --- a/sys/dev/ahci/ahci.h +++ b/sys/dev/ahci/ahci.h @@ -422,6 +422,8 @@ struct ahci_channel { struct ahci_slot slot[AHCI_MAX_SLOTS]; union ccb *hold[AHCI_MAX_SLOTS]; struct mtx mtx; /* state lock */ + STAILQ_HEAD(, ccb_hdr) doneq; /* queue of completed CCBs */ + int batch; /* doneq is in use */ int devices; /* What is present */ int pm_present; /* PM presence reported */ int fbs_enabled; /* FIS-based switching enabled */ @@ -494,6 +496,8 @@ struct ahci_controller { int ichannels; int ccc; /* CCC timeout */ int cccv; /* CCC vector */ + int direct; /* Direct command completion */ + int msi; /* MSI interupts */ struct { void (*function)(void *); void *argument; diff --git a/sys/dev/ata/ata-all.c b/sys/dev/ata/ata-all.c index 4860faf..957dd95 100644 --- a/sys/dev/ata/ata-all.c +++ b/sys/dev/ata/ata-all.c @@ -353,9 +353,7 @@ ata_interrupt(void *data) struct ata_channel *ch = (struct ata_channel *)data; mtx_lock(&ch->state_mtx); - xpt_batch_start(ch->sim); ata_interrupt_locked(data); - xpt_batch_done(ch->sim); mtx_unlock(&ch->state_mtx); } diff --git a/sys/dev/isp/isp_freebsd.c 
b/sys/dev/isp/isp_freebsd.c index 1d0da03..2832cc0 100644 --- a/sys/dev/isp/isp_freebsd.c +++ b/sys/dev/isp/isp_freebsd.c @@ -106,7 +106,7 @@ isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) return (EIO); } ISP_UNLOCK(isp); - if (xpt_create_path_unlocked(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); ISP_UNLOCK(isp); @@ -4131,12 +4131,12 @@ isp_target_thread(ispsoftc_t *isp, int chan) periphdriver_register(&isptargdriver); ISP_GET_PC(isp, chan, sim, sim); ISP_GET_PC(isp, chan, path, path); - status = xpt_create_path_unlocked(&wpath, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); + status = xpt_create_path(&wpath, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGERR, "%s: could not allocate wildcard path", __func__); return; } - status = xpt_create_path_unlocked(&path, NULL, cam_sim_path(sim), 0, 0); + status = xpt_create_path(&path, NULL, cam_sim_path(sim), 0, 0); if (status != CAM_REQ_CMP) { xpt_free_path(wpath); isp_prt(isp, ISP_LOGERR, "%s: could not allocate path", __func__); diff --git a/sys/dev/mvs/mvs.c b/sys/dev/mvs/mvs.c index 7f00dee..8ad85a8 100644 --- a/sys/dev/mvs/mvs.c +++ b/sys/dev/mvs/mvs.c @@ -654,9 +654,7 @@ mvs_ch_intr_locked(void *data) struct mvs_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); - xpt_batch_start(ch->sim); mvs_ch_intr(data); - xpt_batch_done(ch->sim); mtx_unlock(&ch->mtx); } diff --git a/sys/dev/siis/siis.c b/sys/dev/siis/siis.c index b08ee62..dd2bd70 100644 --- a/sys/dev/siis/siis.c +++ b/sys/dev/siis/siis.c @@ -838,9 +838,7 @@ siis_ch_intr_locked(void *data) struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); - xpt_batch_start(ch->sim); siis_ch_intr(data); - xpt_batch_done(ch->sim); 
mtx_unlock(&ch->mtx); } diff --git a/sys/kern/kern_environment.c b/sys/kern/kern_environment.c index e89b3f7..ff453cb 100644 --- a/sys/kern/kern_environment.c +++ b/sys/kern/kern_environment.c @@ -315,20 +315,12 @@ char * getenv(const char *name) { char buf[KENV_MNAMELEN + 1 + KENV_MVALLEN + 1]; - char *ret, *cp; - int len; + char *ret; if (dynamic_kenv) { - mtx_lock(&kenv_lock); - cp = _getenv_dynamic(name, NULL); - if (cp != NULL) { - strcpy(buf, cp); - mtx_unlock(&kenv_lock); - len = strlen(buf) + 1; - ret = malloc(len, M_KENV, M_WAITOK); - strcpy(ret, buf); + if (getenv_string(name, buf, sizeof(buf))) { + ret = strdup(buf, M_KENV); } else { - mtx_unlock(&kenv_lock); ret = NULL; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "getenv"); @@ -458,15 +450,20 @@ unsetenv(const char *name) int getenv_string(const char *name, char *data, int size) { - char *tmp; + char *cp; - tmp = getenv(name); - if (tmp != NULL) { - strlcpy(data, tmp, size); - freeenv(tmp); - return (1); - } else - return (0); + if (dynamic_kenv) { + mtx_lock(&kenv_lock); + cp = _getenv_dynamic(name, NULL); + if (cp != NULL) + strlcpy(data, cp, size); + mtx_unlock(&kenv_lock); + } else { + cp = _getenv_static(name); + if (cp != NULL) + strlcpy(data, cp, size); + } + return (cp != NULL); } /* @@ -535,18 +532,15 @@ getenv_ulong(const char *name, unsigned long *data) int getenv_quad(const char *name, quad_t *data) { - char *value; + char value[KENV_MNAMELEN + 1 + KENV_MVALLEN + 1]; char *vtp; quad_t iv; - value = getenv(name); - if (value == NULL) + if (!getenv_string(name, value, sizeof(value))) return (0); iv = strtoq(value, &vtp, 0); - if (vtp == value || (vtp[0] != '\0' && vtp[1] != '\0')) { - freeenv(value); + if (vtp == value || (vtp[0] != '\0' && vtp[1] != '\0')) return (0); - } switch (vtp[0]) { case 't': case 'T': iv *= 1024; @@ -559,11 +553,9 @@ getenv_quad(const char *name, quad_t *data) case '\0': break; default: - freeenv(value); return (0); } *data = iv; - freeenv(value); return (1); } |