Diffstat (limited to 'sys/cam/cam_xpt.c')
-rw-r--r--  sys/cam/cam_xpt.c  273
1 files changed, 162 insertions, 111 deletions
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index 4f27724..abd91fa 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: cam_xpt.c,v 1.57 1999/05/11 15:44:39 mjacob Exp $
+ * $Id: cam_xpt.c,v 1.58 1999/05/18 00:41:05 gibbs Exp $
*/
#include <sys/param.h>
#include <sys/systm.h>
@@ -34,6 +34,7 @@
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kernel.h>
+#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
@@ -61,7 +62,6 @@
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"
-#include "opt_scsi.h"
/* Datastructures internal to the xpt layer */
@@ -89,30 +89,6 @@ static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
#define CAM_MAX_HIGHPOWER 4
#endif
-/*
- * This is the number of seconds we wait for devices to settle after a SCSI
- * bus reset.
- */
-#ifndef SCSI_DELAY
-#define SCSI_DELAY 2000
-#endif
-/*
- * If someone sets this to 0, we assume that they want the minimum
- * allowable bus settle delay. All devices need _some_ sort of bus settle
- * delay, so we'll set it to a minimum value of 100ms.
- */
-#if (SCSI_DELAY == 0)
-#undef SCSI_DELAY
-#define SCSI_DELAY 100
-#endif
-
-/*
- * Make sure the user isn't using seconds instead of milliseconds.
- */
-#if (SCSI_DELAY < 100)
-#error "SCSI_DELAY is in milliseconds, not seconds! Please use a larger value"
-#endif
-
/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;
@@ -166,6 +142,7 @@ struct cam_ed {
#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
#define CAM_DEV_TAG_AFTER_COUNT 0x20
+#define CAM_DEV_INQUIRY_DATA_VALID 0x40
u_int32_t tag_delay_count;
#define CAM_TAG_DELAY_COUNT 5
u_int32_t refcount;
@@ -185,6 +162,7 @@ struct cam_et {
target_id_t target_id;
u_int32_t refcount;
u_int generation;
+ struct timeval last_reset;
};
/*
@@ -197,6 +175,7 @@ struct cam_eb {
TAILQ_ENTRY(cam_eb) links;
path_id_t path_id;
struct cam_sim *sim;
+ struct timeval last_reset;
u_int32_t flags;
#define CAM_EB_RUNQ_SCHEDULED 0x01
u_int32_t refcount;
@@ -498,6 +477,9 @@ static struct xpt_quirk_entry xpt_quirk_table[] =
},
};
+static const int xpt_quirk_table_size =
+ sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
+
typedef enum {
DM_RET_COPY = 0x01,
DM_RET_FLAG_MASK = 0x0f,
@@ -744,6 +726,7 @@ static cam_status proberegister(struct cam_periph *periph,
void *arg);
static void probeschedule(struct cam_periph *probe_periph);
static void probestart(struct cam_periph *periph, union ccb *start_ccb);
+static void proberequestdefaultnegotiation(struct cam_periph *periph);
static void probedone(struct cam_periph *periph, union ccb *done_ccb);
static void probecleanup(struct cam_periph *periph);
static void xpt_find_quirk(struct cam_ed *device);
@@ -2843,7 +2826,6 @@ xpt_action(union ccb *start_ccb)
case XPT_IMMED_NOTIFY:
case XPT_NOTIFY_ACK:
case XPT_GET_TRAN_SETTINGS:
- case XPT_PATH_INQ:
case XPT_RESET_BUS:
{
struct cam_sim *sim;
@@ -2852,21 +2834,36 @@ xpt_action(union ccb *start_ccb)
(*(sim->sim_action))(sim, start_ccb);
break;
}
+ case XPT_PATH_INQ:
+ {
+ struct cam_sim *sim;
+
+ sim = start_ccb->ccb_h.path->bus->sim;
+ (*(sim->sim_action))(sim, start_ccb);
+ break;
+ }
+ case XPT_PATH_STATS:
+ start_ccb->cpis.last_reset =
+ start_ccb->ccb_h.path->bus->last_reset;
+ start_ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
case XPT_GDEV_TYPE:
{
+ struct cam_ed *dev;
int s;
+ dev = start_ccb->ccb_h.path->device;
s = splcam();
- if ((start_ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) != 0) {
+ if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
} else {
struct ccb_getdev *cgd;
+ struct cam_eb *bus;
struct cam_et *tar;
- struct cam_ed *dev;
cgd = &start_ccb->cgd;
+ bus = cgd->ccb_h.path->bus;
tar = cgd->ccb_h.path->target;
- dev = cgd->ccb_h.path->device;
cgd->inq_data = dev->inq_data;
cgd->pd_type = SID_TYPE(&dev->inq_data);
cgd->dev_openings = dev->ccbq.dev_openings;
@@ -2886,6 +2883,36 @@ xpt_action(union ccb *start_ccb)
splx(s);
break;
}
+ case XPT_GDEV_STATS:
+ {
+ struct cam_ed *dev;
+ int s;
+
+ dev = start_ccb->ccb_h.path->device;
+ s = splcam();
+ if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
+ start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ } else {
+ struct ccb_getdevstats *cgds;
+ struct cam_eb *bus;
+ struct cam_et *tar;
+
+ cgds = &start_ccb->cgds;
+ bus = cgds->ccb_h.path->bus;
+ tar = cgds->ccb_h.path->target;
+ cgds->dev_openings = dev->ccbq.dev_openings;
+ cgds->dev_active = dev->ccbq.dev_active;
+ cgds->devq_openings = dev->ccbq.devq_openings;
+ cgds->devq_queued = dev->ccbq.queue.entries;
+ cgds->held = dev->ccbq.held;
+ cgds->last_reset = tar->last_reset;
+ if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
+ cgds->last_reset = bus->last_reset;
+ cgds->ccb_h.status = CAM_REQ_CMP;
+ }
+ splx(s);
+ break;
+ }
case XPT_GDEVLIST:
{
struct cam_periph *nperiph;
@@ -3237,6 +3264,8 @@ xpt_action(union ccb *start_ccb)
break;
}
case XPT_NOOP:
+ if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
+ xpt_freeze_devq(start_ccb->ccb_h.path, 1);
start_ccb->ccb_h.status = CAM_REQ_CMP;
break;
default:
@@ -3944,6 +3973,7 @@ xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
new_bus->path_id = sim->path_id;
new_bus->sim = sim;
TAILQ_INIT(&new_bus->et_entries);
+ timevalclear(&new_bus->last_reset);
new_bus->refcount = 1; /* Held until a bus_deregister event */
s = splcam();
TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
@@ -4059,30 +4089,13 @@ xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
bus = path->bus;
- /*
- * Freeze the SIM queue for SCSI_DELAY ms to
- * allow the bus to settle.
- */
if (async_code == AC_BUS_RESET) {
- struct cam_sim *sim;
-
- sim = bus->sim;
-
- /*
- * If there isn't already another timeout pending, go ahead
- * and freeze the simq and set the timeout flag. If there
- * is another timeout pending, replace it with this
- * timeout. There could be two bus reset async broadcasts
- * sent for some dual-channel controllers.
- */
- if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) == 0) {
- xpt_freeze_simq(sim, 1);
- sim->flags |= CAM_SIM_REL_TIMEOUT_PENDING;
- } else
- untimeout(xpt_release_simq_timeout, sim, sim->c_handle);
+ int s;
- sim->c_handle = timeout(xpt_release_simq_timeout,
- sim, (SCSI_DELAY * hz) / 1000);
+ s = splclock();
+ /* Update our notion of when the last reset occurred */
+ microtime(&bus->last_reset);
+ splx(s);
}
for (target = TAILQ_FIRST(&bus->et_entries);
@@ -4092,9 +4105,18 @@ xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
next_target = TAILQ_NEXT(target, links);
if (path->target != target
- && path->target != NULL)
+ && path->target->target_id != CAM_TARGET_WILDCARD)
continue;
+ if (async_code == AC_SENT_BDR) {
+ int s;
+
+ /* Update our notion of when the last reset occurred */
+ s = splclock();
+ microtime(&path->target->last_reset);
+ splx(s);
+ }
+
for (device = TAILQ_FIRST(&target->ed_entries);
device != NULL;
device = next_device) {
@@ -4104,7 +4126,7 @@ xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
next_device = TAILQ_NEXT(device, links);
if (path->device != device
- && path->device != NULL)
+ && path->device->lun_id != CAM_LUN_WILDCARD)
continue;
/*
@@ -4131,38 +4153,7 @@ xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
|| async_code == AC_BUS_RESET)
xpt_toggle_tags(&newpath);
- /*
- * If we send a BDR, freeze the device queue
- * for SCSI_DELAY ms to allow it to settle
- * down.
- */
- if (async_code == AC_SENT_BDR) {
- xpt_freeze_devq(&newpath, 1);
- /*
- * Although this looks bad, it
- * isn't as bad as it seems. We're
- * passing in a stack-allocated path
- * that we then immediately release
- * after scheduling a timeout to
- * release the device queue. So
- * the path won't be around when
- * the timeout fires, right? Right.
- * But it doesn't matter, since
- * xpt_release_devq and its timeout
- * function both take the device as
- * an argument. Theoretically, the
- * device will still be there when
- * the timeout fires, even though
- * the path will be gone.
- */
- cam_release_devq(
- &newpath,
- /*relsim_flags*/
- RELSIM_RELEASE_AFTER_TIMEOUT,
- /*reduction*/0,
- /*timeout*/SCSI_DELAY,
- /*getcount_only*/0);
- } else if (async_code == AC_INQ_CHANGED) {
+ if (async_code == AC_INQ_CHANGED) {
/*
* We've sent a start unit command, or
* something similar to a device that
@@ -4491,6 +4482,7 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
*/
bus->refcount++;
TAILQ_INIT(&target->ed_entries);
+ timevalclear(&target->last_reset);
/* Insertion sort into our bus's target list */
cur_target = TAILQ_FIRST(&bus->et_entries);
@@ -4552,6 +4544,7 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
callout_handle_init(&device->c_handle);
device->refcount = 1;
device->flags |= CAM_DEV_UNCONFIGURED;
+ device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
device->alloc_ccb_entry.device = device;
@@ -5038,18 +5031,17 @@ xptscandone(struct cam_periph *periph, union ccb *done_ccb)
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
- struct ccb_getdev *cgd;
+ union ccb *request_ccb; /* CCB representing the probe request */
probe_softc *softc;
- union ccb *ccb;
- cgd = (struct ccb_getdev *)arg;
+ request_ccb = (union ccb *)arg;
if (periph == NULL) {
printf("proberegister: periph was NULL!!\n");
return(CAM_REQ_CMP_ERR);
}
- if (cgd == NULL) {
- printf("proberegister: no getdev CCB, can't register device\n");
+ if (request_ccb == NULL) {
+ printf("proberegister: no probe CCB, can't register device\n");
return(CAM_REQ_CMP_ERR);
}
@@ -5060,12 +5052,18 @@ proberegister(struct cam_periph *periph, void *arg)
"Unable to allocate softc\n");
return(CAM_REQ_CMP_ERR);
}
- ccb = (union ccb *)cgd;
TAILQ_INIT(&softc->request_ccbs);
- TAILQ_INSERT_TAIL(&softc->request_ccbs, &ccb->ccb_h, periph_links.tqe);
+ TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
+ periph_links.tqe);
softc->flags = 0;
periph->softc = softc;
cam_periph_acquire(periph);
+ /*
+ * Ensure we've waited at least a bus settle
+ * delay before attempting to probe the device.
+ */
+ cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
+ SCSI_DELAY);
probeschedule(periph);
return(CAM_REQ_CMP);
}
@@ -5073,12 +5071,17 @@ proberegister(struct cam_periph *periph, void *arg)
static void
probeschedule(struct cam_periph *periph)
{
+ struct ccb_pathinq cpi;
union ccb *ccb;
probe_softc *softc;
softc = (probe_softc *)periph->softc;
ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
+ xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
+ cpi.ccb_h.func_code = XPT_PATH_INQ;
+ xpt_action((union ccb *)&cpi);
+
/*
* If a device has gone away and another device, or the same one,
* is back in the same place, it should have a unit attention
@@ -5090,12 +5093,23 @@ probeschedule(struct cam_periph *periph)
* luns. If you think a device has gone away start your scan from
* lun 0. This will insure that any bogus transfer settings are
* invalidated.
+ *
+ * If we haven't seen the device before and the controller supports
+ * some kind of transfer negotiation, negotiate with the first
+ * sent command if no bus reset was performed at startup. This
+ * ensures that the device is not confused by transfer negotiation
+ * settings left over by loader or BIOS action.
*/
if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
- && (ccb->ccb_h.target_lun == 0))
+ && (ccb->ccb_h.target_lun == 0)) {
softc->action = PROBE_TUR;
- else
+ } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
+ && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
+ proberequestdefaultnegotiation(periph);
softc->action = PROBE_INQUIRY;
+ } else {
+ softc->action = PROBE_INQUIRY;
+ }
if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
softc->flags |= PROBE_NO_ANNOUNCE;
@@ -5238,6 +5252,21 @@ probestart(struct cam_periph *periph, union ccb *start_ccb)
}
static void
+proberequestdefaultnegotiation(struct cam_periph *periph)
+{
+ struct ccb_trans_settings cts;
+
+ xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
+ cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
+ cts.flags = CCB_TRANS_USER_SETTINGS;
+ xpt_action((union ccb *)&cts);
+ cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
+ cts.flags &= ~CCB_TRANS_USER_SETTINGS;
+ cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
+ xpt_action((union ccb *)&cts);
+}
+
+static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
probe_softc *softc;
@@ -5275,6 +5304,7 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
u_int8_t periph_qual;
u_int8_t periph_dtype;
+ path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
inq_buf = &path->device->inq_data;
periph_qual = SID_QUAL(inq_buf);
@@ -5456,13 +5486,7 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
* the user settings, and set them as the current
* settings to set the device up.
*/
- done_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
- done_ccb->cts.flags = CCB_TRANS_USER_SETTINGS;
- xpt_action(done_ccb);
- done_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
- done_ccb->cts.flags &= ~CCB_TRANS_USER_SETTINGS;
- done_ccb->cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
- xpt_action(done_ccb);
+ proberequestdefaultnegotiation(periph);
xpt_release_ccb(done_ccb);
/*
@@ -5576,8 +5600,8 @@ xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
cts->flags &= ~CCB_TRANS_TAG_ENB;
cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
}
-
- if ((inq_data->flags & SID_Sync) == 0
+ if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
+ && (inq_data->flags & SID_Sync) == 0)
|| (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
/* Force async */
cts->sync_period = 0;
@@ -5586,12 +5610,14 @@ xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
switch (cts->bus_width) {
case MSG_EXT_WDTR_BUS_32_BIT:
- if ((inq_data->flags & SID_WBus32) != 0
+ if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
+ || (inq_data->flags & SID_WBus32) != 0)
&& (cpi.hba_inquiry & PI_WIDE_32) != 0)
break;
/* Fall Through to 16-bit */
case MSG_EXT_WDTR_BUS_16_BIT:
- if ((inq_data->flags & SID_WBus16) != 0
+ if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
+ || (inq_data->flags & SID_WBus16) != 0)
&& (cpi.hba_inquiry & PI_WIDE_16) != 0) {
cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
@@ -5704,6 +5730,8 @@ xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
static void
xpt_toggle_tags(struct cam_path *path)
{
+ struct cam_ed *dev;
+
/*
* Give controllers a chance to renegotiate
* before starting tag operations. We
@@ -5711,8 +5739,10 @@ xpt_toggle_tags(struct cam_path *path)
* which causes the tag enable command delay
* counter to come into effect.
*/
- if ((path->device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
- || (path->device->inq_flags & SID_CmdQue) != 0) {
+ dev = path->device;
+ if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
+ || ((dev->inq_flags & SID_CmdQue) != 0
+ && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
struct ccb_trans_settings cts;
xpt_setup_ccb(&cts.ccb_h, path, 1);
@@ -5752,12 +5782,29 @@ xpt_start_tags(struct cam_path *path)
}
static int busses_to_config;
+static int busses_to_reset;
static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
- if (bus->path_id != CAM_XPT_PATH_ID)
+ if (bus->path_id != CAM_XPT_PATH_ID) {
+ struct cam_path path;
+ struct ccb_pathinq cpi;
+ int can_negotiate;
+
busses_to_config++;
+ xpt_compile_path(&path, NULL, bus->path_id,
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+ xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
+ cpi.ccb_h.func_code = XPT_PATH_INQ;
+ xpt_action((union ccb *)&cpi);
+ can_negotiate = cpi.hba_inquiry;
+ can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
+ if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
+ && can_negotiate)
+ busses_to_reset++;
+ xpt_release_path(&path);
+ }
return(1);
}
@@ -5770,6 +5817,7 @@ xptconfigfunc(struct cam_eb *bus, void *arg)
if (bus->path_id != CAM_XPT_PATH_ID) {
cam_status status;
+ int can_negotiate;
work_ccb = xpt_alloc_ccb();
if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
@@ -5794,7 +5842,10 @@ xptconfigfunc(struct cam_eb *bus, void *arg)
return(1);
}
- if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0) {
+ can_negotiate = work_ccb->cpi.hba_inquiry;
+ can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
+ if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
+ && (can_negotiate != 0)) {
xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
work_ccb->ccb_h.func_code = XPT_RESET_BUS;
work_ccb->ccb_h.cbfcnp = NULL;
@@ -5850,7 +5901,7 @@ xpt_config(void *arg)
/* Call manually because we don't have any busses */
xpt_finishconfig(xpt_periph, NULL);
} else {
- if (SCSI_DELAY >= 2000) {
+ if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
printf("Waiting %d seconds for SCSI "
"devices to settle\n", SCSI_DELAY/1000);
}