-rw-r--r--   sys/dev/mpt/mpt.c        | 143
-rw-r--r--   sys/dev/mpt/mpt.h        |  74
-rw-r--r--   sys/dev/mpt/mpt_cam.c    | 398
-rw-r--r--   sys/dev/mpt/mpt_debug.c  |  79
-rw-r--r--   sys/dev/mpt/mpt_pci.c    | 116
5 files changed, 587 insertions(+), 223 deletions(-)
diff --git a/sys/dev/mpt/mpt.c b/sys/dev/mpt/mpt.c
index 8c7dbc9..e62e3a7 100644
--- a/sys/dev/mpt/mpt.c
+++ b/sys/dev/mpt/mpt.c
@@ -88,6 +88,7 @@ static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
+static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt);
@@ -277,8 +278,9 @@ mpt_stdattach(struct mpt_softc *mpt)
}
int
-mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *rep)
+mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
+ mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
/* Event was not for us. */
return (0);
}
@@ -468,10 +470,17 @@ mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
MPT_PERS_FOREACH(mpt, pers)
handled += pers->event(mpt, req, msg);
- if (handled == 0)
+ if (handled == 0 && mpt->mpt_pers_mask == 0) {
+ mpt_lprt(mpt, MPT_PRT_WARN,
+ "No Handlers For Any Event Notify Frames. "
+ "Event %#x (ACK %sequired).\n",
+ msg->Event, msg->AckRequired? "r" : "not r");
+ } else if (handled == 0) {
mpt_prt(mpt,
- "Unhandled Event Notify Frame. Event %#x.\n",
- msg->Event);
+ "Unhandled Event Notify Frame. Event %#x "
+ "(ACK %sequired).\n",
+ msg->Event, msg->AckRequired? "r" : "not r");
+ }
if (msg->AckRequired) {
request_t *ack_req;
@@ -525,6 +534,8 @@ static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
MSG_EVENT_NOTIFY_REPLY *msg)
{
+ mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
+ msg->Event & 0xFF);
switch(msg->Event & 0xFF) {
case MPI_EVENT_NONE:
break;
@@ -547,6 +558,8 @@ mpt_core_event(struct mpt_softc *mpt, request_t *req,
* of our mpt_send_event_request.
*/
break;
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ break;
default:
return (/*handled*/0);
break;
@@ -922,6 +935,7 @@ mpt_reset(struct mpt_softc *mpt, int reinit)
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
+ request_t *nxt;
struct mpt_evtf_record *record;
uint32_t reply_baddr;
@@ -929,6 +943,10 @@ mpt_free_request(struct mpt_softc *mpt, request_t *req)
panic("mpt_free_request bad req ptr\n");
return;
}
+ if ((nxt = req->chain) != NULL) {
+ req->chain = NULL;
+ mpt_free_request(mpt, nxt); /* NB: recursion */
+ }
req->ccb = NULL;
req->state = REQ_STATE_FREE;
if (LIST_EMPTY(&mpt->ack_frames)) {
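The new req->chain field links overallocated request areas into a singly linked
list, which mpt_free_request() now releases by recursing on the head. A minimal
sketch of the same walk done iteratively, assuming a hypothetical release_one()
helper that returns a single area to the free list:

    static void
    free_request_chain(struct mpt_softc *mpt, request_t *req)
    {
            while (req != NULL) {
                    request_t *nxt = req->chain;

                    req->chain = NULL;
                    release_one(mpt, req);  /* hypothetical single-area free */
                    req = nxt;
            }
    }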
@@ -964,6 +982,7 @@ retry:
("mpt_get_request: corrupted request free list\n"));
TAILQ_REMOVE(&mpt->request_free_list, req, links);
req->state = REQ_STATE_ALLOCATED;
+ req->chain = NULL;
} else if (sleep_ok != 0) {
mpt->getreqwaiter = 1;
mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
@@ -979,17 +998,19 @@ mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
uint32_t *pReq;
pReq = req->req_vbuf;
- mpt_lprt(mpt, MPT_PRT_TRACE, "Send Request %d (0x%x):\n",
- req->index, req->req_pbuf);
- mpt_lprt(mpt, MPT_PRT_TRACE, "%08x %08x %08x %08x\n",
- pReq[0], pReq[1], pReq[2], pReq[3]);
- mpt_lprt(mpt, MPT_PRT_TRACE, "%08x %08x %08x %08x\n",
- pReq[4], pReq[5], pReq[6], pReq[7]);
- mpt_lprt(mpt, MPT_PRT_TRACE, "%08x %08x %08x %08x\n",
- pReq[8], pReq[9], pReq[10], pReq[11]);
- mpt_lprt(mpt, MPT_PRT_TRACE, "%08x %08x %08x %08x\n",
- pReq[12], pReq[13], pReq[14], pReq[15]);
-
+ if (mpt->verbose > MPT_PRT_TRACE) {
+ int offset;
+ mpt_prt(mpt, "Send Request %d (0x%x):",
+ req->index, req->req_pbuf);
+ for (offset = 0; offset < mpt->request_frame_size; offset++) {
+ if ((offset & 0x7) == 0) {
+ mpt_prtc(mpt, "\n");
+ mpt_prt(mpt, " ");
+ }
+ mpt_prtc(mpt, " %08x", pReq[offset]);
+ }
+ mpt_prtc(mpt, "\n");
+ }
bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
BUS_DMASYNC_PREWRITE);
req->state |= REQ_STATE_QUEUED;
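The replacement dump prints the entire request frame, eight 32-bit words per
line, instead of a fixed sixteen words. A standalone userland sketch of the
same formatting loop (the array contents here are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    static void
    dump_words(const uint32_t *w, int nwords)
    {
            int offset;

            for (offset = 0; offset < nwords; offset++) {
                    if ((offset & 0x7) == 0)
                            printf("\n    ");   /* new row every 8 words */
                    printf(" %08x", w[offset]);
            }
            printf("\n");
    }

    int
    main(void)
    {
            uint32_t req[16] = { 0x01234567, 0x89abcdef };

            dump_words(req, 16);
            return (0);
    }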
@@ -1028,11 +1049,11 @@ mpt_wait_req(struct mpt_softc *mpt, request_t *req,
timeout = (time_ms * hz) / 1000;
else
timeout = time_ms * 2;
- saved_cnt = mpt->reset_cnt;
req->state |= REQ_STATE_NEED_WAKEUP;
mask &= ~REQ_STATE_NEED_WAKEUP;
+ saved_cnt = mpt->reset_cnt;
while ((req->state & mask) != state
- && mpt->reset_cnt == saved_cnt) {
+ && mpt->reset_cnt == saved_cnt) {
if (sleep_ok != 0) {
error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
@@ -1052,7 +1073,7 @@ mpt_wait_req(struct mpt_softc *mpt, request_t *req,
req->state &= ~REQ_STATE_NEED_WAKEUP;
if (mpt->reset_cnt != saved_cnt)
return (EIO);
- if (time_ms && timeout == 0)
+ if (time_ms && timeout <= 0)
return (ETIMEDOUT);
return (0);
}
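As a worked example of the timeout arithmetic above: with hz = 1000 and
time_ms = 500, the sleeping path computes timeout = (500 * 1000) / 1000 = 500
ticks, while the polled path spins 500 * 2 = 1000 iterations. Changing the
expiry test from timeout == 0 to timeout <= 0 also catches a decrement that
overshoots past zero, which the equality test would have missed.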
@@ -1245,12 +1266,20 @@ mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
init.Function = MPI_FUNCTION_IOC_INIT;
if (mpt->is_fc) {
init.MaxDevices = 255;
+ } else if (mpt->is_sas) {
+ init.MaxDevices = mpt->mpt_max_devices;
} else {
init.MaxDevices = 16;
}
init.MaxBuses = 1;
- init.ReplyFrameSize = MPT_REPLY_SIZE;
+
+ init.MsgVersion = htole16(MPI_VERSION);
+ init.HeaderVersion = htole16(MPI_HEADER_VERSION);
+ init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
+ if (mpt->ioc_facts_flags & MPI_IOCFACTS_FLAGS_REPLY_FIFO_HOST_SIGNAL) {
+ init.Flags |= MPI_IOCINIT_FLAGS_REPLY_FIFO_HOST_SIGNAL;
+ }
if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
return(error);
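The new MsgVersion/HeaderVersion/ReplyFrameSize fields are stored through
htole16(), which is an identity on little-endian hosts and a byte swap on
big-endian ones. A minimal illustration:

    #include <sys/endian.h>     /* htole16() on FreeBSD */
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint16_t v = htole16(0x0102);
            uint8_t *b = (uint8_t *)&v;

            /* Prints "02 01" regardless of host byte order. */
            printf("%02x %02x\n", b[0], b[1]);
            return (0);
    }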
@@ -1826,9 +1855,9 @@ mpt_send_port_enable(struct mpt_softc *mpt, int port)
mpt_send_cmd(mpt, req);
error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
- /*sleep_ok*/FALSE, /*time_ms*/500);
+ /*sleep_ok*/FALSE, /*time_ms*/mpt->is_sas? 30000 : 3000);
if (error != 0) {
- mpt_prt(mpt, "port enable timed out");
+ mpt_prt(mpt, "port enable timed out\n");
return (-1);
}
mpt_free_request(mpt, req);
@@ -1919,6 +1948,7 @@ mpt_attach(struct mpt_softc *mpt)
pers->use_count++;
}
}
+
return (0);
}
@@ -2060,11 +2090,13 @@ mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
uint32_t *data_end;
data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
+ pci_enable_io(mpt->dev, SYS_RES_IOPORT);
mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
while (data != data_end) {
mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
data++;
}
+ pci_disable_io(mpt->dev, SYS_RES_IOPORT);
}
static int
@@ -2102,6 +2134,7 @@ mpt_download_fw(struct mpt_softc *mpt)
ext->ImageSize);
}
+ pci_enable_io(mpt->dev, SYS_RES_IOPORT);
/* Setup the address to jump to on reset. */
mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
@@ -2116,6 +2149,8 @@ mpt_download_fw(struct mpt_softc *mpt)
mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
+ pci_disable_io(mpt->dev, SYS_RES_IOPORT);
+
/*
* Re-enable the processor and clear the boot halt flag.
*/
@@ -2138,6 +2173,7 @@ mpt_configure_ioc(struct mpt_softc *mpt)
MSG_IOC_FACTS_REPLY facts;
int try;
int needreset;
+ uint32_t max_chain_depth;
needreset = 0;
for (try = 0; try < MPT_MAX_TRYS; try++) {
@@ -2166,23 +2202,65 @@ mpt_configure_ioc(struct mpt_softc *mpt)
mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
mpt->request_frame_size = le16toh(facts.RequestFrameSize);
+ mpt->ioc_facts_flags = facts.Flags;
mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
le16toh(facts.MsgVersion) >> 8,
le16toh(facts.MsgVersion) & 0xFF,
le16toh(facts.HeaderVersion) >> 8,
le16toh(facts.HeaderVersion) & 0xFF);
+
+ /*
+ * Now that we know request frame size, we can calculate
+ * the actual (reasonable) segment limit for read/write I/O.
+ *
+ * This limit is constrained by:
+ *
+ * + The size of each area we allocate per command (and how
+ * many chain segments we can fit into it).
+ * + The total number of areas we've set up.
+ * + The actual chain depth the card will allow.
+ *
+ * The first area's segment count is limited by the I/O request
+ * at the head of it. We cannot realistically allocate more
+ * than MPT_MAX_REQUESTS areas. Therefore, to account for both
+ * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
+ *
+ */
+ max_chain_depth = facts.MaxChainDepth;
+
+ /* total number of request areas we (can) allocate */
+ mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
+
+ /* converted to the number of chain areas possible */
+ mpt->max_seg_cnt *= MPT_NRFM(mpt);
+
+ /* limited by the number of chain areas the card will support */
+ if (mpt->max_seg_cnt > max_chain_depth) {
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "chain depth limited to %u (from %u)\n",
+ max_chain_depth, mpt->max_seg_cnt);
+ mpt->max_seg_cnt = max_chain_depth;
+ }
+
+ /* converted to the number of simple sges in chain segments. */
+ mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
+
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"MsgLength=%u IOCNumber = %d\n",
facts.MsgLength, facts.IOCNumber);
mpt_lprt(mpt, MPT_PRT_DEBUG,
- "IOCFACTS: GlobalCredits=%d BlockSize=%u "
- "Request Frame Size %u\n", mpt->mpt_global_credits,
- facts.BlockSize * 8, mpt->request_frame_size * 8);
+ "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
+ "Request Frame Size %u bytes Max Chain Depth %u\n",
+ mpt->mpt_global_credits, facts.BlockSize,
+ mpt->request_frame_size << 2, max_chain_depth);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"IOCFACTS: Num Ports %d, FWImageSize %d, "
"Flags=%#x\n", facts.NumberOfPorts,
le32toh(facts.FWImageSize), facts.Flags);
+
if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
struct mpt_map_info mi;
int error;
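A worked instance of the derivation above, using assumed example values (a
128-byte request frame and a 12-byte SGE_IO_UNION; the real numbers come from
IOC FACTS at runtime):

    #include <stdio.h>

    /* Assumed example values -- the real ones come from IOC FACTS. */
    #define MAX_REQUESTS    512     /* request areas allocated */
    #define REQUEST_AREA    512     /* bytes per area */
    #define RQSL            128     /* assumed request frame size, bytes */
    #define SGE_SIZE        12      /* assumed sizeof (SGE_IO_UNION) */

    int
    main(void)
    {
            unsigned max_chain_depth = 128;          /* assumed FACTS value */
            unsigned nrfm = REQUEST_AREA / RQSL;     /* frames per area: 4 */
            unsigned nsgl = RQSL / SGE_SIZE;         /* SGEs per frame: 10 */
            unsigned seg = (MAX_REQUESTS - 2) * nrfm;/* 2040 chain areas */

            if (seg > max_chain_depth)
                    seg = max_chain_depth;           /* card limit: 128 */
            seg *= (nsgl - 1);     /* last slot chains on: 128 * 9 = 1152 */
            printf("Maximum Segment Count: %u\n", seg);
            return (0);
    }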
@@ -2249,6 +2327,7 @@ mpt_configure_ioc(struct mpt_softc *mpt)
mpt->mpt_port_type = pfp.PortType;
mpt->mpt_proto_flags = pfp.ProtocolFlags;
if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
+ pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
mpt_prt(mpt, "Unsupported Port Type (%x)\n",
pfp.PortType);
@@ -2260,10 +2339,16 @@ mpt_configure_ioc(struct mpt_softc *mpt)
}
if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
mpt->is_fc = 1;
+ mpt->is_sas = 0;
+ } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
+ mpt->is_fc = 0;
+ mpt->is_sas = 1;
} else {
mpt->is_fc = 0;
+ mpt->is_sas = 0;
}
mpt->mpt_ini_id = pfp.PortSCSIID;
+ mpt->mpt_max_devices = pfp.MaxDevices;
if (mpt_enable_ioc(mpt) != 0) {
mpt_prt(mpt, "Unable to initialize IOC\n");
@@ -2280,7 +2365,7 @@ mpt_configure_ioc(struct mpt_softc *mpt)
*/
mpt_read_config_info_ioc(mpt);
- if (mpt->is_fc == 0) {
+ if (mpt->is_fc == 0 && mpt->is_sas == 0) {
if (mpt_read_config_info_spi(mpt)) {
return (EIO);
}
@@ -2310,7 +2395,7 @@ mpt_enable_ioc(struct mpt_softc *mpt)
uint32_t pptr;
int val;
- if (mpt_send_ioc_init(mpt, MPT_DB_INIT_HOST) != MPT_OK) {
+ if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
mpt_prt(mpt, "mpt_send_ioc_init failed\n");
return (EIO);
}
@@ -2321,7 +2406,7 @@ mpt_enable_ioc(struct mpt_softc *mpt)
mpt_prt(mpt, "IOC failed to go to run state\n");
return (ENXIO);
}
- mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE");
+ mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
/*
* Give it reply buffers
@@ -2342,14 +2427,14 @@ mpt_enable_ioc(struct mpt_softc *mpt)
mpt_send_event_request(mpt, 1);
/*
- * Now enable the port
+ * Enable the port
*/
if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
mpt_prt(mpt, "failed to enable port 0\n");
return (ENXIO);
}
-
mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port 0\n");
+
return (0);
}
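Taken together, mpt_enable_ioc() now brings the controller up in this order:
IOC_INIT handshake (with MPI_WHOINIT_HOST_DRIVER), wait for RUNSTATE, post
reply buffers, enable event notification, and finally port enable, which for
SAS is given the much longer 30-second timeout above to cover device discovery.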
diff --git a/sys/dev/mpt/mpt.h b/sys/dev/mpt/mpt.h
index 4a132c6..b1403c1 100644
--- a/sys/dev/mpt/mpt.h
+++ b/sys/dev/mpt/mpt.h
@@ -86,13 +86,21 @@
#include <sys/bus.h>
#include <sys/module.h>
-#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/resource.h>
#include <sys/rman.h>
+#if __FreeBSD_version < 500000
+#include <pci/pcireg.h>
+#include <pci/pcivar.h>
+#else
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#endif
+
+#include <machine/bus.h>
#include "opt_ddb.h"
/**************************** Register Definitions ****************************/
@@ -247,6 +255,7 @@ struct req_entry {
bus_addr_t req_pbuf; /* Physical Address of Entry */
bus_addr_t sense_pbuf; /* Physical Address of sense data */
bus_dmamap_t dmap; /* DMA map for data buffer */
+ struct req_entry *chain; /* for SGE overallocations */
};
/**************************** Handler Registration ****************************/
@@ -376,7 +385,8 @@ struct mpt_softc {
struct mtx mpt_lock;
#endif
uint32_t mpt_pers_mask;
- uint32_t : 15,
+ uint32_t : 14,
+ is_sas : 1,
raid_mwce_set : 1,
getreqwaiter : 1,
shutdwn_raid : 1,
@@ -397,6 +407,7 @@ struct mpt_softc {
uint16_t request_frame_size;
uint8_t mpt_max_devices;
uint8_t mpt_max_buses;
+ uint8_t ioc_facts_flags;
/*
* Port Facts
@@ -486,6 +497,11 @@ struct mpt_softc {
uint8_t *request; /* KVA of Request memory */
bus_addr_t request_phys; /* BusADdr of request memory */
+ uint32_t max_seg_cnt; /* calculated after IOC facts */
+
+ /*
+ * Hardware management
+ */
u_int reset_cnt;
/*
@@ -658,11 +674,11 @@ mpt_pio_read(struct mpt_softc *mpt, int offset)
}
/*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
-#define MPT_REPLY_SIZE 128
+#define MPT_REPLY_SIZE 256
-#define MPT_MAX_REQUESTS(mpt) ((mpt)->is_fc ? 1024 : 256)
-#define MPT_REQUEST_AREA 512
-#define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_SIZE */
+#define MPT_MAX_REQUESTS(mpt) 512
+#define MPT_REQUEST_AREA 512
+#define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_AREA */
#define MPT_REQ_MEM_SIZE(mpt) (MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)
#define MPT_CONTEXT_CB_SHIFT (16)
@@ -714,33 +730,25 @@ mpt_pop_reply_queue(struct mpt_softc *mpt)
void mpt_complete_request_chain(struct mpt_softc *mpt,
struct req_queue *chain, u_int iocstatus);
/************************** Scatter Gather Managment **************************/
-/*
- * We cannot tell prior to getting IOC facts how big the IOC's request
- * area is. Because of this we cannot tell at compile time how many
- * simple SG elements we can fit within an IOC request prior to having
- * to put in a chain element.
- *
- * Experimentally we know that the Ultra4 parts have a 96 byte request
- * element size and the Fibre Channel units have a 144 byte request
- * element size. Therefore, if we have 512-32 (== 480) bytes of request
- * area to play with, we have room for between 3 and 5 request sized
- * regions- the first of which is the command plus a simple SG list,
- * the rest of which are chained continuation SG lists. Given that the
- * normal request we use is 48 bytes w/o the first SG element, we can
- * assume we have 480-48 == 432 bytes to have simple SG elements and/or
- * chain elements. If we assume 32 bit addressing, this works out to
- * 54 SG or chain elements. If we assume 5 chain elements, then we have
- * a maximum of 49 seperate actual SG segments.
- */
-#define MPT_SGL_MAX 49
-
+/* MPT_RQSL- size of request frame, in bytes */
#define MPT_RQSL(mpt) (mpt->request_frame_size << 2)
-#define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_SIMPLE32))
-#define MPT_NSGL_FIRST(mpt) \
- (((mpt->request_frame_size << 2) - \
- sizeof (MSG_SCSI_IO_REQUEST) - \
- sizeof (SGE_IO_UNION)) / sizeof (SGE_SIMPLE32))
+/* MPT_NSGL- how many SG entries can fit in a request frame size */
+#define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))
+
+/* MPT_NRFM- how many request frames can fit in each request alloc we make */
+#define MPT_NRFM(mpt) (MPT_REQUEST_AREA / MPT_RQSL(mpt))
+
+/*
+ * MPT_NSGL_FIRST- # of SG elements that can fit after
+ * an I/O request but still within the request frame.
+ * Do this safely based upon SGE_IO_UNION.
+ *
+ * Note that the first element is *within* the SCSI request.
+ */
+#define MPT_NSGL_FIRST(mpt) \
+ ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
+ sizeof (SGE_IO_UNION))
/***************************** IOC Initialization *****************************/
int mpt_reset(struct mpt_softc *, int /*reinit*/);
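Under the same assumed sizes as before (128-byte request frame, 48-byte
MSG_SCSI_IO_REQUEST as noted in the removed comment, 12-byte SGE_IO_UNION),
MPT_NSGL_FIRST(mpt) evaluates to (128 - 48 + 12) / 12 = 7; the added
sizeof (SGE_IO_UNION) term credits the one SG element embedded in the SCSI
request itself. Likewise MPT_NRFM(mpt) = 512 / 128 = 4 request frames per
allocation.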
@@ -763,7 +771,8 @@ enum {
MPT_PRT_WARN,
MPT_PRT_INFO,
MPT_PRT_DEBUG,
- MPT_PRT_TRACE
+ MPT_PRT_TRACE,
+ MPT_PRT_NONE=100
};
#define mpt_lprt(mpt, level, ...) \
@@ -850,4 +859,5 @@ void mpt_req_state(mpt_req_state_t state);
void mpt_print_config_request(void *vmsg);
void mpt_print_request(void *vmsg);
void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
+void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
#endif /* _MPT_H_ */
diff --git a/sys/dev/mpt/mpt_cam.c b/sys/dev/mpt/mpt_cam.c
index 9cb9b64..d791dfa 100644
--- a/sys/dev/mpt/mpt_cam.c
+++ b/sys/dev/mpt/mpt_cam.c
@@ -350,11 +350,16 @@ mpt_timeout(void *arg)
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
- request_t *req;
+ request_t *req, *trq;
+ char *mpt_off;
union ccb *ccb;
struct mpt_softc *mpt;
+ int seg, first_lim;
+ uint32_t flags, nxt_off;
+ bus_dmasync_op_t op;
MSG_SCSI_IO_REQUEST *mpt_req;
- SGE_SIMPLE32 *se;
+ SGE_SIMPLE64 *se;
+ SGE_CHAIN64 *ce;
req = (request_t *)arg;
ccb = req->ccb;
@@ -362,20 +367,30 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
mpt = ccb->ccb_h.ccb_mpt_ptr;
req = ccb->ccb_h.ccb_req_ptr;
mpt_req = req->req_vbuf;
+ mpt_off = req->req_vbuf;
- if (error == 0 && nseg > MPT_SGL_MAX) {
+ if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
}
+bad:
if (error != 0) {
- if (error != EFBIG)
+ /* if (error != EFBIG) */
mpt_prt(mpt, "bus_dmamap_load returned %d\n", error);
if (ccb->ccb_h.status == CAM_REQ_INPROG) {
xpt_freeze_devq(ccb->ccb_h.path, 1);
ccb->ccb_h.status = CAM_DEV_QFRZN;
- if (error == EFBIG)
+ if (error == EFBIG) {
ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
- else
+ } else if (error == ENOMEM) {
+ if (mpt->outofbeer == 0) {
+ mpt->outofbeer = 1;
+ xpt_freeze_simq(mpt->sim, 1);
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "FREEZEQ\n");
+ }
+ ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+ } else
ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
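The ENOMEM branch above freezes the SIM queue exactly once and marks the CCB
for requeue. A sketch of both halves of that pattern, where the release side
is assumed (not shown in this hunk) to live in the command-completion path:

    /* Freeze side, as in the patch: first ENOMEM freezes the queue once. */
    if (mpt->outofbeer == 0) {
            mpt->outofbeer = 1;
            xpt_freeze_simq(mpt->sim, 1);
    }
    ccb->ccb_h.status |= CAM_REQUEUE_REQ;

    /* Assumed release side: undo the freeze once resources return. */
    if (mpt->outofbeer) {
            mpt->outofbeer = 0;
            xpt_release_simq(mpt->sim, /*run_queue*/1);
    }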
@@ -385,135 +400,233 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
MPTLOCK_2_CAMLOCK(mpt);
return;
}
-
- if (nseg > MPT_NSGL_FIRST(mpt)) {
- int i, nleft = nseg;
- uint32_t flags;
- bus_dmasync_op_t op;
- SGE_CHAIN32 *ce;
- mpt_req->DataLength = ccb->csio.dxfer_len;
- flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
- if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
- flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
+ /*
+ * No data to transfer?
+ * Just make a single simple SGL with zero length.
+ */
- se = (SGE_SIMPLE32 *) &mpt_req->SGL;
- for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) {
- uint32_t tf;
+ if (mpt->verbose >= MPT_PRT_DEBUG) {
+ int tidx = ((char *)&mpt_req->SGL) - mpt_off;
+ memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
+ }
- bzero(se, sizeof (*se));
- se->Address = dm_segs->ds_addr;
- MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
- tf = flags;
- if (i == MPT_NSGL_FIRST(mpt) - 2) {
- tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
- }
- MPI_pSGE_SET_FLAGS(se, tf);
- nleft -= 1;
- }
+ if (nseg == 0) {
+ SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) &mpt_req->SGL;
+ MPI_pSGE_SET_FLAGS(se1,
+ (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
+ MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
+ goto out;
+ }
+
+ mpt_req->DataLength = ccb->csio.dxfer_len;
+ flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+ flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
+
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+ op = BUS_DMASYNC_PREREAD;
+ } else {
+ op = BUS_DMASYNC_PREWRITE;
+ }
+ if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
+ bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
+ }
+ /*
+ * Okay, fill in what we can at the end of the command frame.
+ * If we have up to MPT_NSGL_FIRST, we can fit them all into
+ * the command frame.
+ *
+ * Otherwise, we fill up through MPT_NSGL_FIRST less one
+ * SIMPLE64 pointers and start doing CHAIN64 entries after
+ * that.
+ */
+
+ if (nseg < MPT_NSGL_FIRST(mpt)) {
+ first_lim = nseg;
+ } else {
/*
- * Tell the IOC where to find the first chain element
+ * Leave room for CHAIN element
*/
- mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;
+ first_lim = MPT_NSGL_FIRST(mpt) - 1;
+ }
+
+ se = (SGE_SIMPLE64 *) &mpt_req->SGL;
+ for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
+ uint32_t tf;
+
+ bzero(se, sizeof (*se));
+ se->Address.Low = dm_segs->ds_addr;
+ if (sizeof(bus_addr_t) > 4) {
+ se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
+ }
+ MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
+ tf = flags;
+ if (seg == first_lim - 1) {
+ tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
+ }
+ if (seg == nseg - 1) {
+ tf |= MPI_SGE_FLAGS_END_OF_LIST |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ }
+ MPI_pSGE_SET_FLAGS(se, tf);
+ }
+
+ if (seg == nseg) {
+ goto out;
+ }
+
+ /*
+ * Tell the IOC where to find the first chain element.
+ */
+ mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;
+ nxt_off = MPT_RQSL(mpt);
+ trq = req;
+
+ /*
+ * Make up the rest of the data segments out of a chain element
+ * (contiained in the current request frame) which points to
+ * SIMPLE64 elements in the next request frame, possibly ending
+ * with *another* chain element (if there's more).
+ */
+ while (seg < nseg) {
+ int this_seg_lim;
+ uint32_t tf, cur_off;
+ bus_addr_t chain_list_addr;
/*
- * Until we're finished with all segments...
+ * Point to the chain descriptor. Note that the chain
+ * descriptor is at the end of the *previous* list (whether
+ * chain or simple).
*/
- while (nleft) {
- int ntodo;
- /*
- * Construct the chain element that point to the
- * next segment.
- */
- ce = (SGE_CHAIN32 *) se++;
- if (nleft > MPT_NSGL(mpt)) {
- ntodo = MPT_NSGL(mpt) - 1;
- ce->NextChainOffset = (MPT_RQSL(mpt) -
- sizeof (SGE_SIMPLE32)) >> 2;
- ce->Length = MPT_NSGL(mpt) *
- sizeof (SGE_SIMPLE32);
- } else {
- ntodo = nleft;
- ce->NextChainOffset = 0;
- ce->Length = ntodo * sizeof (SGE_SIMPLE32);
- }
- ce->Address = req->req_pbuf +
- ((char *)se - (char *)mpt_req);
- ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
- for (i = 0; i < ntodo; i++, se++, dm_segs++) {
- uint32_t tf;
-
- bzero(se, sizeof (*se));
- se->Address = dm_segs->ds_addr;
- MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
- tf = flags;
- if (i == ntodo - 1) {
- tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
- if (ce->NextChainOffset == 0) {
- tf |=
- MPI_SGE_FLAGS_END_OF_LIST |
- MPI_SGE_FLAGS_END_OF_BUFFER;
- }
- }
- MPI_pSGE_SET_FLAGS(se, tf);
- nleft -= 1;
- }
+ ce = (SGE_CHAIN64 *) se;
+ /*
+ * Before we change our current pointer, make sure we won't
+ * overflow the request area with this frame. Note that we
+ * test against 'greater than' here as it's okay in this case
+ * to have next offset be just outside the request area.
+ */
+ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
+ nxt_off = MPT_REQUEST_AREA;
+ goto next_chain;
}
- if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
- op = BUS_DMASYNC_PREREAD;
- else
- op = BUS_DMASYNC_PREWRITE;
- if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
- bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
- }
- } else if (nseg > 0) {
- int i;
- uint32_t flags;
- bus_dmasync_op_t op;
+ /*
+ * Set our SGE element pointer to the beginning of the chain
+ * list and update our next chain list offset.
+ */
+ se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
+ cur_off = nxt_off;
+ nxt_off += MPT_RQSL(mpt);
+
+ /*
+ * Now initialize the chain descriptor.
+ */
+ bzero(ce, sizeof (SGE_CHAIN64));
- mpt_req->DataLength = ccb->csio.dxfer_len;
- flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
- if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
- flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
+ /*
+ * Get the physical address of the chain list.
+ */
+ chain_list_addr = trq->req_pbuf;
+ chain_list_addr += cur_off;
+ if (sizeof (bus_addr_t) > 4) {
+ ce->Address.High =
+ (uint32_t) ((uint64_t)chain_list_addr >> 32);
+ }
+ ce->Address.Low = (uint32_t) chain_list_addr;
+ ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING;
- /* Copy the segments into our SG list */
- se = (SGE_SIMPLE32 *) &mpt_req->SGL;
- for (i = 0; i < nseg; i++, se++, dm_segs++) {
- uint32_t tf;
+ /*
+ * If we have more than a frame's worth of segments left,
+ * set up the chain list to have the last element be another
+ * chain descriptor.
+ */
+ if ((nseg - seg) > MPT_NSGL(mpt)) {
+ this_seg_lim = seg + MPT_NSGL(mpt) - 1;
+ /*
+ * The length of the chain is the length in bytes of the
+ * number of segments plus the next chain element.
+ *
+ * The next chain descriptor offset is the length,
+ * in words, of the number of segments.
+ */
+ ce->Length = (this_seg_lim - seg) *
+ sizeof (SGE_SIMPLE64);
+ ce->NextChainOffset = ce->Length >> 2;
+ ce->Length += sizeof (SGE_CHAIN64);
+ } else {
+ this_seg_lim = nseg;
+ ce->Length = (this_seg_lim - seg) *
+ sizeof (SGE_SIMPLE64);
+ }
+ /*
+ * Fill in the chain list SGE elements with our segment data.
+ *
+ * If we're the last element in this chain list, set the last
+ * element flag. If we're the completely last element period,
+ * set the end of list and end of buffer flags.
+ */
+ while (seg < this_seg_lim) {
bzero(se, sizeof (*se));
- se->Address = dm_segs->ds_addr;
+ se->Address.Low = dm_segs->ds_addr;
+ if (sizeof (bus_addr_t) > 4) {
+ se->Address.High =
+ ((uint64_t)dm_segs->ds_addr) >> 32;
+ }
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
tf = flags;
- if (i == nseg - 1) {
- tf |=
- MPI_SGE_FLAGS_LAST_ELEMENT |
- MPI_SGE_FLAGS_END_OF_BUFFER |
- MPI_SGE_FLAGS_END_OF_LIST;
+ if (seg == this_seg_lim - 1) {
+ tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
+ }
+ if (seg == nseg - 1) {
+ tf |= MPI_SGE_FLAGS_END_OF_LIST |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
}
MPI_pSGE_SET_FLAGS(se, tf);
+ se++;
+ seg++;
+ dm_segs++;
}
- if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
- op = BUS_DMASYNC_PREREAD;
- else
- op = BUS_DMASYNC_PREWRITE;
- if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
- bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
- }
- } else {
- se = (SGE_SIMPLE32 *) &mpt_req->SGL;
+ next_chain:
/*
- * No data to transfer so we just make a single simple SGL
- * with zero length.
+ * If we have more segments to do and we've used up all of
+ * the space in a request area, go allocate another one
+ * and chain to that.
*/
- MPI_pSGE_SET_FLAGS(se,
- (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
- MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
+ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
+ request_t *nrq = mpt_get_request(mpt, FALSE);
+
+ if (nrq == NULL) {
+ error = ENOMEM;
+ goto bad;
+ }
+
+ /*
+ * Append the new request area on the tail of our list.
+ */
+ if ((trq = req->chain) == NULL) {
+ req->chain = nrq;
+ } else {
+ while (trq->chain != NULL) {
+ trq = trq->chain;
+ }
+ trq->chain = nrq;
+ }
+ trq = nrq;
+ mpt_off = trq->req_vbuf;
+ mpt_req = trq->req_vbuf;
+ if (mpt->verbose >= MPT_PRT_DEBUG) {
+ memset(mpt_off, 0xff, MPT_REQUEST_AREA);
+ }
+ nxt_off = 0;
+ }
}
+out:
/*
* Last time we need to check if this CCB needs to be aborted.
@@ -537,8 +650,14 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
} else {
callout_handle_init(&ccb->ccb_h.timeout_ch);
}
- if (mpt->verbose >= MPT_PRT_DEBUG)
- mpt_print_scsi_io_request(mpt_req);
+ if (mpt->verbose >= MPT_PRT_DEBUG) {
+ int nc = 0;
+ mpt_print_scsi_io_request(req->req_vbuf);
+ for (trq = req->chain; trq; trq = trq->chain) {
+ printf(" Additional Chain Area %d\n", nc++);
+ mpt_dump_sgl(trq->req_vbuf, 0);
+ }
+ }
mpt_send_cmd(mpt, req);
MPTLOCK_2_CAMLOCK(mpt);
}
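Each 64-bit simple SGE built in the loops above packs an 8-bit flags field and
a 24-bit length into FlagsLength and splits the bus address across the Low and
High words. A self-contained sketch of that packing, with stand-in types (the
real structures and MPI_pSGE_* macros live in the MPI headers):

    #include <stdint.h>
    #include <string.h>

    struct sge_simple64 {               /* stand-in for SGE_SIMPLE64 */
            uint32_t FlagsLength;       /* flags in bits 31:24, length 23:0 */
            uint32_t AddrLow;
            uint32_t AddrHigh;
    };

    static void
    fill_sge64(struct sge_simple64 *se, uint64_t busaddr, uint32_t len,
        uint32_t flags)
    {
            memset(se, 0, sizeof (*se));
            se->AddrLow = (uint32_t)busaddr;
            se->AddrHigh = (uint32_t)(busaddr >> 32);
            se->FlagsLength = (flags << 24) | (len & 0x00ffffffu);
    }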
@@ -578,6 +697,7 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
if (raid_passthru) {
status = mpt_raid_quiesce_disk(mpt, mpt->raid_disks + ccb->ccb_h.target_id,
request_t *req)
+ }
#endif
/*
@@ -646,7 +766,7 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
}
- if (mpt->is_fc == 0) {
+ if (mpt->is_fc == 0 && mpt->is_sas == 0) {
if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
}
@@ -716,9 +836,7 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
} else {
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
- mpt_execute_req(req, segs, csio->sglist_cnt,
- (csio->sglist_cnt < MPT_SGL_MAX)?
- 0 : EFBIG);
+ mpt_execute_req(req, segs, csio->sglist_cnt, 0);
}
}
} else {
@@ -771,6 +889,8 @@ static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
MSG_EVENT_NOTIFY_REPLY *msg)
{
+ mpt_lprt(mpt, MPT_PRT_ALWAYS, "mpt_cam_event: 0x%x\n",
+ msg->Event & 0xFF);
switch(msg->Event & 0xFF) {
case MPI_EVENT_UNIT_ATTENTION:
mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
@@ -875,6 +995,17 @@ mpt_cam_event(struct mpt_softc *mpt, request_t *req,
mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
(msg->Data[1] >> 8) & 0xff, msg->Data[0]);
break;
+ case MPI_EVENT_EVENT_CHANGE:
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
+ break;
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ /*
+ * Devices are attachin'.....
+ */
+ mpt_prt(mpt,
+ "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
+ break;
default:
return (/*handled*/0);
}
@@ -1282,7 +1413,7 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
xpt_done(ccb);
break;
}
- if (mpt->is_fc == 0) {
+ if (mpt->is_fc == 0 && mpt->is_sas == 0) {
uint8_t dval = 0;
u_int period = 0, offset = 0;
#ifndef CAM_NEW_TRAN_CODE
@@ -1414,6 +1545,30 @@ mpt_prt(mpt, "Set sync Failed!\n");
fc->bitrate = 100000; /* XXX: Need for 2Gb/s */
/* XXX: need a port database for each target */
#endif
+ } else if (mpt->is_sas) {
+#ifndef CAM_NEW_TRAN_CODE
+ cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
+ cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
+ /*
+ * How do you measure the width of a high
+ * speed serial bus? Well, in bytes.
+ *
+ * Offset and period make no sense, though, so we set
+ * (above) a 'base' transfer speed to be gigabit.
+ */
+ cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+#else
+ struct ccb_trans_settings_sas *sas =
+ &cts->xport_specific.sas;
+
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_3;
+ cts->transport = XPORT_SAS;
+ cts->transport_version = 0;
+
+ sas->valid = CTS_SAS_VALID_SPEED;
+ sas->bitrate = 300000; /* XXX: Default 3Gbps */
+#endif
} else {
#ifdef CAM_NEW_TRAN_CODE
struct ccb_trans_settings_scsi *scsi =
@@ -1572,17 +1727,26 @@ mpt_prt(mpt, "Set sync Failed!\n");
cpi->hba_inquiry = PI_TAG_ABLE;
if (mpt->is_fc) {
cpi->base_transfer_speed = 100000;
+ } else if (mpt->is_sas) {
+ cpi->base_transfer_speed = 300000;
} else {
cpi->base_transfer_speed = 3300;
cpi->hba_inquiry |=
PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
}
} else if (mpt->is_fc) {
+/* XXX SHOULD BE BASED UPON IOC FACTS XXX */
cpi->max_target = 255;
cpi->hba_misc = PIM_NOBUSRESET;
cpi->initiator_id = cpi->max_target + 1;
cpi->base_transfer_speed = 100000;
cpi->hba_inquiry = PI_TAG_ABLE;
+ } else if (mpt->is_sas) {
+ cpi->max_target = 63; /* XXX */
+ cpi->hba_misc = PIM_NOBUSRESET;
+ cpi->initiator_id = cpi->max_target;
+ cpi->base_transfer_speed = 300000;
+ cpi->hba_inquiry = PI_TAG_ABLE;
} else {
cpi->initiator_id = mpt->mpt_ini_id;
cpi->base_transfer_speed = 3300;
diff --git a/sys/dev/mpt/mpt_debug.c b/sys/dev/mpt/mpt_debug.c
index b745c43..a8445fc 100644
--- a/sys/dev/mpt/mpt_debug.c
+++ b/sys/dev/mpt/mpt_debug.c
@@ -172,8 +172,6 @@ static const struct Error_Map IOC_SCSITMType[] = {
{ -1, 0 },
};
-static void mpt_dump_sgl(SGE_IO_UNION *sgl);
-
static char *
mpt_ioc_status(int code)
{
@@ -527,7 +525,12 @@ mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *orig_msg)
for (i = 0; i < msg->CDBLength; i++)
printf("%02x ", msg->CDB[i]);
printf("\n");
- mpt_dump_sgl(&orig_msg->SGL);
+
+ if ((msg->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) !=
+ MPI_SCSIIO_CONTROL_NODATATRANSFER ) {
+ mpt_dump_sgl(&orig_msg->SGL,
+ ((char *)&orig_msg->SGL)-(char *)orig_msg);
+ }
}
static void
@@ -625,35 +628,69 @@ mpt_req_state(mpt_req_state_t state)
"REQ_STATE", state, NULL, 80);
}
-static void
-mpt_dump_sgl(SGE_IO_UNION *su)
+#define LAST_SGE ( \
+ MPI_SGE_FLAGS_END_OF_LIST | \
+ MPI_SGE_FLAGS_END_OF_BUFFER| \
+ MPI_SGE_FLAGS_LAST_ELEMENT)
+void
+mpt_dump_sgl(SGE_IO_UNION *su, int offset)
{
SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) su;
- int iCount, flags;
+ const char allfox[4] = { 0xff, 0xff, 0xff, 0xff };
+ void *nxtaddr = se;
+ void *lim;
+ int flags;
+
+ /*
+ * Can't be any bigger than this.
+ */
+ lim = &((char *)se)[MPT_REQUEST_AREA - offset];
- iCount = MPT_SGL_MAX;
do {
int iprt;
printf("\t");
+ if (memcmp(se, allfox, 4) == 0) {
+ uint32_t *nxt = (uint32_t *)se;
+ printf("PAD %p\n", se);
+ nxtaddr = nxt + 1;
+ se = nxtaddr;
+ flags = 0;
+ continue;
+ }
+ nxtaddr = se + 1;
flags = MPI_SGE_GET_FLAGS(se->FlagsLength);
switch (flags & MPI_SGE_FLAGS_ELEMENT_MASK) {
case MPI_SGE_FLAGS_SIMPLE_ELEMENT:
- {
- printf("SE32 %p: Addr=0x%0x FlagsLength=0x%0x\n",
- se, se->Address, se->FlagsLength);
+ if (flags & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+ SGE_SIMPLE64 *se64 = (SGE_SIMPLE64 *)se;
+ printf("SE64 %p: Addr=0x%08x%08x FlagsLength"
+ "=0x%0x\n", se64, se64->Address.High,
+ se64->Address.Low, se64->FlagsLength);
+ nxtaddr = se64 + 1;
+ } else {
+ printf("SE32 %p: Addr=0x%0x FlagsLength=0x%0x"
+ "\n", se, se->Address, se->FlagsLength);
+ }
printf(" ");
break;
- }
case MPI_SGE_FLAGS_CHAIN_ELEMENT:
- {
- SGE_CHAIN32 *ce = (SGE_CHAIN32 *) se;
- printf("CE32 %p: Addr=0x%0x NxtChnO=0x%x Flgs=0x%x "
- "Len=0x%0x\n", ce, ce->Address, ce->NextChainOffset,
- ce->Flags, ce->Length);
+ if (flags & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
+ SGE_CHAIN64 *ce64 = (SGE_CHAIN64 *) se;
+ printf("CE64 %p: Addr=0x%08x%08x NxtChnO=0x%x "
+ "Flgs=0x%x Len=0x%0x\n", ce64,
+ ce64->Address.High, ce64->Address.Low,
+ ce64->NextChainOffset,
+ ce64->Flags, ce64->Length);
+ nxtaddr = ce64 + 1;
+ } else {
+ SGE_CHAIN32 *ce = (SGE_CHAIN32 *) se;
+ printf("CE32 %p: Addr=0x%0x NxtChnO=0x%x "
+ " Flgs=0x%x Len=0x%0x\n", ce, ce->Address,
+ ce->NextChainOffset, ce->Flags, ce->Length);
+ }
flags = 0;
break;
- }
case MPI_SGE_FLAGS_TRANSACTION_ELEMENT:
printf("TE32 @ %p\n", se);
flags = 0;
@@ -678,9 +715,11 @@ mpt_dump_sgl(SGE_IO_UNION *su)
#undef MPT_PRINT_FLAG
if (iprt)
printf("\n");
- se++;
- iCount -= 1;
- } while ((flags & MPI_SGE_FLAGS_END_OF_LIST) == 0 && iCount != 0);
+ se = nxtaddr;
+ if ((flags & LAST_SGE) == LAST_SGE) {
+ break;
+ }
+ } while ((flags & MPI_SGE_FLAGS_END_OF_LIST) == 0 && nxtaddr < lim);
}
void
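mpt_dump_sgl() can now be handed chain areas whose unused tail was pre-filled
with 0xff by the debug path in mpt_execute_req(); the walker skips any 32-bit
word that is all ones. The check it performs reduces to:

    #include <stdint.h>
    #include <string.h>

    static int
    is_pad_word(const void *p)
    {
            static const uint8_t allfox[4] = { 0xff, 0xff, 0xff, 0xff };

            /* Matches the memcmp(se, allfox, 4) test in the walker. */
            return (memcmp(p, allfox, sizeof (allfox)) == 0);
    }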
diff --git a/sys/dev/mpt/mpt_pci.c b/sys/dev/mpt/mpt_pci.c
index ebc1ac3..cdaec8d 100644
--- a/sys/dev/mpt/mpt_pci.c
+++ b/sys/dev/mpt/mpt_pci.c
@@ -69,13 +69,6 @@ __FBSDID("$FreeBSD$");
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>
-#if __FreeBSD_version < 500000
-#include <pci/pcireg.h>
-#include <pci/pcivar.h>
-#else
-#include <dev/pci/pcireg.h>
-#include <dev/pci/pcivar.h>
-#endif
#ifndef PCI_VENDOR_LSI
#define PCI_VENDOR_LSI 0x1000
@@ -105,12 +98,43 @@ __FBSDID("$FreeBSD$");
#define PCI_PRODUCT_LSI_1030 0x0030
#endif
+#ifndef PCI_PRODUCT_LSI_SAS1064
+#define PCI_PRODUCT_LSI_SAS1064 0x0050
+#endif
+
+#ifndef PCI_PRODUCT_LSI_SAS1064A
+#define PCI_PRODUCT_LSI_SAS1064A 0x005C
+#endif
+
+#ifndef PCI_PRODUCT_LSI_SAS1064E
+#define PCI_PRODUCT_LSI_SAS1064E 0x0056
+#endif
+
+#ifndef PCI_PRODUCT_LSI_SAS1066
+#define PCI_PRODUCT_LSI_SAS1066 0x005E
+#endif
+
+#ifndef PCI_PRODUCT_LSI_SAS1066E
+#define PCI_PRODUCT_LSI_SAS1066E 0x005A
+#endif
+
+#ifndef PCI_PRODUCT_LSI_SAS1068
+#define PCI_PRODUCT_LSI_SAS1068 0x0054
+#endif
+
+#ifndef PCI_PRODUCT_LSI_SAS1068E
+#define PCI_PRODUCT_LSI_SAS1068E 0x0058
+#endif
+
+#ifndef PCI_PRODUCT_LSI_SAS1078
+#define PCI_PRODUCT_LSI_SAS1078 0x0060
+#endif
+
#ifndef PCIM_CMD_SERRESPEN
#define PCIM_CMD_SERRESPEN 0x0100
#endif
-
#define MPT_IO_BAR 0
#define MPT_MEM_BAR 1
@@ -167,6 +191,16 @@ mpt_pci_probe(device_t dev)
case PCI_PRODUCT_LSI_1030:
desc = "LSILogic 1030 Ultra4 Adapter";
break;
+ case PCI_PRODUCT_LSI_SAS1064:
+ case PCI_PRODUCT_LSI_SAS1064A:
+ case PCI_PRODUCT_LSI_SAS1064E:
+ case PCI_PRODUCT_LSI_SAS1066:
+ case PCI_PRODUCT_LSI_SAS1066E:
+ case PCI_PRODUCT_LSI_SAS1068:
+ case PCI_PRODUCT_LSI_SAS1068E:
+ case PCI_PRODUCT_LSI_SAS1078:
+ desc = "LSILogic SAS Adapter";
+ break;
default:
return (ENXIO);
}
@@ -268,6 +302,16 @@ mpt_pci_attach(device_t dev)
case PCI_PRODUCT_LSI_FC929:
mpt->is_fc = 1;
break;
+ case PCI_PRODUCT_LSI_SAS1064:
+ case PCI_PRODUCT_LSI_SAS1064A:
+ case PCI_PRODUCT_LSI_SAS1064E:
+ case PCI_PRODUCT_LSI_SAS1066:
+ case PCI_PRODUCT_LSI_SAS1066E:
+ case PCI_PRODUCT_LSI_SAS1068:
+ case PCI_PRODUCT_LSI_SAS1068E:
+ case PCI_PRODUCT_LSI_SAS1078:
+ mpt->is_sas = 1;
+ break;
default:
break;
}
@@ -276,10 +320,13 @@ mpt_pci_attach(device_t dev)
mpt->raid_resync_rate = MPT_RAID_RESYNC_RATE_DEFAULT;
mpt->raid_mwce_setting = MPT_RAID_MWCE_DEFAULT;
mpt->raid_queue_depth = MPT_RAID_QUEUE_DEPTH_DEFAULT;
+ mpt->verbose = MPT_PRT_NONE;
mpt_set_options(mpt);
- mpt->verbose = MPT_PRT_INFO;
- mpt->verbose += (bootverbose != 0)? 1 : 0;
-
+ if (mpt->verbose == MPT_PRT_NONE) {
+ mpt->verbose = MPT_PRT_WARN;
+ /* Print INFO level (if any) if bootverbose is set */
+ mpt->verbose += (bootverbose != 0)? 1 : 0;
+ }
/* Make sure memory access decoders are enabled */
cmd = pci_read_config(dev, PCIR_COMMAND, 2);
if ((cmd & PCIM_CMD_MEMEN) == 0) {
@@ -313,7 +360,8 @@ mpt_pci_attach(device_t dev)
/*
* Set up register access. PIO mode is required for
- * certain reset operations.
+ * certain reset operations (but must be disabled for
+ * some cards otherwise).
*/
mpt->pci_pio_rid = PCIR_BAR(MPT_IO_BAR);
mpt->pci_pio_reg = bus_alloc_resource(dev, SYS_RES_IOPORT,
@@ -331,6 +379,10 @@ mpt_pci_attach(device_t dev)
&mpt->pci_mem_rid, 0, ~0, 0, RF_ACTIVE);
if (mpt->pci_reg == NULL) {
device_printf(dev, "Unable to memory map registers.\n");
+ if (mpt->is_sas) {
+ device_printf(dev, "Giving Up.\n");
+ goto bad;
+ }
device_printf(dev, "Falling back to PIO mode.\n");
mpt->pci_st = mpt->pci_pio_st;
mpt->pci_sh = mpt->pci_pio_sh;
@@ -384,8 +436,14 @@ mpt_pci_attach(device_t dev)
mpt_read_config_regs(mpt);
+ /*
+ * Disable PIO until we need it
+ */
+ pci_disable_io(dev, SYS_RES_IOPORT);
+
/* Initialize the hardware */
if (mpt->disabled == 0) {
+
MPT_LOCK(mpt);
if (mpt_attach(mpt) != 0) {
MPT_UNLOCK(mpt);
@@ -497,7 +555,7 @@ mpt_pci_shutdown(device_t dev)
static int
mpt_dma_mem_alloc(struct mpt_softc *mpt)
{
- int i, error;
+ int i, error, nsegs;
uint8_t *vptr;
uint32_t pptr, end;
size_t len;
@@ -526,13 +584,13 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
#endif
/*
- * Create a parent dma tag for this device
+ * Create a parent dma tag for this device.
*
- * Align at byte boundaries, limit to 32-bit addressing
- * (The chip supports 64-bit addressing, but this driver doesn't)
+ * Align at byte boundaries, limit to 32-bit addressing for
+ * request/reply queues.
*/
if (mpt_dma_tag_create(mpt, /*parent*/NULL, /*alignment*/1,
- /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
/*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL,
/*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
/*nsegments*/BUS_SPACE_MAXSIZE_32BIT,
@@ -543,9 +601,9 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
}
/* Create a child tag for reply buffers */
- if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE,
- 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
- NULL, NULL, PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0,
+ if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 2 * PAGE_SIZE,
+ 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
+ NULL, NULL, 2 * PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0,
&mpt->reply_dmat) != 0) {
device_printf(dev, "cannot create a dma tag for replies\n");
return (1);
@@ -554,8 +612,9 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
/* Allocate some DMA accessable memory for replies */
if (bus_dmamem_alloc(mpt->reply_dmat, (void **)&mpt->reply,
BUS_DMA_NOWAIT, &mpt->reply_dmap) != 0) {
- device_printf(dev, "cannot allocate %lu bytes of reply memory\n",
- (u_long)PAGE_SIZE);
+ device_printf(dev,
+ "cannot allocate %lu bytes of reply memory\n",
+ (u_long) (2 * PAGE_SIZE));
return (1);
}
@@ -564,7 +623,7 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
/* Load and lock it into "bus space" */
bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap, mpt->reply,
- PAGE_SIZE, mpt_map_rquest, &mi, 0);
+ 2 * PAGE_SIZE, mpt_map_rquest, &mi, 0);
if (mi.error) {
device_printf(dev,
@@ -574,9 +633,16 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
mpt->reply_phys = mi.phys;
/* Create a child tag for data buffers */
+
+ /*
+ * XXX: we should say that nsegs is 'unrestricted', but that
+ * XXX: tickles a horrible bug in the busdma code. Instead,
+ * XXX: we'll derive a reasonable segment limit from MAXPHYS
+ */
+ nsegs = (MAXPHYS / PAGE_SIZE) + 1;
if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
- NULL, NULL, MAXBSIZE, MPT_SGL_MAX, BUS_SPACE_MAXSIZE_32BIT, 0,
+ NULL, NULL, MAXBSIZE, nsegs, BUS_SPACE_MAXSIZE_32BIT, 0,
&mpt->buffer_dmat) != 0) {
device_printf(dev,
"cannot create a dma tag for data buffers\n");
@@ -585,7 +651,7 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
/* Create a child tag for request buffers */
if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE,
- 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
+ 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
&mpt->request_dmat) != 0) {
device_printf(dev, "cannot create a dma tag for requests\n");