author		mjacob <mjacob@FreeBSD.org>	2004-09-07 08:04:09 +0000
committer	mjacob <mjacob@FreeBSD.org>	2004-09-07 08:04:09 +0000
commit		e73978dc2243115bc4470a31517686b2f80736b2 (patch)
tree		b1d1e5df8846f8db55e6c04f5459cdaf282dc16b /sys/dev/isp
parent		fb5fe8618b5719321cb72a6362cc4090ea5e154f (diff)
Do the small amount of tweaking to support PAE for at least initiator mode.

I was unable to test this as the PAE kernel crashed with a "cannot copy LDT"
before coming up. When this gets a bit more testing, I'll fix the PAE conf
file to allow isp devices.

PR: 59728
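The crux of the change: under PAE, bus_addr_t widens to 64 bits while
pointers stay 32 bits, so DMA segment addresses can lie above 4GB and no
longer fit the 32-bit data segment descriptors of the ordinary request
entries. The driver therefore has to build the 64-bit (A64/Type 3) request
entries instead, and pointer-to-bus_addr_t conversions need an intermediate
vm_offset_t cast to avoid a pointer/integer size-mismatch warning. A minimal
illustrative sketch of the descriptor difference follows; the *_sketch types
are hypothetical, not the driver's real definitions:

	#include <sys/types.h>

	/* Hypothetical 32-bit descriptor: cannot address memory above 4GB. */
	typedef struct {
		u_int32_t	ds_base;	/* low 32 address bits only */
		u_int32_t	ds_count;
	} ds32_sketch_t;

	/* Hypothetical 64-bit descriptor: holds a full PAE bus address. */
	typedef struct {
		u_int64_t	ds_base;	/* all 64 address bits */
		u_int32_t	ds_count;
	} ds64_sketch_t;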
Diffstat (limited to 'sys/dev/isp')
-rw-r--r--	sys/dev/isp/isp_pci.c	138
1 file changed, 137 insertions(+), 1 deletion(-)
diff --git a/sys/dev/isp/isp_pci.c b/sys/dev/isp/isp_pci.c
index f5b0128..7dc7fc1 100644
--- a/sys/dev/isp/isp_pci.c
+++ b/sys/dev/isp/isp_pci.c
@@ -45,6 +45,12 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <sys/malloc.h>
+#ifdef ISP_TARGET_MODE
+#ifdef PAE
+#error "PAE and ISP_TARGET_MODE not supported yet"
+#endif
+#endif
+
#include <dev/isp/isp_freebsd.h>
static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
@@ -1621,6 +1627,131 @@ tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
static void dma2(void *, bus_dma_segment_t *, int, int);
+#ifdef PAE
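+/*
+ * 64-bit (PAE) variant of the dma2 busdma callback: it completes the
+ * caller's request entry and allocates continuation entries, filling
+ * both with data segment descriptors that carry 64-bit bus addresses.
+ */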
+static void
+dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
+{
+ mush_t *mp;
+ struct ispsoftc *isp;
+ struct ccb_scsiio *csio;
+ struct isp_pcisoftc *pcs;
+ bus_dmamap_t *dp;
+ bus_dma_segment_t *eseg;
+ ispreq64_t *rq;
+ int seglim, datalen;
+ u_int16_t nxti;
+
+ mp = (mush_t *) arg;
+ if (error) {
+ mp->error = error;
+ return;
+ }
+
+ if (nseg < 1) {
+ isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
+ mp->error = EFAULT;
+ return;
+ }
+ csio = mp->cmd_token;
+ isp = mp->isp;
+ rq = mp->rq;
+ pcs = (struct isp_pcisoftc *)mp->isp;
+ dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
+ nxti = *mp->nxtip;
+
+ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+ bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
+ } else {
+ bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
+ }
+
+ datalen = XS_XFRLEN(csio);
+
+ /*
+ * We're passed an initial partially filled in entry that
+ * has most fields filled in except for data transfer
+ * related values.
+ *
+ * Our job is to fill in the initial request queue entry and
+ * then to start allocating and filling in continuation entries
+ * until we've covered the entire transfer.
+ */
+
+ if (IS_FC(isp)) {
+ seglim = ISP_RQDSEG_T3;
+ ((ispreqt3_t *)rq)->req_totalcnt = datalen;
+ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+ ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
+ } else {
+ ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
+ }
+ } else {
+ if (csio->cdb_len > 12) {
+ seglim = 0;
+ } else {
+ seglim = ISP_RQDSEG_A64;
+ }
+ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+ rq->req_flags |= REQFLAG_DATA_IN;
+ } else {
+ rq->req_flags |= REQFLAG_DATA_OUT;
+ }
+ }
+
+ eseg = dm_segs + nseg;
+
+ while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
+ if (IS_FC(isp)) {
+ ispreqt3_t *rq3 = (ispreqt3_t *)rq;
+ rq3->req_dataseg[rq3->req_seg_count].ds_base =
+ dm_segs->ds_addr;
+ rq3->req_dataseg[rq3->req_seg_count].ds_count =
+ dm_segs->ds_len;
+ } else {
+ rq->req_dataseg[rq->req_seg_count].ds_base =
+ dm_segs->ds_addr;
+ rq->req_dataseg[rq->req_seg_count].ds_count =
+ dm_segs->ds_len;
+ }
+ datalen -= dm_segs->ds_len;
+ rq->req_seg_count++;
+ dm_segs++;
+ }
+
+ while (datalen > 0 && dm_segs != eseg) {
+ u_int16_t onxti;
+ ispcontreq64_t local, *crq = &local, *cqe;
+
+ cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
+ onxti = nxti;
+ nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
+ if (nxti == mp->optr) {
+ isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
+ mp->error = MUSHERR_NOQENTRIES;
+ return;
+ }
+ rq->req_header.rqs_entry_count++;
+ MEMZERO((void *)crq, sizeof (*crq));
+ crq->req_header.rqs_entry_count = 1;
+ crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
+
+ seglim = 0;
+ while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
+ crq->req_dataseg[seglim].ds_base =
+ dm_segs->ds_addr;
+ crq->req_dataseg[seglim].ds_count =
+ dm_segs->ds_len;
+ rq->req_seg_count++;
+ datalen -= dm_segs->ds_len;
+ dm_segs++;
+ seglim++;
+ }
+ isp_put_cont64_req(isp, crq, cqe);
+ MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
+ }
+ *mp->nxtip = nxti;
+}
+#else
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
@@ -1744,6 +1875,7 @@ dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
}
*mp->nxtip = nxti;
}
+#endif
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
@@ -1821,7 +1953,7 @@ isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
} else {
/* Pointer to physical buffer */
struct bus_dma_segment seg;
- seg.ds_addr = (bus_addr_t)csio->data_ptr;
+ seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
(*eptr)(mp, &seg, 1, 0);
}
@@ -1867,6 +1999,10 @@ mbxsync:
case RQSTYPE_T2RQS:
isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
break;
+ case RQSTYPE_A64:
+ case RQSTYPE_T3RQS:
+ isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
+ break;
}
return (CMD_QUEUED);
}
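For context, dma2() has the bus_dmamap_callback_t shape. The last hunk above
shows isp_pci_dmasetup() invoking it directly through *eptr for a pre-mapped
physical buffer; for ordinary virtual buffers the driver instead hands it to
bus_dmamap_load(9), which maps the buffer and calls back with the physical
segment list -- under PAE, a list whose addresses may exceed 4GB. A hedged
sketch of that flow (example_load() and its arguments are hypothetical):

	#include <sys/param.h>
	#include <sys/bus.h>
	#include <machine/bus.h>

	/* Hypothetical wrapper showing how a busdma load drives dma2. */
	static int
	example_load(bus_dma_tag_t tag, bus_dmamap_t map, void *buf,
	    bus_size_t len, void *cbarg)
	{
		/*
		 * bus_dmamap_load(9) maps buf, builds the bus_dma_segment_t
		 * list, and invokes the callback (here dma2) with that list,
		 * the segment count, and an error code -- the same
		 * (void *, bus_dma_segment_t *, int, int) signature that
		 * dma2 implements above.
		 */
		return (bus_dmamap_load(tag, map, buf, len, dma2, cbarg,
		    BUS_DMA_NOWAIT));
	}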