summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	delphij <delphij@FreeBSD.org>	2012-10-25 17:29:11 +0000
committer	delphij <delphij@FreeBSD.org>	2012-10-25 17:29:11 +0000
commit7bfe1a58c7d708f31044cbce4f23a01c4341f281 (patch)
tree078c246496a7ab37ed55dd7efd79f268c827f326
parente85387fbf1e08197b1b399092b2d71c41d1e0adb (diff)
downloadFreeBSD-src-7bfe1a58c7d708f31044cbce4f23a01c4341f281.zip
FreeBSD-src-7bfe1a58c7d708f31044cbce4f23a01c4341f281.tar.gz
Update hptiop(4) to version 1.8, which added support for HighPoint
RocketRAID 4500 series.

Many thanks to HighPoint Technologies for their continued support of FreeBSD!

Submitted by:	HighPoint Technologies
MFC after:	3 days
-rw-r--r--	share/man/man4/hptiop.4	|   6
-rw-r--r--	sys/dev/hptiop/hptiop.c	| 769
-rw-r--r--	sys/dev/hptiop/hptiop.h	| 100
3 files changed, 854 insertions, 21 deletions
diff --git a/share/man/man4/hptiop.4 b/share/man/man4/hptiop.4
index f7f8405..ea963dd 100644
--- a/share/man/man4/hptiop.4
+++ b/share/man/man4/hptiop.4
@@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd August 5, 2012
+.Dd October 25, 2012
.Dt HPTIOP 4
.Os
.Sh NAME
@@ -58,6 +58,10 @@ driver supports the following SAS and SATA RAID controllers:
.Pp
.Bl -bullet -compact
.It
+HighPoint RocketRAID 4522
+.It
+HighPoint RocketRAID 4520
+.It
HighPoint RocketRAID 4322
.It
HighPoint RocketRAID 4321
diff --git a/sys/dev/hptiop/hptiop.c b/sys/dev/hptiop/hptiop.c
index 520e75c..c4e3d5a 100644
--- a/sys/dev/hptiop/hptiop.c
+++ b/sys/dev/hptiop/hptiop.c
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
- * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -89,8 +89,8 @@ __FBSDID("$FreeBSD$");
#include <dev/hptiop/hptiop.h>
-static char driver_name[] = "hptiop";
-static char driver_version[] = "v1.3 (010208)";
+static const char driver_name[] = "hptiop";
+static const char driver_version[] = "v1.8";
static devclass_t hptiop_devclass;
@@ -99,41 +99,62 @@ static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
+static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
+ u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
struct hpt_iop_ioctl_param *pParams);
static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
struct hpt_iop_ioctl_param *pParams);
+static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_ioctl_param *pParams);
static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
+static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
struct hpt_iop_request_get_config *config);
+static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
struct hpt_iop_request_set_config *config);
+static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
+static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
+static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
+static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
struct hpt_iop_request_ioctl_command *req,
struct hpt_iop_ioctl_param *pParams);
+static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_request_ioctl_command *req,
+ struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
struct hpt_iop_srb *srb,
bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
struct hpt_iop_srb *srb,
bus_dma_segment_t *segs, int nsegs);
+static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_srb *srb,
+ bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
+static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
+static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
+static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int hptiop_probe(device_t dev);
@@ -146,8 +167,7 @@ static void hptiop_async(void *callback_arg, u_int32_t code,
struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
-static int hptiop_reset_adapter(struct hpt_iop_hba *hba);
-
+static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;
@@ -193,6 +213,11 @@ static struct cdevsw hptiop_cdevsw = {
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
+#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
+ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
+#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
+ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
+
static int hptiop_open(ioctl_dev_t dev, int flags,
int devtype, ioctl_thread_t proc)
{
@@ -294,6 +319,12 @@ static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
BUS_SPACE_RD4_MV0(outbound_intmask);
}
+static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
+{
+ BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
+ BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
+}
+
static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
{
u_int32_t req=0;
@@ -323,6 +354,15 @@ static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
return 0;
}
+static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
+ u_int32_t millisec)
+{
+ if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
+ return -1;
+
+ return 0;
+}
+
static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
u_int32_t index)
{
@@ -618,6 +658,111 @@ scsi_done:
}
}
+static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
+ u_int32_t _tag)
+{
+ u_int32_t req_type = _tag & 0xf;
+
+ struct hpt_iop_srb *srb;
+ struct hpt_iop_request_scsi_command *req;
+ union ccb *ccb;
+ u_int8_t *cdb;
+
+ switch (req_type) {
+ case IOP_REQUEST_TYPE_GET_CONFIG:
+ case IOP_REQUEST_TYPE_SET_CONFIG:
+ hba->config_done = 1;
+ break;
+
+ case IOP_REQUEST_TYPE_SCSI_COMMAND:
+ srb = hba->srb[(_tag >> 4) & 0xff];
+ req = (struct hpt_iop_request_scsi_command *)srb;
+
+ ccb = (union ccb *)srb->ccb;
+
+ untimeout(hptiop_reset_adapter, hba, ccb->ccb_h.timeout_ch);
+
+ if (ccb->ccb_h.flags & CAM_CDB_POINTER)
+ cdb = ccb->csio.cdb_io.cdb_ptr;
+ else
+ cdb = ccb->csio.cdb_io.cdb_bytes;
+
+ if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ goto scsi_done;
+ }
+
+ if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
+ req->header.result = IOP_RESULT_SUCCESS;
+
+ switch (req->header.result) {
+ case IOP_RESULT_SUCCESS:
+ switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN:
+ bus_dmamap_sync(hba->io_dmat,
+ srb->dma_map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(hba->io_dmat, srb->dma_map);
+ break;
+ case CAM_DIR_OUT:
+ bus_dmamap_sync(hba->io_dmat,
+ srb->dma_map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(hba->io_dmat, srb->dma_map);
+ break;
+ }
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case IOP_RESULT_BAD_TARGET:
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ break;
+ case IOP_RESULT_BUSY:
+ ccb->ccb_h.status = CAM_BUSY;
+ break;
+ case IOP_RESULT_INVALID_REQUEST:
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ break;
+ case IOP_RESULT_FAIL:
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ break;
+ case IOP_RESULT_RESET:
+ ccb->ccb_h.status = CAM_BUSY;
+ break;
+ case IOP_RESULT_CHECK_CONDITION:
+ memset(&ccb->csio.sense_data, 0,
+ sizeof(ccb->csio.sense_data));
+ if (req->dataxfer_length < ccb->csio.sense_len)
+ ccb->csio.sense_resid = ccb->csio.sense_len -
+ req->dataxfer_length;
+ else
+ ccb->csio.sense_resid = 0;
+ memcpy(&ccb->csio.sense_data, &req->sg_list,
+ MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ break;
+ default:
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ break;
+ }
+scsi_done:
+ ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
+
+ hptiop_free_srb(hba, srb);
+ xpt_done(ccb);
+ break;
+ case IOP_REQUEST_TYPE_IOCTL_COMMAND:
+ if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
+ hba->config_done = 1;
+ else
+ hba->config_done = -1;
+ wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
+ break;
+ default:
+ device_printf(hba->pcidev, "wrong callback type\n");
+ break;
+ }
+}
+
static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
{
u_int64_t req;
@@ -656,6 +801,50 @@ static int hptiop_intr_mv(struct hpt_iop_hba * hba)
return ret;
}
+static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
+{
+ u_int32_t status, _tag, cptr;
+ int ret = 0;
+
+ if (hba->initialized) {
+ BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
+ }
+
+ status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
+ if (status) {
+ BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
+ if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
+ u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
+ hptiop_os_message_callback(hba, msg);
+ }
+ ret = 1;
+ }
+
+ status = BUS_SPACE_RD4_MVFREY2(isr_cause);
+ if (status) {
+ BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
+ do {
+ cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
+ while (hba->u.mvfrey.outlist_rptr != cptr) {
+ hba->u.mvfrey.outlist_rptr++;
+ if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
+ hba->u.mvfrey.outlist_rptr = 0;
+ }
+
+ _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
+ hptiop_request_callback_mvfrey(hba, _tag);
+ ret = 2;
+ }
+ } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
+ }
+
+ if (hba->initialized) {
+ BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
+ }
+
+ return ret;
+}
+
static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
u_int32_t req32, u_int32_t millisec)
{
@@ -702,6 +891,48 @@ static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
return -1;
}
+static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
+ void *req, u_int32_t millisec)
+{
+ u_int32_t i, index;
+ u_int64_t phy_addr;
+ struct hpt_iop_request_header *reqhdr =
+ (struct hpt_iop_request_header *)req;
+
+ hba->config_done = 0;
+
+ phy_addr = hba->ctlcfgcmd_phy;
+ reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
+ | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
+ | IOP_REQUEST_FLAG_ADDR_BITS
+ | ((phy_addr >> 16) & 0xffff0000);
+ reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
+ | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
+
+ hba->u.mvfrey.inlist_wptr++;
+ index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+ if (index == hba->u.mvfrey.list_count) {
+ index = 0;
+ hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+ hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+ }
+
+ hba->u.mvfrey.inlist[index].addr = phy_addr;
+ hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
+
+ BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
+ BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
+
+ for (i = 0; i < millisec; i++) {
+ hptiop_intr_mvfrey(hba);
+ if (hba->config_done)
+ return 0;
+ DELAY(1000);
+ }
+ return -1;
+}
+
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
u_int32_t msg, u_int32_t millisec)
{
@@ -776,6 +1007,37 @@ static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
return 0;
}
+static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
+ struct hpt_iop_request_get_config * config)
+{
+ struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
+
+ if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
+ info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
+ KdPrint(("hptiop: header size %x/%x type %x/%x",
+ info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
+ info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
+ return -1;
+ }
+
+ config->interface_version = info->interface_version;
+ config->firmware_version = info->firmware_version;
+ config->max_requests = info->max_requests;
+ config->request_size = info->request_size;
+ config->max_sg_count = info->max_sg_count;
+ config->data_transfer_length = info->data_transfer_length;
+ config->alignment_mask = info->alignment_mask;
+ config->max_devices = info->max_devices;
+ config->sdram_size = info->sdram_size;
+
+ KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
+ config->max_requests, config->request_size,
+ config->data_transfer_length, config->max_devices,
+ config->sdram_size));
+
+ return 0;
+}
+
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
struct hpt_iop_request_set_config *config)
{
@@ -833,6 +1095,31 @@ static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
return 0;
}
+static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_request_set_config *config)
+{
+ struct hpt_iop_request_set_config *req;
+
+ if (!(req = hba->ctlcfg_ptr))
+ return -1;
+
+ memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
+ (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
+ sizeof(struct hpt_iop_request_set_config) -
+ sizeof(struct hpt_iop_request_header));
+
+ req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
+ req->header.size = sizeof(struct hpt_iop_request_set_config);
+ req->header.result = IOP_RESULT_PENDING;
+
+ if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
+ KdPrint(("hptiop: set config send cmd failed"));
+ return -1;
+ }
+
+ return 0;
+}
+
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
u_int32_t req32,
struct hpt_iop_ioctl_param *pParams)
@@ -884,7 +1171,8 @@ static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
return 0;
}
-static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
+static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
+ void *user, int size)
{
unsigned char byte;
int i;
@@ -898,7 +1186,8 @@ static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void
return 0;
}
-static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
+static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
+ void *user, int size)
{
unsigned char byte;
int i;
@@ -1045,6 +1334,103 @@ invalid:
}
}
+static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_request_ioctl_command *req,
+ struct hpt_iop_ioctl_param *pParams)
+{
+ u_int64_t phy_addr;
+ u_int32_t index;
+
+ phy_addr = hba->ctlcfgcmd_phy;
+
+ if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
+ (hba->max_request_size -
+ offsetof(struct hpt_iop_request_ioctl_command, buf))) {
+ device_printf(hba->pcidev, "request size beyond max value");
+ return -1;
+ }
+
+ req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
+ req->inbuf_size = pParams->nInBufferSize;
+ req->outbuf_size = pParams->nOutBufferSize;
+ req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
+ + pParams->nInBufferSize;
+
+ req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
+ req->header.result = IOP_RESULT_PENDING;
+
+ req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
+ | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
+ | IOP_REQUEST_FLAG_ADDR_BITS
+ | ((phy_addr >> 16) & 0xffff0000);
+ req->header.context = ((phy_addr & 0xffffffff) << 32 )
+ | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
+
+ hba->u.mvfrey.inlist_wptr++;
+ index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+ if (index == hba->u.mvfrey.list_count) {
+ index = 0;
+ hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+ hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+ }
+
+ hba->u.mvfrey.inlist[index].addr = phy_addr;
+ hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
+
+ BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
+ BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
+
+ while (hba->config_done == 0) {
+ if (hptiop_sleep(hba, req, PPAUSE,
+ "hptctl", HPT_OSM_TIMEOUT)==0)
+ continue;
+ hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
+ }
+ return 0;
+}
+
+static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_ioctl_param *pParams)
+{
+ struct hpt_iop_request_ioctl_command *req;
+
+ if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
+ (pParams->Magic != HPT_IOCTL_MAGIC32))
+ return EFAULT;
+
+ req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
+ hba->config_done = 0;
+ hptiop_lock_adapter(hba);
+ if (pParams->nInBufferSize)
+ if (copyin((void *)pParams->lpInBuffer,
+ req->buf, pParams->nInBufferSize))
+ goto invalid;
+ if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
+ goto invalid;
+
+ if (hba->config_done == 1) {
+ if (pParams->nOutBufferSize)
+ if (copyout(req->buf +
+ ((pParams->nInBufferSize + 3) & ~3),
+ (void *)pParams->lpOutBuffer,
+ pParams->nOutBufferSize))
+ goto invalid;
+
+ if (pParams->lpBytesReturned)
+ if (copyout(&req->bytes_returned,
+ (void*)pParams->lpBytesReturned,
+ sizeof(u_int32_t)))
+ goto invalid;
+ hptiop_unlock_adapter(hba);
+ return 0;
+ } else{
+invalid:
+ hptiop_unlock_adapter(hba);
+ return EFAULT;
+ }
+}
+
static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
{
union ccb *ccb;
@@ -1063,6 +1449,7 @@ static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
static bus_dmamap_callback_t hptiop_map_srb;
static bus_dmamap_callback_t hptiop_post_scsi_command;
static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;
+static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg;
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
@@ -1139,6 +1526,56 @@ static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
return 0;
}
+static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
+{
+ hba->bar0_rid = 0x10;
+ hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
+ SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
+
+ if (hba->bar0_res == NULL) {
+ device_printf(hba->pcidev, "failed to get iop bar0.\n");
+ return -1;
+ }
+ hba->bar0t = rman_get_bustag(hba->bar0_res);
+ hba->bar0h = rman_get_bushandle(hba->bar0_res);
+ hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
+ rman_get_virtual(hba->bar0_res);
+
+ if (!hba->u.mvfrey.config) {
+ bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+ hba->bar0_rid, hba->bar0_res);
+ device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
+ return -1;
+ }
+
+ hba->bar2_rid = 0x18;
+ hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
+ SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
+
+ if (hba->bar2_res == NULL) {
+ bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+ hba->bar0_rid, hba->bar0_res);
+ device_printf(hba->pcidev, "failed to get iop bar2.\n");
+ return -1;
+ }
+
+ hba->bar2t = rman_get_bustag(hba->bar2_res);
+ hba->bar2h = rman_get_bushandle(hba->bar2_res);
+ hba->u.mvfrey.mu =
+ (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
+
+ if (!hba->u.mvfrey.mu) {
+ bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+ hba->bar0_rid, hba->bar0_res);
+ bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+ hba->bar2_rid, hba->bar2_res);
+ device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
if (hba->bar0_res)
@@ -1156,6 +1593,16 @@ static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
hba->bar2_rid, hba->bar2_res);
}
+static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
+{
+ if (hba->bar0_res)
+ bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+ hba->bar0_rid, hba->bar0_res);
+ if (hba->bar2_res)
+ bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+ hba->bar2_rid, hba->bar2_res);
+}
+
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
if (bus_dma_tag_create(hba->parent_dmat,
@@ -1172,7 +1619,7 @@ static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
NULL,
NULL,
#endif
- &hba->ctlcfg_dmat)) {
+ &hba->ctlcfg_dmat)) {
device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
return -1;
}
@@ -1205,6 +1652,72 @@ static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
return 0;
}
+static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
+{
+ u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
+
+ list_count >>= 16;
+
+ if (list_count == 0) {
+ return -1;
+ }
+
+ hba->u.mvfrey.list_count = list_count;
+ hba->u.mvfrey.internal_mem_size = 0x800
+ + list_count * sizeof(struct mvfrey_inlist_entry)
+ + list_count * sizeof(struct mvfrey_outlist_entry)
+ + sizeof(int);
+ if (bus_dma_tag_create(hba->parent_dmat,
+ 1,
+ 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ hba->u.mvfrey.internal_mem_size,
+ 1,
+ BUS_SPACE_MAXSIZE_32BIT,
+ BUS_DMA_ALLOCNOW,
+#if __FreeBSD_version > 502000
+ NULL,
+ NULL,
+#endif
+ &hba->ctlcfg_dmat)) {
+ device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
+ return -1;
+ }
+
+ if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
+#if __FreeBSD_version>501000
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT,
+#else
+ BUS_DMA_WAITOK,
+#endif
+ &hba->ctlcfg_dmamap) != 0) {
+ device_printf(hba->pcidev,
+ "bus_dmamem_alloc failed!\n");
+ bus_dma_tag_destroy(hba->ctlcfg_dmat);
+ return -1;
+ }
+
+ if (bus_dmamap_load(hba->ctlcfg_dmat,
+ hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
+ hba->u.mvfrey.internal_mem_size,
+ hptiop_mvfrey_map_ctlcfg, hba, 0)) {
+ device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
+ if (hba->ctlcfg_dmat)
+ bus_dmamem_free(hba->ctlcfg_dmat,
+ hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
+ bus_dma_tag_destroy(hba->ctlcfg_dmat);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
+ return 0;
+}
+
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
if (hba->ctlcfg_dmat) {
@@ -1217,6 +1730,54 @@ static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
return 0;
}
+static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
+{
+ if (hba->ctlcfg_dmat) {
+ bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
+ bus_dmamem_free(hba->ctlcfg_dmat,
+ hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
+ bus_dma_tag_destroy(hba->ctlcfg_dmat);
+ }
+
+ return 0;
+}
+
+static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
+{
+ u_int32_t i = 100;
+
+ if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
+ return -1;
+
+ /* wait 100ms for MCU ready */
+ while(i--) {
+ DELAY(1000);
+ }
+
+ BUS_SPACE_WRT4_MVFREY2(inbound_base,
+ hba->u.mvfrey.inlist_phy & 0xffffffff);
+ BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
+ (hba->u.mvfrey.inlist_phy >> 16) >> 16);
+
+ BUS_SPACE_WRT4_MVFREY2(outbound_base,
+ hba->u.mvfrey.outlist_phy & 0xffffffff);
+ BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
+ (hba->u.mvfrey.outlist_phy >> 16) >> 16);
+
+ BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
+ hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
+ BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
+ (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
+
+ hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
+ | CL_POINTER_TOGGLE;
+ *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
+ | CL_POINTER_TOGGLE;
+ hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
+
+ return 0;
+}
+
/*
* CAM driver interface
*/
@@ -1230,9 +1791,10 @@ static device_method_t driver_methods[] = {
};
static struct hptiop_adapter_ops hptiop_itl_ops = {
+ .family = INTEL_BASED_IOP,
.iop_wait_ready = hptiop_wait_ready_itl,
.internal_memalloc = 0,
- .internal_memfree = 0,
+ .internal_memfree = hptiop_internal_memfree_itl,
.alloc_pci_res = hptiop_alloc_pci_res_itl,
.release_pci_res = hptiop_release_pci_res_itl,
.enable_intr = hptiop_enable_intr_itl,
@@ -1243,9 +1805,11 @@ static struct hptiop_adapter_ops hptiop_itl_ops = {
.post_msg = hptiop_post_msg_itl,
.post_req = hptiop_post_req_itl,
.do_ioctl = hptiop_do_ioctl_itl,
+ .reset_comm = 0,
};
static struct hptiop_adapter_ops hptiop_mv_ops = {
+ .family = MV_BASED_IOP,
.iop_wait_ready = hptiop_wait_ready_mv,
.internal_memalloc = hptiop_internal_memalloc_mv,
.internal_memfree = hptiop_internal_memfree_mv,
@@ -1259,6 +1823,25 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
.post_msg = hptiop_post_msg_mv,
.post_req = hptiop_post_req_mv,
.do_ioctl = hptiop_do_ioctl_mv,
+ .reset_comm = 0,
+};
+
+static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
+ .family = MVFREY_BASED_IOP,
+ .iop_wait_ready = hptiop_wait_ready_mvfrey,
+ .internal_memalloc = hptiop_internal_memalloc_mvfrey,
+ .internal_memfree = hptiop_internal_memfree_mvfrey,
+ .alloc_pci_res = hptiop_alloc_pci_res_mvfrey,
+ .release_pci_res = hptiop_release_pci_res_mvfrey,
+ .enable_intr = hptiop_enable_intr_mvfrey,
+ .disable_intr = hptiop_disable_intr_mvfrey,
+ .get_config = hptiop_get_config_mvfrey,
+ .set_config = hptiop_set_config_mvfrey,
+ .iop_intr = hptiop_intr_mvfrey,
+ .post_msg = hptiop_post_msg_mvfrey,
+ .post_req = hptiop_post_req_mvfrey,
+ .do_ioctl = hptiop_do_ioctl_mvfrey,
+ .reset_comm = hptiop_reset_comm_mvfrey,
};
static driver_t hptiop_pci_driver = {
@@ -1284,6 +1867,11 @@ static int hptiop_probe(device_t dev)
id = pci_get_device(dev);
switch (id) {
+ case 0x4520:
+ case 0x4522:
+ sas = 1;
+ ops = &hptiop_mvfrey_ops;
+ break;
case 0x4210:
case 0x4211:
case 0x4310:
@@ -1385,7 +1973,7 @@ static int hptiop_attach(device_t dev)
goto release_pci_res;
}
- if (hba->ops->internal_memalloc) {
+ if (hba->ops->family == MV_BASED_IOP) {
if (hba->ops->internal_memalloc(hba)) {
device_printf(dev, "alloc srb_dmat failed\n");
goto destroy_parent_tag;
@@ -1404,6 +1992,17 @@ static int hptiop_attach(device_t dev)
hba->max_request_size = iop_config.request_size;
hba->max_sg_count = iop_config.max_sg_count;
+ if (hba->ops->family == MVFREY_BASED_IOP) {
+ if (hba->ops->internal_memalloc(hba)) {
+ device_printf(dev, "alloc srb_dmat failed\n");
+ goto destroy_parent_tag;
+ }
+ if (hba->ops->reset_comm(hba)) {
+ device_printf(dev, "reset comm failed\n");
+ goto get_config_failed;
+ }
+ }
+
if (bus_dma_tag_create(hba->parent_dmat,/* parent */
4, /* alignment */
BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
@@ -1542,6 +2141,7 @@ static int hptiop_attach(device_t dev)
}
hba->ops->enable_intr(hba);
+ hba->initialized = 1;
hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
@@ -1587,8 +2187,7 @@ destroy_io_dmat:
bus_dma_tag_destroy(hba->io_dmat);
get_config_failed:
- if (hba->ops->internal_memfree)
- hba->ops->internal_memfree(hba);
+ hba->ops->internal_memfree(hba);
destroy_parent_tag:
if (hba->parent_dmat)
@@ -1682,6 +2281,18 @@ static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
}
+static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
+{
+ BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
+ BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
+
+ BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
+ BUS_SPACE_RD4_MVFREY2(isr_enable);
+
+ BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
+ BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
+}
+
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
{
u_int32_t int_mask;
@@ -1704,9 +2315,24 @@ static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
BUS_SPACE_RD4_MV0(outbound_intmask);
}
-static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
+static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
+{
+ BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
+ BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
+
+ BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
+ BUS_SPACE_RD4_MVFREY2(isr_enable);
+
+ BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
+ BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
+}
+
+static void hptiop_reset_adapter(void *argv)
{
- return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
+ struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
+ if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
+ return;
+ hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
}
static void *hptiop_get_srb(struct hpt_iop_hba * hba)
@@ -1910,7 +2536,8 @@ static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
bcopy(cdb, req.cdb, ccb->csio.cdb_len);
- req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
+ req.header.size =
+ offsetof(struct hpt_iop_request_scsi_command, sg_list)
+ nsegs*sizeof(struct hpt_iopsg);
req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
req.header.flags = 0;
@@ -1956,7 +2583,8 @@ static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
req->channel = 0;
req->target = ccb->ccb_h.target_id;
req->lun = ccb->ccb_h.target_lun;
- req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
+ req->header.size =
+ offsetof(struct hpt_iop_request_scsi_command, sg_list)
+ nsegs*sizeof(struct hpt_iopsg);
req->header.context = (u_int64_t)srb->index |
IOPMU_QUEUE_ADDR_HOST_BIT;
@@ -2044,6 +2672,78 @@ static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
| (size > 3 ? 3 : size), hba);
}
+static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
+ struct hpt_iop_srb *srb,
+ bus_dma_segment_t *segs, int nsegs)
+{
+ int idx, index;
+ union ccb *ccb = srb->ccb;
+ u_int8_t *cdb;
+ struct hpt_iop_request_scsi_command *req;
+ u_int64_t req_phy;
+
+ req = (struct hpt_iop_request_scsi_command *)srb;
+ req_phy = srb->phy_addr;
+
+ if (ccb->csio.dxfer_len && nsegs > 0) {
+ struct hpt_iopsg *psg = req->sg_list;
+ for (idx = 0; idx < nsegs; idx++, psg++) {
+ psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
+ psg->size = segs[idx].ds_len;
+ psg->eot = 0;
+ }
+ psg[-1].eot = 1;
+ }
+ if (ccb->ccb_h.flags & CAM_CDB_POINTER)
+ cdb = ccb->csio.cdb_io.cdb_ptr;
+ else
+ cdb = ccb->csio.cdb_io.cdb_bytes;
+
+ bcopy(cdb, req->cdb, ccb->csio.cdb_len);
+ req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
+ req->header.result = IOP_RESULT_PENDING;
+ req->dataxfer_length = ccb->csio.dxfer_len;
+ req->channel = 0;
+ req->target = ccb->ccb_h.target_id;
+ req->lun = ccb->ccb_h.target_lun;
+ req->header.size = sizeof(struct hpt_iop_request_scsi_command)
+ - sizeof(struct hpt_iopsg)
+ + nsegs * sizeof(struct hpt_iopsg);
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+ bus_dmamap_sync(hba->io_dmat,
+ srb->dma_map, BUS_DMASYNC_PREREAD);
+ }
+ else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+ bus_dmamap_sync(hba->io_dmat,
+ srb->dma_map, BUS_DMASYNC_PREWRITE);
+
+ req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
+ | IOP_REQUEST_FLAG_ADDR_BITS
+ | ((req_phy >> 16) & 0xffff0000);
+ req->header.context = ((req_phy & 0xffffffff) << 32 )
+ | srb->index << 4
+ | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
+
+ hba->u.mvfrey.inlist_wptr++;
+ index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+ if (index == hba->u.mvfrey.list_count) {
+ index = 0;
+ hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+ hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+ }
+
+ hba->u.mvfrey.inlist[index].addr = req_phy;
+ hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
+
+ BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
+ BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
+
+ if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
+ ccb->ccb_h.timeout_ch = timeout(hptiop_reset_adapter, hba, 20*hz);
+ }
+}
+
static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
int nsegs, int error)
{
@@ -2076,6 +2776,41 @@ static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
& ~0x1F);
}
+static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
+ int nsegs, int error)
+{
+ struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
+ char *p;
+ u_int64_t phy;
+ u_int32_t list_count = hba->u.mvfrey.list_count;
+
+ phy = ((u_int64_t)segs->ds_addr + 0x1F)
+ & ~(u_int64_t)0x1F;
+ p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
+ & ~0x1F);
+
+ hba->ctlcfgcmd_phy = phy;
+ hba->ctlcfg_ptr = p;
+
+ p += 0x800;
+ phy += 0x800;
+
+ hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
+ hba->u.mvfrey.inlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_inlist_entry);
+ phy += list_count * sizeof(struct mvfrey_inlist_entry);
+
+ hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
+ hba->u.mvfrey.outlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_outlist_entry);
+ phy += list_count * sizeof(struct mvfrey_outlist_entry);
+
+ hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
+ hba->u.mvfrey.outlist_cptr_phy = phy;
+}
+
static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
int nsegs, int error)
{
@@ -2130,7 +2865,7 @@ static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
{
- hba->msg_done = 1;
+ hba->msg_done = 1;
}
static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
diff --git a/sys/dev/hptiop/hptiop.h b/sys/dev/hptiop/hptiop.h
index eb48676..0c8de3c 100644
--- a/sys/dev/hptiop/hptiop.h
+++ b/sys/dev/hptiop/hptiop.h
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
- * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -115,6 +115,68 @@ struct hpt_iopmv_regs {
u_int32_t outbound_intmask;
};
+#define CL_POINTER_TOGGLE 0x00004000
+#define CPU_TO_F0_DRBL_MSG_A_BIT 0x02000000
+
+#pragma pack(1)
+struct hpt_iopmu_mvfrey {
+ u_int32_t reserved[0x4000 / 4];
+
+ /* hpt_frey_com_reg */
+ u_int32_t inbound_base; /* 0x4000 : 0 */
+ u_int32_t inbound_base_high; /* 4 */
+ u_int32_t reserved2[(0x18 - 8)/ 4];
+ u_int32_t inbound_write_ptr; /* 0x18 */
+ u_int32_t inbound_read_ptr; /* 0x1c */
+ u_int32_t reserved3[(0x2c - 0x20) / 4];
+ u_int32_t inbound_conf_ctl; /* 0x2c */
+ u_int32_t reserved4[(0x50 - 0x30) / 4];
+ u_int32_t outbound_base; /* 0x50 */
+ u_int32_t outbound_base_high; /* 0x54 */
+ u_int32_t outbound_shadow_base; /* 0x58 */
+ u_int32_t outbound_shadow_base_high; /* 0x5c */
+ u_int32_t reserved5[(0x68 - 0x60) / 4];
+ u_int32_t outbound_write; /* 0x68 */
+ u_int32_t reserved6[(0x70 - 0x6c) / 4];
+ u_int32_t outbound_read; /* 0x70 */
+ u_int32_t reserved7[(0x88 - 0x74) / 4];
+ u_int32_t isr_cause; /* 0x88 */
+ u_int32_t isr_enable; /* 0x8c */
+
+ u_int32_t reserved8[(0x10200 - 0x4090) / 4];
+
+ /* hpt_frey_intr_ctl intr_ctl */
+ u_int32_t main_int_cuase; /* 0x10200: 0 */
+ u_int32_t main_irq_enable; /* 4 */
+ u_int32_t main_fiq_enable; /* 8 */
+ u_int32_t pcie_f0_int_enable; /* 0xc */
+ u_int32_t pcie_f1_int_enable; /* 0x10 */
+ u_int32_t pcie_f2_int_enable; /* 0x14 */
+ u_int32_t pcie_f3_int_enable; /* 0x18 */
+
+ u_int32_t reserved9[(0x10400 - 0x1021c) / 4];
+
+ /* hpt_frey_msg_drbl */
+ u_int32_t f0_to_cpu_msg_a; /* 0x10400: 0 */
+ u_int32_t reserved10[(0x20 - 4) / 4];
+ u_int32_t cpu_to_f0_msg_a; /* 0x20 */
+ u_int32_t reserved11[(0x80 - 0x24) / 4];
+ u_int32_t f0_doorbell; /* 0x80 */
+ u_int32_t f0_doorbell_enable; /* 0x84 */
+};
+
+struct mvfrey_inlist_entry {
+ u_int64_t addr;
+ u_int32_t intrfc_len;
+ u_int32_t reserved;
+};
+
+struct mvfrey_outlist_entry {
+ u_int32_t val;
+};
+
+#pragma pack()
+
#define MVIOP_IOCTLCFG_SIZE 0x800
#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
@@ -136,6 +198,8 @@ struct hpt_iopmv_regs {
#define MVIOP_REQUEST_NUMBER_START_BIT 16
+#define MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT 0x40000000
+
enum hpt_iopmu_message {
/* host-to-iop messages */
IOPMU_INBOUND_MSG0_NOP = 0,
@@ -144,6 +208,7 @@ enum hpt_iopmu_message {
IOPMU_INBOUND_MSG0_SHUTDOWN,
IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
+ IOPMU_INBOUND_MSG0_RESET_COMM,
IOPMU_INBOUND_MSG0_MAX = 0xff,
/* iop-to-host messages */
IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
@@ -159,6 +224,8 @@ enum hpt_iopmu_message {
#define IOP_REQUEST_FLAG_REMAPPED 4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
+#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */
+
enum hpt_iop_request_type {
IOP_REQUEST_TYPE_GET_CONFIG = 0,
IOP_REQUEST_TYPE_SET_CONFIG,
@@ -179,6 +246,7 @@ enum hpt_iop_result_type {
IOP_RESULT_CHECK_CONDITION,
};
+#pragma pack(1)
struct hpt_iop_request_header {
u_int32_t size;
u_int32_t type;
@@ -287,6 +355,21 @@ struct hpt_iop_hba {
struct hpt_iopmv_regs *regs;
struct hpt_iopmu_mv *mu;
} mv;
+ struct {
+ struct hpt_iop_request_get_config *config;
+ struct hpt_iopmu_mvfrey *mu;
+
+ int internal_mem_size;
+ int list_count;
+ struct mvfrey_inlist_entry *inlist;
+ u_int64_t inlist_phy;
+ u_int32_t inlist_wptr;
+ struct mvfrey_outlist_entry *outlist;
+ u_int64_t outlist_phy;
+ u_int32_t *outlist_cptr; /* copy pointer shadow */
+ u_int64_t outlist_cptr_phy;
+ u_int32_t outlist_rptr;
+ } mvfrey;
} u;
struct hpt_iop_hba *next;
@@ -335,7 +418,8 @@ struct hpt_iop_hba {
struct resource *ctlcfg_res;
void *ctlcfg_handle;
u_int64_t ctlcfgcmd_phy;
- u_int32_t config_done;
+ u_int32_t config_done; /* can be negative value */
+ u_int32_t initialized:1;
/* other resources */
struct cam_sim *sim;
@@ -350,8 +434,17 @@ struct hpt_iop_hba {
u_int32_t flag;
struct hpt_iop_srb* srb[HPT_SRB_MAX_QUEUE_SIZE];
};
+#pragma pack()
+
+enum hptiop_family {
+ INTEL_BASED_IOP = 0,
+ MV_BASED_IOP,
+ MVFREY_BASED_IOP,
+ UNKNOWN_BASED_IOP = 0xf
+};
struct hptiop_adapter_ops {
+ enum hptiop_family family;
int (*iop_wait_ready)(struct hpt_iop_hba *hba, u_int32_t millisec);
int (*internal_memalloc)(struct hpt_iop_hba *hba);
int (*internal_memfree)(struct hpt_iop_hba *hba);
@@ -366,7 +459,8 @@ struct hptiop_adapter_ops {
int (*iop_intr)(struct hpt_iop_hba *hba);
void (*post_msg)(struct hpt_iop_hba *hba, u_int32_t msg);
void (*post_req)(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs);
- int (*do_ioctl)(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams);
+ int (*do_ioctl)(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams);
+ int (*reset_comm)(struct hpt_iop_hba *hba);
};
struct hpt_iop_srb {
OpenPOWER on IntegriCloud