summaryrefslogtreecommitdiffstats
path: root/sys/dev
diff options
context:
space:
mode:
authordelphij <delphij@FreeBSD.org>2010-06-09 21:40:38 +0000
committerdelphij <delphij@FreeBSD.org>2010-06-09 21:40:38 +0000
commitc1095acf561cb6c7c8211dfcaa7e7d5c71bd054b (patch)
tree60c8bb8ffe52055f003054cbaac9ead81b48b520 /sys/dev
parent5d64b894b6296859e4c90f22c946659d7f71886c (diff)
downloadFreeBSD-src-c1095acf561cb6c7c8211dfcaa7e7d5c71bd054b.zip
FreeBSD-src-c1095acf561cb6c7c8211dfcaa7e7d5c71bd054b.tar.gz
Apply driver update from LSI. Many thanks to LSI for continuing to
support FreeBSD. 1) Timeout ioctl command timeouts. Do not reset the controller if ioctl command completed successfully. 2) Remove G66_WORKAROUND code (this bug never shipped). 3) Remove unnecessary interrupt lock (intr_lock). 4) Timeout firmware handshake for PChip reset (don't wait forever). 5) Handle interrupts inline. 6) Unmask command interrupt ONLY when adding a command to the pending queue. 7) Mask command interrupt ONLY after removing the last command from the pending queue. 8) Remove TW_OSLI_DEFERRED_INTR_USED code. 9) Replace controller "state" with separate data fields to avoid races: TW_CLI_CTLR_STATE_ACTIVE ctlr->active TW_CLI_CTLR_STATE_INTR_ENABLED ctlr->interrupts_enabled TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY ctlr->internal_req_busy TW_CLI_CTLR_STATE_GET_MORE_AENS ctlr->get_more_aens TW_CLI_CTLR_STATE_RESET_IN_PROGRESS ctlr->reset_in_progress TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS ctlr->reset_phase1_in_progress 10) Fix "req" leak in twa_action() when simq is frozen and req is NOT null. 11) Replace softc "state" with separate data fields to avoid races: TW_OSLI_CTLR_STATE_OPEN sc->open TW_OSLI_CTLR_STATE_SIMQ_FROZEN sc->simq_frozen 12) Fix reference to TW_OSLI_REQ_FLAGS_IN_PROGRESS in tw_osl_complete_passthru() 13) Use correct CAM status values. Change CAM_REQ_CMP_ERR to CAM_REQ_INVALID. Remove use of CAM_RELEASE_SIMQ for physical data addresses. 14) Do not freeze/ release the simq with non I/O commands. When it is appropriate to temporarily freeze the simq with an I/O command use: xpt_freeze_simq(sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; otherwise use: xpt_freeze_simq(sim, 1); xpt_release_simq(sim, 1); Submitted by: Tom Couch <tom.couch lsi.com> PR: kern/147695 MFC after: 3 days
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/twa/tw_cl.h27
-rw-r--r--sys/dev/twa/tw_cl_fwif.h3
-rw-r--r--sys/dev/twa/tw_cl_init.c60
-rw-r--r--sys/dev/twa/tw_cl_intr.c121
-rw-r--r--sys/dev/twa/tw_cl_io.c110
-rw-r--r--sys/dev/twa/tw_cl_misc.c49
-rw-r--r--sys/dev/twa/tw_cl_share.h13
-rw-r--r--sys/dev/twa/tw_osl.h18
-rw-r--r--sys/dev/twa/tw_osl_cam.c98
-rw-r--r--sys/dev/twa/tw_osl_externs.h6
-rw-r--r--sys/dev/twa/tw_osl_freebsd.c144
-rw-r--r--sys/dev/twa/tw_osl_inline.h15
-rw-r--r--sys/dev/twa/tw_osl_share.h2
13 files changed, 196 insertions, 470 deletions
diff --git a/sys/dev/twa/tw_cl.h b/sys/dev/twa/tw_cl.h
index f4741fa..4bdb442 100644
--- a/sys/dev/twa/tw_cl.h
+++ b/sys/dev/twa/tw_cl.h
@@ -51,22 +51,6 @@
#define TW_CLI_RESET_TIMEOUT_PERIOD 60 /* seconds */
#define TW_CLI_MAX_RESET_ATTEMPTS 2
-/* Possible values of ctlr->state. */
-/* Initialization done, and controller is active. */
-#define TW_CLI_CTLR_STATE_ACTIVE (1<<0)
-/* Interrupts on controller enabled. */
-#define TW_CLI_CTLR_STATE_INTR_ENABLED (1<<1)
-/* Data buffer for internal requests in use. */
-#define TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY (1<<2)
-/* More AEN's need to be retrieved. */
-#define TW_CLI_CTLR_STATE_GET_MORE_AENS (1<<3)
-/* Controller is being reset. */
-#define TW_CLI_CTLR_STATE_RESET_IN_PROGRESS (1<<4)
-/* G133 controller is in 'phase 1' of being reset. */
-#define TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS (1<<5)
-/* G66 register write access bug needs to be worked around. */
-#define TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED (1<<6)
-
/* Possible values of ctlr->ioctl_lock.lock. */
#define TW_CLI_LOCK_FREE 0x0 /* lock is free */
#define TW_CLI_LOCK_HELD 0x1 /* lock is held */
@@ -146,7 +130,12 @@ struct tw_cli_ctlr_context {
TW_UINT32 device_id; /* controller device id */
TW_UINT32 arch_id; /* controller architecture id */
- TW_UINT32 state; /* controller state */
+ TW_UINT8 active; /* Initialization done, and controller is active. */
+ TW_UINT8 interrupts_enabled; /* Interrupts on controller enabled. */
+ TW_UINT8 internal_req_busy; /* Data buffer for internal requests in use. */
+ TW_UINT8 get_more_aens; /* More AEN's need to be retrieved. */
+ TW_UINT8 reset_in_progress; /* Controller is being reset. */
+ TW_UINT8 reset_phase1_in_progress; /* In 'phase 1' of reset. */
TW_UINT32 flags; /* controller settings */
TW_UINT32 sg_size_factor; /* SG element size should be a
multiple of this */
@@ -199,10 +188,6 @@ struct tw_cli_ctlr_context {
submission */
TW_LOCK_HANDLE *io_lock;/* ptr to lock held during cmd
submission */
- TW_LOCK_HANDLE intr_lock_handle;/* lock held during
- ISR/response intr processing */
- TW_LOCK_HANDLE *intr_lock;/* ptr to lock held during ISR/
- response intr processing */
#ifdef TW_OSL_CAN_SLEEP
TW_SLEEP_HANDLE sleep_handle; /* handle to co-ordinate sleeps
diff --git a/sys/dev/twa/tw_cl_fwif.h b/sys/dev/twa/tw_cl_fwif.h
index 965ddda..f9c7678 100644
--- a/sys/dev/twa/tw_cl_fwif.h
+++ b/sys/dev/twa/tw_cl_fwif.h
@@ -89,7 +89,6 @@
#define TWA_STATUS_MINOR_VERSION_MASK 0x0F000000
#define TWA_STATUS_MAJOR_VERSION_MASK 0xF0000000
-#define TWA_STATUS_EXPECTED_BITS 0x00002000
#define TWA_STATUS_UNEXPECTED_BITS 0x00F00000
@@ -142,7 +141,7 @@
#define TWA_BASE_FW_SRL 24
#define TWA_BASE_FW_BRANCH 0
#define TWA_BASE_FW_BUILD 1
-#define TWA_CURRENT_FW_SRL 30
+#define TWA_CURRENT_FW_SRL 41
#define TWA_CURRENT_FW_BRANCH_9K 4
#define TWA_CURRENT_FW_BUILD_9K 8
#define TWA_CURRENT_FW_BRANCH_9K_X 8
diff --git a/sys/dev/twa/tw_cl_init.c b/sys/dev/twa/tw_cl_init.c
index ca282fa..0b7daa5 100644
--- a/sys/dev/twa/tw_cl_init.c
+++ b/sys/dev/twa/tw_cl_init.c
@@ -208,7 +208,7 @@ tw_cl_get_mem_requirements(struct tw_cl_ctlr_handle *ctlr_handle,
*/
*non_dma_mem_size = sizeof(struct tw_cli_ctlr_context) +
- (sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
+ (sizeof(struct tw_cli_req_context) * max_simult_reqs) +
(sizeof(struct tw_cl_event_packet) * max_aens);
@@ -220,7 +220,7 @@ tw_cl_get_mem_requirements(struct tw_cl_ctlr_handle *ctlr_handle,
*/
*dma_mem_size = (sizeof(struct tw_cl_command_packet) *
- (max_simult_reqs + 1)) + (TW_CLI_SECTOR_SIZE);
+ (max_simult_reqs)) + (TW_CLI_SECTOR_SIZE);
return(0);
}
@@ -287,12 +287,12 @@ tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
}
tw_osl_memzero(non_dma_mem, sizeof(struct tw_cli_ctlr_context) +
- (sizeof(struct tw_cli_req_context) * (max_simult_reqs + 1)) +
+ (sizeof(struct tw_cli_req_context) * max_simult_reqs) +
(sizeof(struct tw_cl_event_packet) * max_aens));
tw_osl_memzero(dma_mem,
(sizeof(struct tw_cl_command_packet) *
- (max_simult_reqs + 1)) +
+ max_simult_reqs) +
TW_CLI_SECTOR_SIZE);
free_non_dma_mem = (TW_UINT8 *)non_dma_mem;
@@ -307,7 +307,7 @@ tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
ctlr->arch_id = TWA_ARCH_ID(device_id);
ctlr->flags = flags;
ctlr->sg_size_factor = TWA_SG_ELEMENT_SIZE_FACTOR(device_id);
- ctlr->max_simult_reqs = max_simult_reqs + 1;
+ ctlr->max_simult_reqs = max_simult_reqs;
ctlr->max_aens_supported = max_aens;
/* Initialize queues of CL internal request context packets. */
@@ -321,55 +321,23 @@ tw_cl_init_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags,
tw_osl_init_lock(ctlr_handle, "tw_cl_gen_lock", ctlr->gen_lock);
ctlr->io_lock = &(ctlr->io_lock_handle);
tw_osl_init_lock(ctlr_handle, "tw_cl_io_lock", ctlr->io_lock);
- /*
- * If 64 bit cmd pkt addresses are used, we will need to serialize
- * writes to the hardware (across registers), since existing (G66)
- * hardware will get confused if, for example, we wrote the low 32 bits
- * of the cmd pkt address, followed by a response interrupt mask to the
- * control register, followed by the high 32 bits of the cmd pkt
- * address. It will then interpret the value written to the control
- * register as the low cmd pkt address. So, for this case, we will
- * make a note that we will need to synchronize control register writes
- * with command register writes.
- */
- if ((ctlr->flags & TW_CL_64BIT_ADDRESSES) &&
- ((ctlr->device_id == TW_CL_DEVICE_ID_9K) ||
- (ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
- (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
- (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))) {
- ctlr->state |= TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED;
- ctlr->intr_lock = ctlr->io_lock;
- } else {
- ctlr->intr_lock = &(ctlr->intr_lock_handle);
- tw_osl_init_lock(ctlr_handle, "tw_cl_intr_lock",
- ctlr->intr_lock);
- }
/* Initialize CL internal request context packets. */
ctlr->req_ctxt_buf = (struct tw_cli_req_context *)free_non_dma_mem;
free_non_dma_mem += (sizeof(struct tw_cli_req_context) *
- (
- max_simult_reqs +
- 1));
+ max_simult_reqs);
ctlr->cmd_pkt_buf = (struct tw_cl_command_packet *)dma_mem;
ctlr->cmd_pkt_phys = dma_mem_phys;
ctlr->internal_req_data = (TW_UINT8 *)
(ctlr->cmd_pkt_buf +
- (
- max_simult_reqs +
- 1));
+ max_simult_reqs);
ctlr->internal_req_data_phys = ctlr->cmd_pkt_phys +
(sizeof(struct tw_cl_command_packet) *
- (
- max_simult_reqs +
- 1));
-
- for (i = 0;
- i < (
- max_simult_reqs +
- 1); i++) {
+ max_simult_reqs);
+
+ for (i = 0; i < max_simult_reqs; i++) {
req = &(ctlr->req_ctxt_buf[i]);
req->cmd_pkt = &(ctlr->cmd_pkt_buf[i]);
@@ -421,8 +389,8 @@ start_ctlr:
/* Notify some info about the controller to the OSL. */
tw_cli_notify_ctlr_info(ctlr);
- /* Mark the controller as active. */
- ctlr->state |= TW_CLI_CTLR_STATE_ACTIVE;
+ /* Mark the controller active. */
+ ctlr->active = TW_CL_TRUE;
return(error);
}
@@ -597,7 +565,7 @@ tw_cl_shutdown_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags)
* Mark the controller as inactive, disable any further interrupts,
* and notify the controller that we are going down.
*/
- ctlr->state &= ~TW_CLI_CTLR_STATE_ACTIVE;
+ ctlr->active = TW_CL_FALSE;
tw_cli_disable_interrupts(ctlr);
@@ -617,8 +585,6 @@ tw_cl_shutdown_ctlr(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT32 flags)
/* Destroy all locks used by CL. */
tw_osl_destroy_lock(ctlr_handle, ctlr->gen_lock);
tw_osl_destroy_lock(ctlr_handle, ctlr->io_lock);
- if (!(ctlr->flags & TW_CL_64BIT_ADDRESSES))
- tw_osl_destroy_lock(ctlr_handle, ctlr->intr_lock);
ret:
return(error);
diff --git a/sys/dev/twa/tw_cl_intr.c b/sys/dev/twa/tw_cl_intr.c
index 44b4c14..08d63f9 100644
--- a/sys/dev/twa/tw_cl_intr.c
+++ b/sys/dev/twa/tw_cl_intr.c
@@ -75,22 +75,16 @@ tw_cl_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
if (ctlr == NULL)
goto out;
- /* If we get an interrupt while resetting, it is a shared
- one for another device, so just bail */
- if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS)
- goto out;
-
/*
- * Synchronize access between writes to command and control registers
- * in 64-bit environments, on G66.
+ * Bail If we get an interrupt while resetting, or shutting down.
*/
- if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
- tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
+ if (ctlr->reset_in_progress || !(ctlr->active))
+ goto out;
/* Read the status register to determine the type of interrupt. */
status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
if (tw_cli_check_ctlr_state(ctlr, status_reg))
- goto out_unlock;
+ goto out;
/* Clear the interrupt. */
if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
@@ -98,36 +92,30 @@ tw_cl_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
"Host interrupt");
TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
TWA_CONTROL_CLEAR_HOST_INTERRUPT);
- ctlr->host_intr_pending = 0; /* we don't use this */
- rc |= TW_CL_FALSE; /* don't request for a deferred isr call */
}
if (status_reg & TWA_STATUS_ATTENTION_INTERRUPT) {
tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
"Attention interrupt");
+ rc |= TW_CL_TRUE; /* request for a deferred isr call */
+ tw_cli_process_attn_intr(ctlr);
TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
- ctlr->attn_intr_pending = 1;
- rc |= TW_CL_TRUE; /* request for a deferred isr call */
}
if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
"Command interrupt");
- TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
- TWA_CONTROL_MASK_COMMAND_INTERRUPT);
- ctlr->cmd_intr_pending = 1;
rc |= TW_CL_TRUE; /* request for a deferred isr call */
+ tw_cli_process_cmd_intr(ctlr);
+ if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL)
+ TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
+ TWA_CONTROL_MASK_COMMAND_INTERRUPT);
}
if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
"Response interrupt");
- TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
- TWA_CONTROL_MASK_RESPONSE_INTERRUPT);
- ctlr->resp_intr_pending = 1;
rc |= TW_CL_TRUE; /* request for a deferred isr call */
+ tw_cli_process_resp_intr(ctlr);
}
-out_unlock:
- if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
- tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
out:
return(rc);
}
@@ -135,52 +123,6 @@ out:
/*
- * Function name: tw_cl_deferred_interrupt
- * Description: Deferred interrupt handler. Does most of the processing
- * related to an interrupt.
- *
- * Input: ctlr_handle -- controller handle
- * Output: None
- * Return value: None
- */
-TW_VOID
-tw_cl_deferred_interrupt(struct tw_cl_ctlr_handle *ctlr_handle)
-{
- struct tw_cli_ctlr_context *ctlr =
- (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
-
- tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
-
- /* Dispatch based on the kind of interrupt. */
- if (ctlr->host_intr_pending) {
- tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
- "Processing Host interrupt");
- ctlr->host_intr_pending = 0;
- tw_cli_process_host_intr(ctlr);
- }
- if (ctlr->attn_intr_pending) {
- tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
- "Processing Attention interrupt");
- ctlr->attn_intr_pending = 0;
- tw_cli_process_attn_intr(ctlr);
- }
- if (ctlr->cmd_intr_pending) {
- tw_cli_dbg_printf(6, ctlr_handle, tw_osl_cur_func(),
- "Processing Command interrupt");
- ctlr->cmd_intr_pending = 0;
- tw_cli_process_cmd_intr(ctlr);
- }
- if (ctlr->resp_intr_pending) {
- tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
- "Processing Response interrupt");
- ctlr->resp_intr_pending = 0;
- tw_cli_process_resp_intr(ctlr);
- }
-}
-
-
-
-/*
* Function name: tw_cli_process_host_intr
* Description: This function gets called if we triggered an interrupt.
* We don't use it as of now.
@@ -248,12 +190,6 @@ tw_cli_process_cmd_intr(struct tw_cli_ctlr_context *ctlr)
{
tw_cli_dbg_printf(6, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
- /*
- * Let the OS Layer submit any requests in its pending queue,
- * if it has one.
- */
- tw_osl_ctlr_ready(ctlr->ctlr_handle);
-
/* Start any requests that might be in the pending queue. */
tw_cli_submit_pending_queue(ctlr);
@@ -286,9 +222,6 @@ tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
- /* Serialize access to the controller response queue. */
- tw_osl_get_lock(ctlr->ctlr_handle, ctlr->intr_lock);
-
for (;;) {
status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr->ctlr_handle);
if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
@@ -315,7 +248,6 @@ tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
#ifdef TW_OSL_DEBUG
tw_cl_print_ctlr_stats(ctlr->ctlr_handle);
#endif /* TW_OSL_DEBUG */
- tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);
tw_cl_reset_ctlr(ctlr->ctlr_handle);
return(TW_OSL_EIO);
}
@@ -330,12 +262,6 @@ tw_cli_process_resp_intr(struct tw_cli_ctlr_context *ctlr)
}
- /* Unmask the response interrupt. */
- TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
- TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT);
-
- tw_osl_free_lock(ctlr->ctlr_handle, ctlr->intr_lock);
-
/* Complete this, and other requests in the complete queue. */
tw_cli_process_complete_queue(ctlr);
@@ -614,12 +540,11 @@ tw_cli_param_callback(struct tw_cli_req_context *req)
"status = %d", cmd->param.status);
}
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
- if ((ctlr->state & TW_CLI_CTLR_STATE_GET_MORE_AENS) &&
- (!(ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS))) {
- ctlr->state &= ~TW_CLI_CTLR_STATE_GET_MORE_AENS;
+ if ((ctlr->get_more_aens) && (!(ctlr->reset_in_progress))) {
+ ctlr->get_more_aens = TW_CL_FALSE;
tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(),
"Fetching more AEN's");
if ((error = tw_cli_get_aen(ctlr)))
@@ -677,7 +602,7 @@ tw_cli_aen_callback(struct tw_cli_req_context *req)
}
if (error) {
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
return;
}
@@ -688,7 +613,7 @@ tw_cli_aen_callback(struct tw_cli_req_context *req)
aen_code = tw_cli_manage_aen(ctlr, req);
if (aen_code != TWA_AEN_SYNC_TIME_WITH_HOST) {
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
if (aen_code != TWA_AEN_QUEUE_EMPTY)
if ((error = tw_cli_get_aen(ctlr)))
@@ -736,25 +661,25 @@ tw_cli_manage_aen(struct tw_cli_ctlr_context *ctlr,
* Free the internal req pkt right here, since
* tw_cli_set_param will need it.
*/
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
/*
* We will use a callback in tw_cli_set_param only when
* interrupts are enabled and we can expect our callback
- * to get called. Setting the TW_CLI_CTLR_STATE_GET_MORE_AENS
+ * to get called. Setting the get_more_aens
* flag will make the callback continue to try to retrieve
* more AEN's.
*/
- if (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
- ctlr->state |= TW_CLI_CTLR_STATE_GET_MORE_AENS;
+ if (ctlr->interrupts_enabled)
+ ctlr->get_more_aens = TW_CL_TRUE;
/* Calculate time (in seconds) since last Sunday 12.00 AM. */
local_time = tw_osl_get_local_time();
sync_time = (local_time - (3 * 86400)) % 604800;
if ((error = tw_cli_set_param(ctlr, TWA_PARAM_TIME_TABLE,
TWA_PARAM_TIME_SCHED_TIME, 4,
&sync_time,
- (ctlr->state & TW_CLI_CTLR_STATE_INTR_ENABLED)
+ (ctlr->interrupts_enabled)
? tw_cli_param_callback : TW_CL_NULL)))
tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
@@ -799,7 +724,7 @@ tw_cli_enable_interrupts(struct tw_cli_ctlr_context *ctlr)
{
tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
- ctlr->state |= TW_CLI_CTLR_STATE_INTR_ENABLED;
+ ctlr->interrupts_enabled = TW_CL_TRUE;
TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
@@ -823,6 +748,6 @@ tw_cli_disable_interrupts(struct tw_cli_ctlr_context *ctlr)
TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
TWA_CONTROL_DISABLE_INTERRUPTS);
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTR_ENABLED;
+ ctlr->interrupts_enabled = TW_CL_FALSE;
}
diff --git a/sys/dev/twa/tw_cl_io.c b/sys/dev/twa/tw_cl_io.c
index 6fea175..51ce08c 100644
--- a/sys/dev/twa/tw_cl_io.c
+++ b/sys/dev/twa/tw_cl_io.c
@@ -49,6 +49,10 @@
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_xpt_sim.h>
+
/*
@@ -76,11 +80,9 @@ tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
- if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
+ if (ctlr->reset_in_progress) {
tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
- "I/O during reset: returning busy. Ctlr state = 0x%x",
- ctlr->state);
- tw_osl_ctlr_busy(ctlr_handle, req_handle);
+ "I/O during reset: returning busy.");
return(TW_OSL_EBUSY);
}
@@ -101,7 +103,6 @@ tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
)) == TW_CL_NULL) {
tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
"Out of request context packets: returning busy");
- tw_osl_ctlr_busy(ctlr_handle, req_handle);
return(TW_OSL_EBUSY);
}
@@ -171,7 +172,6 @@ tw_cli_submit_cmd(struct tw_cli_req_context *req)
struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle;
TW_UINT32 status_reg;
TW_INT32 error;
- TW_UINT8 notify_osl_of_ctlr_busy = TW_CL_FALSE;
tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
@@ -208,10 +208,13 @@ tw_cli_submit_cmd(struct tw_cli_req_context *req)
req->state = TW_CLI_REQ_STATE_PENDING;
tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
error = 0;
+ /* Unmask command interrupt. */
+ TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
+ TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
} else
error = TW_OSL_EBUSY;
} else {
- notify_osl_of_ctlr_busy = TW_CL_TRUE;
+ tw_osl_ctlr_busy(ctlr_handle, req->req_handle);
error = TW_OSL_EBUSY;
}
} else {
@@ -246,25 +249,6 @@ tw_cli_submit_cmd(struct tw_cli_req_context *req)
out:
tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
- if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
- if (notify_osl_of_ctlr_busy)
- tw_osl_ctlr_busy(ctlr_handle, req->req_handle);
-
- /*
- * Synchronize access between writes to command and control
- * registers in 64-bit environments, on G66.
- */
- if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
- tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
-
- /* Unmask command interrupt. */
- TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
- TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
-
- if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
- tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
- }
-
return(error);
}
@@ -299,12 +283,9 @@ tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
- if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
+ if (ctlr->reset_in_progress) {
tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
- "Passthru request during reset: returning busy. "
- "Ctlr state = 0x%x",
- ctlr->state);
- tw_osl_ctlr_busy(ctlr_handle, req_handle);
+ "Passthru request during reset: returning busy.");
return(TW_OSL_EBUSY);
}
@@ -312,7 +293,6 @@ tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
)) == TW_CL_NULL) {
tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
"Out of request context packets: returning busy");
- tw_osl_ctlr_busy(ctlr_handle, req_handle);
return(TW_OSL_EBUSY);
}
@@ -759,11 +739,11 @@ tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
goto out;
/* Make sure this is the only CL internal request at this time. */
- if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
+ if (ctlr->internal_req_busy) {
error = TW_OSL_EBUSY;
goto out;
}
- ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_TRUE;
req->data = ctlr->internal_req_data;
req->data_phys = ctlr->internal_req_data_phys;
req->length = TW_CLI_SECTOR_SIZE;
@@ -821,7 +801,7 @@ tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
goto out;
}
tw_osl_memcpy(param_data, param->data, param_size);
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
} else {
/* There's a call back. Simply submit the command. */
@@ -838,7 +818,7 @@ out:
"get_param failed",
"error = %d", error);
if (param)
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
if (req)
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
return(1);
@@ -878,11 +858,11 @@ tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
goto out;
/* Make sure this is the only CL internal request at this time. */
- if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
+ if (ctlr->internal_req_busy) {
error = TW_OSL_EBUSY;
goto out;
}
- ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_TRUE;
req->data = ctlr->internal_req_data;
req->data_phys = ctlr->internal_req_data_phys;
req->length = TW_CLI_SECTOR_SIZE;
@@ -939,7 +919,7 @@ tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
&(req->cmd_pkt->cmd_hdr));
goto out;
}
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
} else {
/* There's a call back. Simply submit the command. */
@@ -956,7 +936,7 @@ out:
"set_param failed",
"error = %d", error);
if (param)
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
if (req)
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
return(error);
@@ -1054,8 +1034,11 @@ tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
* taking care of it).
*/
tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
+ if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL)
+ TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
+ TWA_CONTROL_MASK_COMMAND_INTERRUPT);
if (req->data)
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}
@@ -1079,12 +1062,16 @@ tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
struct tw_cli_ctlr_context *ctlr =
(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
+ struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt;
TW_INT32 reset_attempt = 1;
TW_INT32 error;
tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");
- ctlr->state |= TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
+ ctlr->reset_in_progress = TW_CL_TRUE;
+ xpt_freeze_simq(sc->sim, 1);
+
+ tw_cli_disable_interrupts(ctlr);
/*
* Error back all requests in the complete, busy, and pending queues.
@@ -1098,8 +1085,8 @@ tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
tw_cli_drain_complete_queue(ctlr);
tw_cli_drain_busy_queue(ctlr);
tw_cli_drain_pending_queue(ctlr);
-
- tw_cli_disable_interrupts(ctlr);
+ ctlr->internal_req_busy = TW_CL_FALSE;
+ ctlr->get_more_aens = TW_CL_FALSE;
/* Soft reset the controller. */
try_reset:
@@ -1135,7 +1122,9 @@ try_reset:
" ");
out:
- ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
+ ctlr->reset_in_progress = TW_CL_FALSE;
+ xpt_release_simq(sc->sim, 1);
+
/*
* Enable interrupts, and also clear attention and response interrupts.
*/
@@ -1163,6 +1152,8 @@ tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle;
TW_UINT32 status_reg;
+ int found;
+ int loop_count;
TW_UINT32 error;
tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");
@@ -1192,12 +1183,27 @@ tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
* make sure we don't access any hardware registers (for
* polling) during that window.
*/
- ctlr->state |= TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
- while (tw_cli_find_response(ctlr,
- TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) != TW_OSL_ESUCCESS)
+ ctlr->reset_phase1_in_progress = TW_CL_TRUE;
+ loop_count = 0;
+ do {
+ found = (tw_cli_find_response(ctlr, TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) == TW_OSL_ESUCCESS);
tw_osl_delay(10);
+ loop_count++;
+ error = 0x7888;
+ } while (!found && (loop_count < 6000000)); /* Loop for no more than 60 seconds */
+
+ if (!found) {
+ tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
+ TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
+ 0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
+ "Missed firmware handshake after soft-reset",
+ "error = %d", error);
+ tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
+ return(error);
+ }
+
tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
- ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
+ ctlr->reset_phase1_in_progress = TW_CL_FALSE;
}
if ((error = tw_cli_poll_status(ctlr,
@@ -1285,9 +1291,9 @@ tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
/* Make sure this is the only CL internal request at this time. */
- if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY)
+ if (ctlr->internal_req_busy)
return(TW_OSL_EBUSY);
- ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_TRUE;
req->data = ctlr->internal_req_data;
req->data_phys = ctlr->internal_req_data_phys;
tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
@@ -1365,7 +1371,7 @@ tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
"Could not send SCSI command",
"request = %p, error = %d", req, error);
if (req->data)
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}
return(error);
diff --git a/sys/dev/twa/tw_cl_misc.c b/sys/dev/twa/tw_cl_misc.c
index 1ee8120..1b9897e 100644
--- a/sys/dev/twa/tw_cl_misc.c
+++ b/sys/dev/twa/tw_cl_misc.c
@@ -368,14 +368,14 @@ tw_cli_drain_aen_queue(struct tw_cli_ctlr_context *ctlr)
if (aen_code == TWA_AEN_SYNC_TIME_WITH_HOST)
continue;
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}
out:
if (req) {
if (req->data)
- ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
+ ctlr->internal_req_busy = TW_CL_FALSE;
tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
}
return(error);
@@ -447,34 +447,7 @@ tw_cli_poll_status(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status,
/* got the required bit(s) */
return(TW_OSL_ESUCCESS);
- /*
- * The OSL should not define TW_OSL_CAN_SLEEP if it calls
- * tw_cl_deferred_interrupt from within the ISR and not a
- * lower interrupt level, since, in that case, we might end
- * up here, and try to sleep (within an ISR).
- */
-#ifndef TW_OSL_CAN_SLEEP
- /* OSL doesn't support sleeping; will spin. */
tw_osl_delay(1000);
-#else /* TW_OSL_CAN_SLEEP */
-#if 0
- /* Will spin if initializing, sleep otherwise. */
- if (!(ctlr->state & TW_CLI_CTLR_STATE_ACTIVE))
- tw_osl_delay(1000);
- else
- tw_osl_sleep(ctlr->ctlr_handle,
- &(ctlr->sleep_handle), 1 /* ms */);
-#else /* #if 0 */
- /*
- * Will always spin for now (since reset holds a spin lock).
- * We could free io_lock after the call to TW_CLI_SOFT_RESET,
- * so we could sleep here. To block new requests (since
- * the lock will have been released) we could use the
- * ...RESET_IN_PROGRESS flag. Need to revisit.
- */
- tw_osl_delay(1000);
-#endif /* #if 0 */
-#endif /* TW_OSL_CAN_SLEEP */
} while (tw_osl_get_local_time() <= end_time);
return(TW_OSL_ETIMEDOUT);
@@ -736,22 +709,20 @@ tw_cli_check_ctlr_state(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status_reg)
tw_cli_dbg_printf(8, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
/* Check if the 'micro-controller ready' bit is not set. */
- if ((status_reg & TWA_STATUS_EXPECTED_BITS) !=
- TWA_STATUS_EXPECTED_BITS) {
+ if (!(status_reg & TWA_STATUS_MICROCONTROLLER_READY)) {
TW_INT8 desc[200];
tw_osl_memzero(desc, 200);
- if ((status_reg & TWA_STATUS_MICROCONTROLLER_READY) ||
- (!(ctlr->state &
- TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS))) {
+ if (!(ctlr->reset_phase1_in_progress)) {
tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
0x1301, 0x1, TW_CL_SEVERITY_ERROR_STRING,
"Missing expected status bit(s)",
"status reg = 0x%x; Missing bits: %s",
status_reg,
- tw_cli_describe_bits (~status_reg &
- TWA_STATUS_EXPECTED_BITS, desc));
+ tw_cli_describe_bits(
+ TWA_STATUS_MICROCONTROLLER_READY,
+ desc));
error = TW_OSL_EGENFAILURE;
}
}
@@ -765,7 +736,7 @@ tw_cli_check_ctlr_state(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status_reg)
/* Skip queue error msgs during 9650SE/9690SA reset */
if (((ctlr->device_id != TW_CL_DEVICE_ID_9K_E) &&
(ctlr->device_id != TW_CL_DEVICE_ID_9K_SA)) ||
- ((ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) == 0) ||
+ (!(ctlr->reset_in_progress)) ||
((status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) == 0))
tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
@@ -819,7 +790,7 @@ tw_cli_check_ctlr_state(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status_reg)
/* Skip queue error msgs during 9650SE/9690SA reset */
if (((ctlr->device_id != TW_CL_DEVICE_ID_9K_E) &&
(ctlr->device_id != TW_CL_DEVICE_ID_9K_SA)) ||
- ((ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) == 0))
+ (!(ctlr->reset_in_progress)))
tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
0x1305, 0x1, TW_CL_SEVERITY_ERROR_STRING,
@@ -839,7 +810,7 @@ tw_cli_check_ctlr_state(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status_reg)
"status reg = 0x%x %s",
status_reg,
tw_cli_describe_bits(status_reg, desc));
- error = TW_OSL_EGENFAILURE;
+ error = TW_OSL_EGENFAILURE; // tw_cl_reset_ctlr(ctlr_handle);
}
}
return(error);
diff --git a/sys/dev/twa/tw_cl_share.h b/sys/dev/twa/tw_cl_share.h
index e583818..2b3a0b7 100644
--- a/sys/dev/twa/tw_cl_share.h
+++ b/sys/dev/twa/tw_cl_share.h
@@ -76,7 +76,7 @@
* of supporting only 255, since we want to keep one CL internal request
* context packet always available for internal requests.
*/
-#define TW_CL_MAX_SIMULTANEOUS_REQUESTS 0xFF /* max simult reqs supported */
+#define TW_CL_MAX_SIMULTANEOUS_REQUESTS 256 /* max simult reqs supported */
#define TW_CL_MAX_32BIT_SG_ELEMENTS 109 /* max 32-bit sg elements */
#define TW_CL_MAX_64BIT_SG_ELEMENTS 72 /* max 64-bit sg elements */
@@ -144,6 +144,7 @@ struct tw_cl_ctlr_handle {
struct tw_cl_req_handle {
TW_VOID *osl_req_ctxt; /* OSL's request context */
TW_VOID *cl_req_ctxt; /* CL's request context */
+ TW_UINT8 is_io; /* Only freeze/release simq for IOs */
};
@@ -353,12 +354,6 @@ extern TW_VOID tw_osl_ctlr_busy(struct tw_cl_ctlr_handle *ctlr_handle,
#endif
-#ifndef tw_osl_ctlr_ready
-/* Called on cmd interrupt. Allows re-submission of any pending requests. */
-extern TW_VOID tw_osl_ctlr_ready(struct tw_cl_ctlr_handle *ctlr_handle);
-#endif
-
-
#ifndef tw_osl_cur_func
/* Text name of current function. */
extern TW_INT8 *tw_osl_cur_func(TW_VOID);
@@ -528,10 +523,6 @@ extern TW_VOID tw_cl_create_event(struct tw_cl_ctlr_handle *ctlr_handle,
extern TW_INT32 tw_cl_ctlr_supported(TW_INT32 vendor_id, TW_INT32 device_id);
-/* Deferred interrupt handler. */
-extern TW_VOID tw_cl_deferred_interrupt(struct tw_cl_ctlr_handle *ctlr_handle);
-
-
/* Submit a firmware cmd packet. */
extern TW_INT32 tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle);
diff --git a/sys/dev/twa/tw_osl.h b/sys/dev/twa/tw_osl.h
index 6082253..bce9958 100644
--- a/sys/dev/twa/tw_osl.h
+++ b/sys/dev/twa/tw_osl.h
@@ -50,13 +50,11 @@
#define TW_OSLI_DEVICE_NAME "3ware 9000 series Storage Controller"
#define TW_OSLI_MALLOC_CLASS M_TWA
-#define TW_OSLI_MAX_NUM_IOS TW_CL_MAX_SIMULTANEOUS_REQUESTS
+#define TW_OSLI_MAX_NUM_REQUESTS TW_CL_MAX_SIMULTANEOUS_REQUESTS
+/* Reserve two command packets. One for ioctls and one for AENs */
+#define TW_OSLI_MAX_NUM_IOS (TW_OSLI_MAX_NUM_REQUESTS - 2)
#define TW_OSLI_MAX_NUM_AENS 0x100
-/* Disabled, doesn't work yet.
-#define TW_OSLI_DEFERRED_INTR_USED
-*/
-
#ifdef PAE
#define TW_OSLI_DMA_BOUNDARY (1u << 31)
#else
@@ -80,10 +78,6 @@
#define TW_OSLI_REQ_FLAGS_PASSTHRU (1<<5) /* pass through request */
#define TW_OSLI_REQ_FLAGS_SLEEPING (1<<6) /* owner sleeping on this cmd */
-/* Possible values of sc->state. */
-#define TW_OSLI_CTLR_STATE_OPEN (1<<0) /* control device is open */
-#define TW_OSLI_CTLR_STATE_SIMQ_FROZEN (1<<1) /* simq frozen */
-
#ifdef TW_OSL_DEBUG
struct tw_osli_q_stats {
@@ -101,6 +95,8 @@ struct tw_osli_q_stats {
/* Driver's request packet. */
struct tw_osli_req_context {
struct tw_cl_req_handle req_handle;/* tag to track req b/w OSL & CL */
+ struct mtx ioctl_wake_timeout_lock_handle;/* non-spin lock used to detect ioctl timeout */
+ struct mtx *ioctl_wake_timeout_lock;/* ptr to above lock */
struct twa_softc *ctlr; /* ptr to OSL's controller context */
TW_VOID *data; /* ptr to data being passed to CL */
TW_UINT32 length; /* length of buf being passed to CL */
@@ -130,10 +126,10 @@ struct tw_osli_req_context {
/* Per-controller structure. */
struct twa_softc {
struct tw_cl_ctlr_handle ctlr_handle;
- struct tw_osli_req_context *req_ctxt_buf;
+ struct tw_osli_req_context *req_ctx_buf;
/* Controller state. */
- TW_UINT32 state;
+ TW_UINT8 open;
TW_UINT32 flags;
TW_INT32 device_id;
diff --git a/sys/dev/twa/tw_osl_cam.c b/sys/dev/twa/tw_osl_cam.c
index 1d22920..5854fd6 100644
--- a/sys/dev/twa/tw_osl_cam.c
+++ b/sys/dev/twa/tw_osl_cam.c
@@ -81,7 +81,7 @@ tw_osli_cam_attach(struct twa_softc *sc)
/*
* Create the device queue for our SIM.
*/
- if ((devq = cam_simq_alloc(TW_OSLI_MAX_NUM_IOS)) == NULL) {
+ if ((devq = cam_simq_alloc(TW_OSLI_MAX_NUM_REQUESTS)) == NULL) {
tw_osli_printf(sc, "error = %d",
TW_CL_SEVERITY_ERROR_STRING,
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
@@ -92,15 +92,15 @@ tw_osli_cam_attach(struct twa_softc *sc)
}
/*
- * Create a SIM entry. Though we can support TW_OSLI_MAX_NUM_IOS
+ * Create a SIM entry. Though we can support TW_OSLI_MAX_NUM_REQUESTS
* simultaneous requests, we claim to be able to handle only
- * (TW_OSLI_MAX_NUM_IOS - 1), so that we always have a request
- * packet available to service ioctls.
+ * TW_OSLI_MAX_NUM_IOS (two fewer), so that we always have a request
+ * packet available to service ioctls and AENs.
*/
tw_osli_dbg_dprintf(3, sc, "Calling cam_sim_alloc");
sc->sim = cam_sim_alloc(twa_action, twa_poll, "twa", sc,
device_get_unit(sc->bus_dev), sc->sim_lock,
- TW_OSLI_MAX_NUM_IOS - 1, 1, devq);
+ TW_OSLI_MAX_NUM_IOS, 1, devq);
if (sc->sim == NULL) {
cam_simq_free(devq);
tw_osli_printf(sc, "error = %d",
@@ -168,14 +168,6 @@ tw_osli_cam_detach(struct twa_softc *sc)
{
tw_osli_dbg_dprintf(3, sc, "entered");
-#ifdef TW_OSLI_DEFERRED_INTR_USED
- /* - drain the taskqueue
- Ctrl is already went down so, no more enqueuetask will
- happen . Don't hold any locks, that task might need.
- */
-
- taskqueue_drain(taskqueue_fast, &(sc->deferred_intr_callback));
-#endif
mtx_lock(sc->sim_lock);
if (sc->path)
@@ -236,7 +228,7 @@ tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb)
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
0x2105,
"Physical CDB address!");
- ccb_h->status = CAM_REQ_CMP_ERR;
+ ccb_h->status = CAM_REQ_INVALID;
xpt_done(ccb);
return(1);
}
@@ -297,7 +289,7 @@ tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb)
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
0x2107,
"XPT_SCSI_IO: Got SGList");
- ccb_h->status = CAM_REQ_CMP_ERR;
+ ccb_h->status = CAM_REQ_INVALID;
xpt_done(ccb);
return(1);
}
@@ -308,8 +300,7 @@ tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb)
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
0x2108,
"XPT_SCSI_IO: Physical data addresses");
- ccb_h->status = CAM_REQ_CMP_ERR;
- ccb_h->status |= CAM_RELEASE_SIMQ;
+ ccb_h->status = CAM_REQ_INVALID;
ccb_h->status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
return(1);
@@ -347,20 +338,19 @@ twa_action(struct cam_sim *sim, union ccb *ccb)
{
struct tw_osli_req_context *req;
- if ((sc->state & TW_OSLI_CTLR_STATE_SIMQ_FROZEN) ||
- ((req = tw_osli_get_request(sc)) == NULL)) {
- tw_osli_dbg_dprintf(2, sc,
- "simq frozen/Cannot get request pkt.");
+ req = tw_osli_get_request(sc);
+ if (req == NULL) {
+ tw_osli_dbg_dprintf(2, sc, "Cannot get request pkt.");
/*
* Freeze the simq to maintain ccb ordering. The next
* ccb that gets completed will unfreeze the simq.
*/
- tw_osli_disallow_new_requests(sc);
ccb_h->status |= CAM_REQUEUE_REQ;
xpt_done(ccb);
break;
}
req->req_handle.osl_req_ctxt = req;
+ req->req_handle.is_io = TW_CL_TRUE;
req->orig_req = ccb;
if (tw_osli_execute_scsi(req, ccb))
tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
@@ -490,18 +480,7 @@ twa_poll(struct cam_sim *sim)
struct twa_softc *sc = (struct twa_softc *)(cam_sim_softc(sim));
tw_osli_dbg_dprintf(3, sc, "entering; sc = %p", sc);
- /*
- * It's been observed that twa_poll can get called (from
- * dashutdown --> xpt_polled_action) even when interrupts are
- * active, in which case, the ISR might clear the interrupt,
- * leaving the call to tw_cl_interrupt below, no way of determining
- * that the response from firmware is ready, resulting in
- * tw_cl_deferred_interrupt never getting called. To cover this case,
- * we will make the call to tw_cl_deferred_interrupt not dependent
- * on the return value from tw_cl_interrupt.
- */
tw_cl_interrupt(&(sc->ctlr_handle));
- tw_cl_deferred_interrupt(&(sc->ctlr_handle));
tw_osli_dbg_dprintf(3, sc, "exiting; sc = %p", sc);
}
@@ -561,12 +540,6 @@ tw_osli_request_bus_scan(struct twa_softc *sc)
return(EIO);
}
- /* Release simq at the end of a reset */
- if (sc->state & TW_OSLI_CTLR_STATE_SIMQ_FROZEN) {
- xpt_release_simq(sc->sim, 1);
- sc->state &= ~TW_OSLI_CTLR_STATE_SIMQ_FROZEN;
- }
-
xpt_rescan(ccb);
mtx_unlock(sc->sim_lock);
return(0);
@@ -575,44 +548,26 @@ tw_osli_request_bus_scan(struct twa_softc *sc)
/*
- * Function name: tw_osli_allow_new_requests
- * Description: Sets the appropriate status bits in a ccb such that,
- * when the ccb is completed by a call to xpt_done,
- * CAM knows that it's ok to unfreeze the flow of new
- * requests to this controller, if the flow is frozen.
- *
- * Input: sc -- ptr to OSL internal ctlr context
- * ccb -- ptr to CAM request
- * Output: None
- * Return value: None
- */
-TW_VOID
-tw_osli_allow_new_requests(struct twa_softc *sc, TW_VOID *ccb)
-{
- ((union ccb *)(ccb))->ccb_h.status |= CAM_RELEASE_SIMQ;
- sc->state &= ~TW_OSLI_CTLR_STATE_SIMQ_FROZEN;
-}
-
-
-
-/*
* Function name: tw_osli_disallow_new_requests
* Description: Calls the appropriate CAM function, so as to freeze
* the flow of new requests from CAM to this controller.
*
* Input: sc -- ptr to OSL internal ctlr context
+ * req_handle -- ptr to request handle sent by OSL.
* Output: None
* Return value: None
*/
TW_VOID
-tw_osli_disallow_new_requests(struct twa_softc *sc)
+tw_osli_disallow_new_requests(struct twa_softc *sc,
+ struct tw_cl_req_handle *req_handle)
{
- /* Don't double freeze if already frozen */
- if ((sc->state & TW_OSLI_CTLR_STATE_SIMQ_FROZEN) == 0) {
- mtx_lock(sc->sim_lock);
+ /* Only freeze/release the simq for IOs */
+ if (req_handle->is_io) {
+ struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
+ union ccb *ccb = (union ccb *)(req->orig_req);
+
xpt_freeze_simq(sc->sim, 1);
- mtx_unlock(sc->sim_lock);
- sc->state |= TW_OSLI_CTLR_STATE_SIMQ_FROZEN;
+ ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
}
@@ -632,7 +587,7 @@ TW_VOID
tw_osl_ctlr_busy(struct tw_cl_ctlr_handle *ctlr_handle,
struct tw_cl_req_handle *req_handle)
{
- tw_osli_disallow_new_requests(ctlr_handle->osl_ctlr_ctxt);
+ tw_osli_disallow_new_requests(ctlr_handle->osl_ctlr_ctxt, req_handle);
}
@@ -706,10 +661,8 @@ tw_osl_complete_io(struct tw_cl_req_handle *req_handle)
if (req->error_code == EBUSY) {
/*
* Cmd queue is full, or the Common Layer is out of
- * resources. The simq will already have been frozen
- * by CL's call to tw_osl_ctlr_busy, and this will
- * maintain ccb ordering. The next ccb that gets
- * completed will unfreeze the simq.
+ * resources. The simq will already have been frozen.
+ * When this ccb gets completed, the simq will be unfrozen.
*/
ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}
@@ -744,9 +697,6 @@ tw_osl_complete_io(struct tw_cl_req_handle *req_handle)
}
ccb->csio.scsi_status = scsi_req->scsi_status;
- /* If simq is frozen, unfreeze it. */
- if (sc->state & TW_OSLI_CTLR_STATE_SIMQ_FROZEN)
- tw_osli_allow_new_requests(sc, (TW_VOID *)ccb);
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
diff --git a/sys/dev/twa/tw_osl_externs.h b/sys/dev/twa/tw_osl_externs.h
index e46c2d2..08963e5 100644
--- a/sys/dev/twa/tw_osl_externs.h
+++ b/sys/dev/twa/tw_osl_externs.h
@@ -79,11 +79,9 @@ extern TW_VOID tw_osli_cam_detach(struct twa_softc *sc);
/* Request CAM for a bus scan. */
extern TW_INT32 tw_osli_request_bus_scan(struct twa_softc *sc);
-/* Unfreeze ccb flow from CAM. */
-extern TW_VOID tw_osli_allow_new_requests(struct twa_softc *sc, TW_VOID *ccb);
-
/* Freeze ccb flow from CAM. */
-extern TW_VOID tw_osli_disallow_new_requests(struct twa_softc *sc);
+extern TW_VOID tw_osli_disallow_new_requests(struct twa_softc *sc,
+ struct tw_cl_req_handle *req_handle);
/* OSL's completion routine for SCSI I/O's. */
extern TW_VOID tw_osl_complete_io(struct tw_cl_req_handle *req_handle);
diff --git a/sys/dev/twa/tw_osl_freebsd.c b/sys/dev/twa/tw_osl_freebsd.c
index d3f82f3..09bb55e 100644
--- a/sys/dev/twa/tw_osl_freebsd.c
+++ b/sys/dev/twa/tw_osl_freebsd.c
@@ -91,7 +91,7 @@ twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
tw_osli_dbg_dprintf(5, sc, "entered");
- sc->state |= TW_OSLI_CTLR_STATE_OPEN;
+ sc->open = TW_CL_TRUE;
return(0);
}
@@ -116,7 +116,7 @@ twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
tw_osli_dbg_dprintf(5, sc, "entered");
- sc->state &= ~TW_OSLI_CTLR_STATE_OPEN;
+ sc->open = TW_CL_FALSE;
return(0);
}
@@ -174,12 +174,7 @@ static TW_INT32 twa_attach(device_t dev);
static TW_INT32 twa_detach(device_t dev);
static TW_INT32 twa_shutdown(device_t dev);
static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
-#ifdef TW_OSLI_DEFERRED_INTR_USED
-static int twa_pci_intr_fast(TW_VOID *arg);
-static TW_VOID twa_deferred_intr(TW_VOID *context, TW_INT32 pending);
-#else
static TW_VOID twa_pci_intr(TW_VOID *arg);
-#endif /* TW_OSLI_DEFERRED_INTR_USED */
static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
@@ -360,13 +355,8 @@ twa_attach(device_t dev)
return(ENXIO);
}
if ((error = bus_setup_intr(sc->bus_dev, sc->irq_res,
-#ifdef TW_OSLI_DEFERRED_INTR_USED
- INTR_TYPE_CAM | INTR_FAST,
- twa_pci_intr_fast, NULL,
-#else
INTR_TYPE_CAM | INTR_MPSAFE,
NULL, twa_pci_intr,
-#endif
sc, &sc->intr_handle))) {
tw_osli_printf(sc, "error = %d",
TW_CL_SEVERITY_ERROR_STRING,
@@ -378,10 +368,6 @@ twa_attach(device_t dev)
return(error);
}
-#ifdef TW_OSLI_DEFERRED_INTR_USED
- TASK_INIT(&sc->deferred_intr_callback, 0, twa_deferred_intr, sc);
-#endif /* TW_OSLI_DEFERRED_INTR_USED */
-
if ((error = tw_osli_alloc_mem(sc))) {
tw_osli_printf(sc, "error = %d",
TW_CL_SEVERITY_ERROR_STRING,
@@ -395,7 +381,7 @@ twa_attach(device_t dev)
/* Initialize the Common Layer for this controller. */
if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
- TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
+ TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
sc->non_dma_mem, sc->dma_mem,
sc->dma_mem_phys
))) {
@@ -454,15 +440,12 @@ tw_osli_alloc_mem(struct twa_softc *sc)
sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
-#ifdef TW_OSLI_DEFERRED_INTR_USED
- sc->flags |= TW_CL_DEFERRED_INTR_USED;
-#endif /* TW_OSLI_DEFERRED_INTR_USED */
max_sg_elements = (sizeof(bus_addr_t) == 8) ?
TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
- sc->device_id, TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
+ sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
&(sc->alignment), &(sc->sg_size_factor),
&non_dma_mem_size, &dma_mem_size
))) {
@@ -621,9 +604,9 @@ tw_osli_alloc_mem(struct twa_softc *sc)
tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
- if ((sc->req_ctxt_buf = (struct tw_osli_req_context *)
+ if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
malloc((sizeof(struct tw_osli_req_context) *
- TW_OSLI_MAX_NUM_IOS),
+ TW_OSLI_MAX_NUM_REQUESTS),
TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
tw_osli_printf(sc, "error = %d",
TW_CL_SEVERITY_ERROR_STRING,
@@ -633,11 +616,11 @@ tw_osli_alloc_mem(struct twa_softc *sc)
ENOMEM);
return(ENOMEM);
}
- bzero(sc->req_ctxt_buf,
- sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_IOS);
+ bzero(sc->req_ctx_buf,
+ sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);
- for (i = 0; i < TW_OSLI_MAX_NUM_IOS; i++) {
- req = &(sc->req_ctxt_buf[i]);
+ for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
+ req = &(sc->req_ctx_buf[i]);
req->ctlr = sc;
if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
tw_osli_printf(sc, "request # = %d, error = %d",
@@ -649,6 +632,10 @@ tw_osli_alloc_mem(struct twa_softc *sc)
return(ENOMEM);
}
+ /* Initialize the ioctl wakeup/timeout mutex */
+ req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
+ mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);
+
/* Insert request into the free queue. */
tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
}
@@ -677,14 +664,17 @@ tw_osli_free_resources(struct twa_softc *sc)
/* Detach from CAM */
tw_osli_cam_detach(sc);
- if (sc->req_ctxt_buf)
+ if (sc->req_ctx_buf)
while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
- NULL)
+ NULL) {
+ mtx_destroy(req->ioctl_wake_timeout_lock);
+
if ((error = bus_dmamap_destroy(sc->dma_tag,
req->dma_map)))
tw_osli_dbg_dprintf(1, sc,
"dmamap_destroy(dma) returned %d",
error);
+ }
if ((sc->ioctl_tag) && (sc->ioctl_map))
if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
@@ -692,8 +682,8 @@ tw_osli_free_resources(struct twa_softc *sc)
"dmamap_destroy(ioctl) returned %d", error);
/* Free all memory allocated so far. */
- if (sc->req_ctxt_buf)
- free(sc->req_ctxt_buf, TW_OSLI_MALLOC_CLASS);
+ if (sc->req_ctx_buf)
+ free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);
if (sc->non_dma_mem)
free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
@@ -777,7 +767,7 @@ twa_detach(device_t dev)
tw_osli_dbg_dprintf(3, sc, "entered");
error = EBUSY;
- if (sc->state & TW_OSLI_CTLR_STATE_OPEN) {
+ if (sc->open) {
tw_osli_printf(sc, "error = %d",
TW_CL_SEVERITY_ERROR_STRING,
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
@@ -863,29 +853,6 @@ twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
}
-#ifdef TW_OSLI_DEFERRED_INTR_USED
-/*
- * Function name: twa_pci_intr_fast
- * Description: Interrupt handler. Wrapper for twa_interrupt.
- *
- * Input: arg -- ptr to OSL internal ctlr context
- * Output: FILTER_HANDLED or FILTER_STRAY
- * Return value: None
- */
-static int
-twa_pci_intr_fast(TW_VOID *arg)
-{
- struct twa_softc *sc = (struct twa_softc *)arg;
-
- tw_osli_dbg_dprintf(10, sc, "entered");
- if (tw_cl_interrupt(&(sc->ctlr_handle))) {
- taskqueue_enqueue_fast(taskqueue_fast,
- &(sc->deferred_intr_callback));
- return(FILTER_HANDLED);
- }
- return(FILTER_STRAY);
-}
-#else
/*
* Function name: twa_pci_intr
* Description: Interrupt handler. Wrapper for twa_interrupt.
@@ -900,35 +867,9 @@ twa_pci_intr(TW_VOID *arg)
struct twa_softc *sc = (struct twa_softc *)arg;
tw_osli_dbg_dprintf(10, sc, "entered");
- if (tw_cl_interrupt(&(sc->ctlr_handle)))
- tw_cl_deferred_interrupt(&(sc->ctlr_handle));
-}
-#endif
-
-#ifdef TW_OSLI_DEFERRED_INTR_USED
-
-/*
- * Function name: twa_deferred_intr
- * Description: Deferred interrupt handler.
- *
- * Input: context -- ptr to OSL internal ctlr context
- * pending -- not used
- * Output: None
- * Return value: None
- */
-static TW_VOID
-twa_deferred_intr(TW_VOID *context, TW_INT32 pending)
-{
- struct twa_softc *sc = (struct twa_softc *)context;
-
- tw_osli_dbg_dprintf(10, sc, "entered");
-
- tw_cl_deferred_interrupt(&(sc->ctlr_handle));
+ tw_cl_interrupt(&(sc->ctlr_handle));
}
-#endif /* TW_OSLI_DEFERRED_INTR_USED */
-
-
/*
* Function name: tw_osli_fw_passthru
@@ -1015,9 +956,12 @@ tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
end_time = tw_osl_get_local_time() + timeout;
while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
+ mtx_lock(req->ioctl_wake_timeout_lock);
req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
-
- error = tsleep(req, PRIBIO, "twa_passthru", timeout * hz);
+
+ error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
+ "twa_passthru", timeout*hz);
+ mtx_unlock(req->ioctl_wake_timeout_lock);
if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
error = 0;
@@ -1039,6 +983,20 @@ tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
if (error == EWOULDBLOCK) {
/* Time out! */
+ if ((!(req->error_code)) &&
+ (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
+ (!(req_pkt->status)) ) {
+#ifdef TW_OSL_DEBUG
+ tw_osli_printf(sc, "request = %p",
+ TW_CL_SEVERITY_ERROR_STRING,
+ TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
+ 0x7777,
+ "FALSE Passthru timeout!",
+ req);
+#endif /* TW_OSL_DEBUG */
+ error = 0; /* False error */
+ break;
+ }
tw_osli_printf(sc, "request = %p",
TW_CL_SEVERITY_ERROR_STRING,
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
@@ -1144,8 +1102,7 @@ tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
* EINPROGRESS. The request originator will then be returned an
* error, and he can do the clean-up.
*/
- if ((req->error_code) &&
- (!(req->state & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
+ if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
return;
if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
@@ -1157,7 +1114,7 @@ tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
wakeup_one(req);
} else {
/*
- * If the request completed even before tsleep
+ * If the request completed even before mtx_sleep
* was called, simply return.
*/
if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
@@ -1206,6 +1163,7 @@ tw_osli_get_request(struct twa_softc *sc)
if (req) {
req->req_handle.osl_req_ctxt = NULL;
req->req_handle.cl_req_ctxt = NULL;
+ req->req_handle.is_io = 0;
req->data = NULL;
req->length = 0;
req->real_data = NULL;
@@ -1255,8 +1213,6 @@ twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
- if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
- tw_osli_allow_new_requests(sc, (TW_VOID *)(req->orig_req));
if (error == EFBIG) {
req->error_code = error;
@@ -1345,7 +1301,7 @@ static TW_VOID
twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
TW_INT32 nsegments, TW_INT32 error)
{
- *((TW_UINT64 *)arg) = segs[0].ds_addr;
+ *((bus_addr_t *)arg) = segs[0].ds_addr;
}
@@ -1438,11 +1394,9 @@ tw_osli_map_request(struct tw_osli_req_context *req)
* of ...FLAGS_MAPPED from the callback.
*/
mtx_lock_spin(sc->io_lock);
- if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) {
- req->flags |=
- TW_OSLI_REQ_FLAGS_IN_PROGRESS;
- tw_osli_disallow_new_requests(sc);
- }
+ if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
+ req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
+ tw_osli_disallow_new_requests(sc, &(req->req_handle));
mtx_unlock_spin(sc->io_lock);
error = 0;
} else {
diff --git a/sys/dev/twa/tw_osl_inline.h b/sys/dev/twa/tw_osl_inline.h
index ad7ece5..74d100d 100644
--- a/sys/dev/twa/tw_osl_inline.h
+++ b/sys/dev/twa/tw_osl_inline.h
@@ -108,21 +108,6 @@
-/*
- * Function name: tw_osl_ctlr_ready
- * Description: CL calls this function to notify the OS Layer that it
- * is ready to accept new requests. This function is
- * called only if a call to tw_osl_ctlr_busy has been
- * made previously. We don't use this function as of now.
- *
- * Input: ctlr_handle -- ptr to controller handle
- * Output: None
- * Return value: None
- */
-#define tw_osl_ctlr_ready(ctlr_handle)
-
-
-
#ifdef TW_OSL_DEBUG
/*
diff --git a/sys/dev/twa/tw_osl_share.h b/sys/dev/twa/tw_osl_share.h
index 08276d0..4d10ed4 100644
--- a/sys/dev/twa/tw_osl_share.h
+++ b/sys/dev/twa/tw_osl_share.h
@@ -75,7 +75,7 @@
#define TW_OSL_ENCLOSURE_SUPPORT
#endif
-#define TW_OSL_DRIVER_VERSION_STRING "3.70.05.001"
+#define TW_OSL_DRIVER_VERSION_STRING "3.80.06.002"
#define TW_OSL_CAN_SLEEP
OpenPOWER on IntegriCloud