Diffstat (limited to 'drivers/scsi/scsi_lib.c')
 drivers/scsi/scsi_lib.c | 40 +++++++++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d18da21..9f99649 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -92,10 +92,12 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
 	 */
 	sreq->sr_request->flags &= ~REQ_DONTPREP;
 	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
-			   at_head, sreq, 0);
+			   at_head, sreq);
 	return 0;
 }
 
+static void scsi_run_queue(struct request_queue *q);
+
 /*
  * Function: scsi_queue_insert()
  *
@@ -119,18 +121,14 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
+	struct request_queue *q = device->request_queue;
+	unsigned long flags;
 
 	SCSI_LOG_MLQUEUE(1,
 		 printk("Inserting command %p into mlqueue\n", cmd));
 
 	/*
-	 * We are inserting the command into the ml queue. First, we
-	 * cancel the timer, so it doesn't time out.
-	 */
-	scsi_delete_timer(cmd);
-
-	/*
-	 * Next, set the appropriate busy bit for the device/host.
+	 * Set the appropriate busy bit for the device/host.
 	 *
 	 * If the host/device isn't busy, assume that something actually
 	 * completed, and that we should be able to queue a command now.
@@ -160,17 +158,22 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	scsi_device_unbusy(device);
 
 	/*
-	 * Insert this command at the head of the queue for it's device.
-	 * It will go before all other commands that are already in the queue.
+	 * Requeue this command. It will go before all other commands
+	 * that are already in the queue.
 	 *
 	 * NOTE: there is magic here about the way the queue is plugged if
 	 * we have no outstanding commands.
 	 *
-	 * Although this *doesn't* plug the queue, it does call the request
+	 * Although we *don't* plug the queue, we call the request
 	 * function. The SCSI request function detects the blocked condition
 	 * and plugs the queue appropriately.
-	 */
-	blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
+	 */
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_requeue_request(q, cmd->request);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	scsi_run_queue(q);
+
 	return 0;
 }
 
@@ -485,8 +488,13 @@ static void scsi_run_queue(struct request_queue *q)
  */
 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 {
+	unsigned long flags;
+
 	cmd->request->flags &= ~REQ_DONTPREP;
-	blk_insert_request(q, cmd->request, 1, cmd, 1);
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_requeue_request(q, cmd->request);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	scsi_run_queue(q);
 }
@@ -941,10 +949,8 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	 * if sg table allocation fails, requeue request later.
 	 */
 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-	if (unlikely(!sgpnt)) {
-		req->flags |= REQ_SPECIAL;
+	if (unlikely(!sgpnt))
 		return BLKPREP_DEFER;
-	}
 
 	cmd->request_buffer = (char *) sgpnt;
 	cmd->request_bufflen = req->nr_sectors << 9;
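
For readers less familiar with the block-layer calls involved, the sketch below condenses the requeue pattern that both scsi_queue_insert() and scsi_requeue_command() switch to in this diff. It is an illustrative reconstruction, not code from the patch: the helper name requeue_and_restart() and the run_queue callback parameter are made-up names. blk_requeue_request() puts the request back at the head of the queue and is called here under q->queue_lock, as in the patch; since it does not invoke the request function the way the old blk_insert_request(..., 1, cmd, 1) call did, the queue has to be kicked explicitly afterwards (the patch uses scsi_run_queue() for that).

/* Illustrative sketch only (kernel APIs of this era); requeue_and_restart()
 * and the run_queue callback are hypothetical names, not from the patch. */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void requeue_and_restart(struct request_queue *q, struct request *rq,
				void (*run_queue)(struct request_queue *))
{
	unsigned long flags;

	/* Put the request back at the head of the block queue; the patch
	 * takes q->queue_lock around blk_requeue_request(), so do the same. */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* blk_requeue_request() does not restart dispatching by itself,
	 * so kick the queue explicitly (the patch calls scsi_run_queue()). */
	run_queue(q);
}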