Diffstat (limited to 'drivers/ieee1394/sbp2.c')
-rw-r--r--  drivers/ieee1394/sbp2.c  218
1 file changed, 91 insertions(+), 127 deletions(-)
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 1d6ad34..c52f6e6 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -526,26 +526,41 @@ static void sbp2util_write_doorbell(struct work_struct *work)
static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
{
- struct sbp2_fwhost_info *hi = lu->hi;
struct sbp2_command_info *cmd;
+ struct device *dmadev = lu->hi->host->device.parent;
int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
for (i = 0; i < orbs; i++) {
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
- return -ENOMEM;
- cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
- &cmd->command_orb,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- cmd->sge_dma = dma_map_single(hi->host->device.parent,
- &cmd->scatter_gather_element,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
+ goto failed_alloc;
+
+ cmd->command_orb_dma =
+ dma_map_single(dmadev, &cmd->command_orb,
+ sizeof(struct sbp2_command_orb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, cmd->command_orb_dma))
+ goto failed_orb;
+
+ cmd->sge_dma =
+ dma_map_single(dmadev, &cmd->scatter_gather_element,
+ sizeof(cmd->scatter_gather_element),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, cmd->sge_dma))
+ goto failed_sge;
+
INIT_LIST_HEAD(&cmd->list);
list_add_tail(&cmd->list, &lu->cmd_orb_completed);
}
return 0;
+
+failed_sge:
+ dma_unmap_single(dmadev, cmd->command_orb_dma,
+ sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
+failed_orb:
+ kfree(cmd);
+failed_alloc:
+ return -ENOMEM;
}
static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu,
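The hunk above makes the pool allocator check each dma_map_single() result with dma_mapping_error() and unwind on failure instead of returning with a possibly invalid handle. A minimal sketch of that check-and-unwind pattern, using illustrative names (my_cmd and my_alloc_one are not sbp2.c identifiers):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct my_cmd {
	u32 orb[8];
	dma_addr_t orb_dma;
};

static int my_alloc_one(struct device *dmadev, struct my_cmd **out)
{
	struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);

	if (!cmd)
		return -ENOMEM;

	/* dma_map_single() may fail; check the handle before using it */
	cmd->orb_dma = dma_map_single(dmadev, cmd->orb, sizeof(cmd->orb),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, cmd->orb_dma)) {
		kfree(cmd);		/* undo everything done so far */
		return -ENOMEM;
	}

	*out = cmd;
	return 0;
}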
@@ -641,24 +656,11 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
struct sbp2_command_info *cmd)
{
- struct hpsb_host *host = lu->ud->ne->host;
-
- if (cmd->cmd_dma) {
- if (cmd->dma_type == CMD_DMA_SINGLE)
- dma_unmap_single(host->device.parent, cmd->cmd_dma,
- cmd->dma_size, cmd->dma_dir);
- else if (cmd->dma_type == CMD_DMA_PAGE)
- dma_unmap_page(host->device.parent, cmd->cmd_dma,
- cmd->dma_size, cmd->dma_dir);
- /* XXX: Check for CMD_DMA_NONE bug */
- cmd->dma_type = CMD_DMA_NONE;
- cmd->cmd_dma = 0;
- }
- if (cmd->sge_buffer) {
- dma_unmap_sg(host->device.parent, cmd->sge_buffer,
- cmd->dma_size, cmd->dma_dir);
- cmd->sge_buffer = NULL;
- }
+ if (scsi_sg_count(cmd->Current_SCpnt))
+ dma_unmap_sg(lu->ud->ne->host->device.parent,
+ scsi_sglist(cmd->Current_SCpnt),
+ scsi_sg_count(cmd->Current_SCpnt),
+ cmd->Current_SCpnt->sc_data_direction);
list_move_tail(&cmd->list, &lu->cmd_orb_completed);
}
@@ -823,6 +825,10 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
#endif
}
+ if (dma_get_max_seg_size(hi->host->device.parent) > SBP2_MAX_SEG_SIZE)
+ BUG_ON(dma_set_max_seg_size(hi->host->device.parent,
+ SBP2_MAX_SEG_SIZE));
+
/* Prevent unloading of the 1394 host */
if (!try_module_get(hi->host->driver->owner)) {
SBP2_ERR("failed to get a reference on 1394 host driver");
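This hunk clamps the FireWire host device's DMA segment size so dma_map_sg() never produces a merged segment larger than an SBP-2 page-table entry can describe; the matching blk_queue_max_segment_size() call is added in the slave_configure hunk further down. A rough sketch of the clamp, assuming a generic struct device and an arbitrary limit (the helper name is illustrative, not part of sbp2.c):

#include <linux/dma-mapping.h>

/* Illustrative helper: lower the device's DMA segment limit only when
 * the current limit exceeds the caller's maximum. */
static int my_limit_seg_size(struct device *dmadev, unsigned int max_seg)
{
	if (dma_get_max_seg_size(dmadev) > max_seg)
		return dma_set_max_seg_size(dmadev, max_seg);
	return 0;
}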
@@ -1494,84 +1500,65 @@ static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
return 0;
}
-static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
- struct sbp2_fwhost_info *hi,
- struct sbp2_command_info *cmd,
- unsigned int scsi_use_sg,
- struct scatterlist *sg,
- u32 orb_direction,
- enum dma_data_direction dma_dir)
+static int sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
+ struct sbp2_fwhost_info *hi,
+ struct sbp2_command_info *cmd,
+ unsigned int sg_count,
+ struct scatterlist *sg,
+ u32 orb_direction,
+ enum dma_data_direction dma_dir)
{
- cmd->dma_dir = dma_dir;
+ struct device *dmadev = hi->host->device.parent;
+ struct sbp2_unrestricted_page_table *pt;
+ int i, n;
+
+ n = dma_map_sg(dmadev, sg, sg_count, dma_dir);
+ if (n == 0)
+ return -ENOMEM;
+
orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
orb->misc |= ORB_SET_DIRECTION(orb_direction);
/* special case if only one element (and less than 64KB in size) */
- if (scsi_use_sg == 1 && sg->length <= SBP2_MAX_SG_ELEMENT_LENGTH) {
-
- cmd->dma_size = sg->length;
- cmd->dma_type = CMD_DMA_PAGE;
- cmd->cmd_dma = dma_map_page(hi->host->device.parent,
- sg_page(sg), sg->offset,
- cmd->dma_size, cmd->dma_dir);
-
- orb->data_descriptor_lo = cmd->cmd_dma;
- orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
-
+ if (n == 1) {
+ orb->misc |= ORB_SET_DATA_SIZE(sg_dma_len(sg));
+ orb->data_descriptor_lo = sg_dma_address(sg);
} else {
- struct sbp2_unrestricted_page_table *sg_element =
- &cmd->scatter_gather_element[0];
- u32 sg_count, sg_len;
- dma_addr_t sg_addr;
- int i, count = dma_map_sg(hi->host->device.parent, sg,
- scsi_use_sg, dma_dir);
-
- cmd->dma_size = scsi_use_sg;
- cmd->sge_buffer = sg;
-
- /* use page tables (s/g) */
- orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
- orb->data_descriptor_lo = cmd->sge_dma;
+ pt = &cmd->scatter_gather_element[0];
- /* loop through and fill out our SBP-2 page tables
- * (and split up anything too large) */
- for (i = 0, sg_count = 0; i < count; i++, sg = sg_next(sg)) {
- sg_len = sg_dma_len(sg);
- sg_addr = sg_dma_address(sg);
- while (sg_len) {
- sg_element[sg_count].segment_base_lo = sg_addr;
- if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
- sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
- sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
- } else {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
- sg_len = 0;
- }
- sg_count++;
- }
+ dma_sync_single_for_cpu(dmadev, cmd->sge_dma,
+ sizeof(cmd->scatter_gather_element),
+ DMA_TO_DEVICE);
+
+ for_each_sg(sg, sg, n, i) {
+ pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
+ pt[i].low = cpu_to_be32(sg_dma_address(sg));
}
- orb->misc |= ORB_SET_DATA_SIZE(sg_count);
+ orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1) |
+ ORB_SET_DATA_SIZE(n);
+ orb->data_descriptor_lo = cmd->sge_dma;
- sbp2util_cpu_to_be32_buffer(sg_element,
- (sizeof(struct sbp2_unrestricted_page_table)) *
- sg_count);
+ dma_sync_single_for_device(dmadev, cmd->sge_dma,
+ sizeof(cmd->scatter_gather_element),
+ DMA_TO_DEVICE);
}
+ return 0;
}
-static void sbp2_create_command_orb(struct sbp2_lu *lu,
- struct sbp2_command_info *cmd,
- struct scsi_cmnd *SCpnt)
+static int sbp2_create_command_orb(struct sbp2_lu *lu,
+ struct sbp2_command_info *cmd,
+ struct scsi_cmnd *SCpnt)
{
- struct sbp2_fwhost_info *hi = lu->hi;
+ struct device *dmadev = lu->hi->host->device.parent;
struct sbp2_command_orb *orb = &cmd->command_orb;
- u32 orb_direction;
unsigned int scsi_request_bufflen = scsi_bufflen(SCpnt);
enum dma_data_direction dma_dir = SCpnt->sc_data_direction;
+ u32 orb_direction;
+ int ret;
+ dma_sync_single_for_cpu(dmadev, cmd->command_orb_dma,
+ sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
/*
* Set-up our command ORB.
*
@@ -1602,15 +1589,21 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
orb->data_descriptor_hi = 0x0;
orb->data_descriptor_lo = 0x0;
orb->misc |= ORB_SET_DIRECTION(1);
- } else
- sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_sg_count(SCpnt),
- scsi_sglist(SCpnt),
- orb_direction, dma_dir);
-
+ ret = 0;
+ } else {
+ ret = sbp2_prep_command_orb_sg(orb, lu->hi, cmd,
+ scsi_sg_count(SCpnt),
+ scsi_sglist(SCpnt),
+ orb_direction, dma_dir);
+ }
sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
memset(orb->cdb, 0, sizeof(orb->cdb));
memcpy(orb->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ dma_sync_single_for_device(dmadev, cmd->command_orb_dma,
+ sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
+ return ret;
}
static void sbp2_link_orb_command(struct sbp2_lu *lu,
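The rewritten sbp2_prep_command_orb_sg() above maps the scatterlist once with dma_map_sg(), fills the SBP-2 page table from the mapped entries, and brackets the CPU writes with dma_sync_single_for_cpu()/_for_device() because the page table itself stays DMA-mapped between commands. A condensed sketch of that flow, with illustrative names (my_pt_entry and my_fill_page_table are not sbp2.c identifiers):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct my_pt_entry {
	__be32 high;	/* segment length in the upper 16 bits */
	__be32 low;	/* segment base address */
};

static int my_fill_page_table(struct device *dmadev, struct scatterlist *sgl,
			      int sg_count, struct my_pt_entry *pt,
			      dma_addr_t pt_dma, size_t pt_size,
			      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i, n;

	n = dma_map_sg(dmadev, sgl, sg_count, dir);	/* may coalesce entries */
	if (n == 0)
		return -ENOMEM;

	/* the page table is DMA-mapped, so bracket CPU writes with syncs */
	dma_sync_single_for_cpu(dmadev, pt_dma, pt_size, DMA_TO_DEVICE);
	for_each_sg(sgl, sg, n, i) {
		pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
		pt[i].low = cpu_to_be32(sg_dma_address(sg));
	}
	dma_sync_single_for_device(dmadev, pt_dma, pt_size, DMA_TO_DEVICE);

	return n;	/* number of mapped segments written to the table */
}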
@@ -1624,14 +1617,6 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
size_t length;
unsigned long flags;
- dma_sync_single_for_device(hi->host->device.parent,
- cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
-
/* check to see if there are any previous orbs to use */
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
last_orb = lu->last_orb;
@@ -1699,9 +1684,10 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
if (!cmd)
return -EIO;
- sbp2_create_command_orb(lu, cmd, SCpnt);
- sbp2_link_orb_command(lu, cmd);
+ if (sbp2_create_command_orb(lu, cmd, SCpnt))
+ return -ENOMEM;
+ sbp2_link_orb_command(lu, cmd);
return 0;
}
@@ -1789,13 +1775,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
else
cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
if (cmd) {
- dma_sync_single_for_cpu(hi->host->device.parent,
- cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
/* Grab SCSI command pointers and check status. */
/*
* FIXME: If the src field in the status is 1, the ORB DMA must
@@ -1912,7 +1891,6 @@ done:
static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
{
- struct sbp2_fwhost_info *hi = lu->hi;
struct list_head *lh;
struct sbp2_command_info *cmd;
unsigned long flags;
@@ -1921,13 +1899,6 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
while (!list_empty(&lu->cmd_orb_inuse)) {
lh = lu->cmd_orb_inuse.next;
cmd = list_entry(lh, struct sbp2_command_info, list);
- dma_sync_single_for_cpu(hi->host->device.parent,
- cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
sbp2util_mark_command_completed(lu, cmd);
if (cmd->Current_SCpnt) {
cmd->Current_SCpnt->result = status << 16;
@@ -2033,6 +2004,8 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
sdev->start_stop_pwr_cond = 1;
if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+
+ blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
return 0;
}
@@ -2049,7 +2022,6 @@ static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
{
struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
- struct sbp2_fwhost_info *hi = lu->hi;
struct sbp2_command_info *cmd;
unsigned long flags;
@@ -2063,14 +2035,6 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
if (cmd) {
- dma_sync_single_for_cpu(hi->host->device.parent,
- cmd->command_orb_dma,
- sizeof(struct sbp2_command_orb),
- DMA_TO_DEVICE);
- dma_sync_single_for_cpu(hi->host->device.parent,
- cmd->sge_dma,
- sizeof(cmd->scatter_gather_element),
- DMA_TO_DEVICE);
sbp2util_mark_command_completed(lu, cmd);
if (cmd->Current_SCpnt) {
cmd->Current_SCpnt->result = DID_ABORT << 16;