| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 15:15:15 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 15:15:15 -0700 |
| commit | 03da30986793385af57eeca3296253c887b742e6 (patch) | |
| tree | 9c46dbe51c9d0856990649dd917ab45474b7be87 /drivers/s390 | |
| parent | 6ba74014c1ab0e37af7de6f64b4eccbbae3cb9e7 (diff) | |
| parent | 339f4f4eab80caa6cf0d39fb057ad6ddb84ba91e (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (276 commits)
[SCSI] zfcp: Trigger logging in the FCP channel on qdio error conditions
[SCSI] zfcp: Introduce experimental support for DIF/DIX
[SCSI] zfcp: Enable data division support for FCP devices
[SCSI] zfcp: Prevent access on uninitialized memory.
[SCSI] zfcp: Post events through FC transport class
[SCSI] zfcp: Cleanup QDIO attachment and improve processing.
[SCSI] zfcp: Cleanup function parameters for sbal value.
[SCSI] zfcp: Use correct width for timer_interval field
[SCSI] zfcp: Remove SCSI device when removing unit
[SCSI] zfcp: Use memdup_user and kstrdup
[SCSI] zfcp: Fix retry after failed "open port" erp action
[SCSI] zfcp: Fail erp after timeout
[SCSI] zfcp: Use forced_reopen in terminate_rport_io callback
[SCSI] zfcp: Register SCSI devices after successful fc_remote_port_add
[SCSI] zfcp: Do not try "forced close" when port is already closed
[SCSI] zfcp: Do not unblock rport from REOPEN_PORT_FORCED
[SCSI] sd: add support for runtime PM
[SCSI] implement runtime Power Management
[SCSI] convert to the new PM framework
[SCSI] Unify SAM_ and SAM_STAT_ macros
...
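
One of the cleanups in this series ("Use memdup_user and kstrdup") replaces open-coded allocate-and-copy sequences in zfcp_cfdc.c and zfcp_aux.c with generic helpers. Below is a minimal sketch of that pattern; the struct and function names are hypothetical, only memdup_user()/kstrdup() themselves are real kernel API.

```c
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* hypothetical ioctl payload, stands in for struct zfcp_cfdc_data */
struct example_cmd {
	u32 signature;
	u32 option;
};

static long example_fetch_cmd(void __user *uptr)
{
	struct example_cmd *cmd;

	/* one call replaces kmalloc() + copy_from_user() + error unwinding */
	cmd = memdup_user(uptr, sizeof(*cmd));
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... validate and use cmd ... */
	kfree(cmd);
	return 0;
}

static char *example_dup_devstr(const char *devstr)
{
	/* one call replaces kmalloc(strlen(devstr) + 1) + strcpy() */
	return kstrdup(devstr, GFP_KERNEL);
}
```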
Diffstat (limited to 'drivers/s390')
-rw-r--r-- | drivers/s390/cio/qdio_setup.c | 2 |
-rw-r--r-- | drivers/s390/scsi/zfcp_aux.c | 10 |
-rw-r--r-- | drivers/s390/scsi/zfcp_cfdc.c | 12 |
-rw-r--r-- | drivers/s390/scsi/zfcp_dbf.c | 5 |
-rw-r--r-- | drivers/s390/scsi/zfcp_dbf.h | 1 |
-rw-r--r-- | drivers/s390/scsi/zfcp_def.h | 5 |
-rw-r--r-- | drivers/s390/scsi/zfcp_erp.c | 24 |
-rw-r--r-- | drivers/s390/scsi/zfcp_ext.h | 11 |
-rw-r--r-- | drivers/s390/scsi/zfcp_fc.c | 54 |
-rw-r--r-- | drivers/s390/scsi/zfcp_fc.h | 27 |
-rw-r--r-- | drivers/s390/scsi/zfcp_fsf.c | 169 |
-rw-r--r-- | drivers/s390/scsi/zfcp_fsf.h | 34 |
-rw-r--r-- | drivers/s390/scsi/zfcp_qdio.c | 206 |
-rw-r--r-- | drivers/s390/scsi/zfcp_qdio.h | 95 |
-rw-r--r-- | drivers/s390/scsi/zfcp_scsi.c | 103 |
-rw-r--r-- | drivers/s390/scsi/zfcp_sysfs.c | 12 |
16 files changed, 510 insertions, 260 deletions
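
Before the raw diff: the "Post events through FC transport class" change adds a small producer/consumer queue so that FC host events (link up/down, RSCN) observed in interrupt context can be handed to fc_host_post_event(), which must run in process context. The sketch below mirrors the new zfcp_fc_event/zfcp_fc_events structures with simplified, made-up names; the posting call is reduced to a comment, and the plain spin_lock() on the producer side assumes the caller already runs with bottom halves disabled, as in zfcp.

```c
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct fc_ev {				/* cf. struct zfcp_fc_event */
	u32 code;
	u32 data;
	struct list_head list;
};

struct fc_ev_queue {			/* cf. struct zfcp_fc_events */
	struct list_head list;
	spinlock_t list_lock;
	struct work_struct work;
};

/* consumer: runs in process context, splices the list and posts each event */
static void fc_ev_work(struct work_struct *work)
{
	struct fc_ev_queue *q = container_of(work, struct fc_ev_queue, work);
	struct fc_ev *ev, *tmp;
	LIST_HEAD(tmp_lh);

	spin_lock_bh(&q->list_lock);
	list_splice_init(&q->list, &tmp_lh);
	spin_unlock_bh(&q->list_lock);

	list_for_each_entry_safe(ev, tmp, &tmp_lh, list) {
		/* zfcp calls fc_host_post_event(shost, ..., ev->code, ev->data) here */
		list_del(&ev->list);
		kfree(ev);
	}
}

static void fc_ev_queue_init(struct fc_ev_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	spin_lock_init(&q->list_lock);
	INIT_WORK(&q->work, fc_ev_work);
}

/* producer: called with bottom halves disabled, hence GFP_ATOMIC */
static void fc_ev_enqueue(struct fc_ev_queue *q, u32 code, u32 data)
{
	struct fc_ev *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;		/* events are best effort, drop on -ENOMEM */

	ev->code = code;
	ev->data = data;

	spin_lock(&q->list_lock);
	list_add_tail(&ev->list, &q->list);
	spin_unlock(&q->list_lock);

	schedule_work(&q->work);
}
```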
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 6326b67..34c7e40 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -368,6 +368,8 @@ static void setup_qib(struct qdio_irq *irq_ptr, if (qebsm_possible()) irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; + irq_ptr->qib.rflags |= init_data->qib_rflags; + irq_ptr->qib.qfmt = init_data->q_format; if (init_data->no_input_qs) irq_ptr->qib.isliba = diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index e331df2..96fa1f5 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -98,13 +98,11 @@ static void __init zfcp_init_device_setup(char *devstr) u64 wwpn, lun; /* duplicate devstr and keep the original for sysfs presentation*/ - str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL); + str_saved = kstrdup(devstr, GFP_KERNEL); str = str_saved; if (!str) return; - strcpy(str, devstr); - token = strsep(&str, ","); if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) goto err_out; @@ -314,7 +312,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) } retval = -EINVAL; - INIT_WORK(&unit->scsi_work, zfcp_scsi_scan); + INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work); spin_lock_init(&unit->latencies.lock); unit->latencies.write.channel.min = 0xFFFFFFFF; @@ -526,6 +524,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) rwlock_init(&adapter->port_list_lock); INIT_LIST_HEAD(&adapter->port_list); + INIT_LIST_HEAD(&adapter->events.list); + INIT_WORK(&adapter->events.work, zfcp_fc_post_event); + spin_lock_init(&adapter->events.list_lock); + init_waitqueue_head(&adapter->erp_ready_wq); init_waitqueue_head(&adapter->erp_done_wqh); diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index 1a2db0a..fcbd2b7 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -189,18 +189,12 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, if (!fsf_cfdc) return -ENOMEM; - data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL); - if (!data) { - retval = -ENOMEM; + data = memdup_user(data_user, sizeof(*data_user)); + if (IS_ERR(data)) { + retval = PTR_ERR(data); goto no_mem_sense; } - retval = copy_from_user(data, data_user, sizeof(*data)); - if (retval) { - retval = -EFAULT; - goto free_buffer; - } - if (data->signature != 0xCFDCACDF) { retval = -EINVAL; goto free_buffer; diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 075852f..a86117b 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -155,6 +155,8 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, if (scsi_cmnd) { response->u.fcp.cmnd = (unsigned long)scsi_cmnd; response->u.fcp.serial = scsi_cmnd->serial_number; + response->u.fcp.data_dir = + qtcb->bottom.io.data_direction; } break; @@ -326,6 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p, case FSF_QTCB_FCP_CMND: if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) break; + zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); *p += sprintf(*p, "\n"); @@ -1005,7 +1008,7 @@ int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) char dbf_name[DEBUG_MAX_NAME_LEN]; struct zfcp_dbf *dbf; - dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); + dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); if (!dbf) return -ENOMEM; diff --git 
a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 457e046..2bcc340 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -111,6 +111,7 @@ struct zfcp_dbf_hba_record_response { struct { u64 cmnd; u64 serial; + u32 data_dir; } fcp; struct { u64 wwpn; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9fa1b06..e1c6b6e 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -37,6 +37,7 @@ #include <asm/ebcdic.h> #include <asm/sysinfo.h> #include "zfcp_fsf.h" +#include "zfcp_fc.h" #include "zfcp_qdio.h" struct zfcp_reqlist; @@ -72,10 +73,12 @@ struct zfcp_reqlist; /* adapter status */ #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 +#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 +#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400 /* remote port status */ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 @@ -190,6 +193,7 @@ struct zfcp_adapter { struct service_level service_level; struct workqueue_struct *work_queue; struct device_dma_parameters dma_parms; + struct zfcp_fc_events events; }; struct zfcp_port { @@ -212,6 +216,7 @@ struct zfcp_port { struct work_struct test_link_work; struct work_struct rport_work; enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task; + unsigned int starget_id; }; struct zfcp_unit { diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index fd068bc..160b432 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -141,9 +141,13 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) need = ZFCP_ERP_ACTION_REOPEN_PORT; /* fall through */ - case ZFCP_ERP_ACTION_REOPEN_PORT: case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: p_status = atomic_read(&port->status); + if (!(p_status & ZFCP_STATUS_COMMON_OPEN)) + need = ZFCP_ERP_ACTION_REOPEN_PORT; + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT: + p_status = atomic_read(&port->status); if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE) return 0; a_status = atomic_read(&adapter->status); @@ -893,8 +897,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) } if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { port->d_id = 0; - _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL); - return ZFCP_ERP_EXIT; + return ZFCP_ERP_FAILED; } /* fall through otherwise */ } @@ -1188,19 +1191,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_UNIT: - if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { - get_device(&unit->dev); - if (scsi_queue_work(unit->port->adapter->scsi_host, - &unit->scsi_work) <= 0) - put_device(&unit->dev); - } put_device(&unit->dev); break; - case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT: if (result == ZFCP_ERP_SUCCEEDED) zfcp_scsi_schedule_rport_register(port); + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: put_device(&port->dev); break; @@ -1247,6 +1245,11 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) goto unlock; } + if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { + retval = ZFCP_ERP_FAILED; + goto check_target; + } + zfcp_erp_action_to_running(erp_action); /* no lock to allow for blocking operations */ @@ -1279,6 +1282,7 
@@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) goto unlock; } +check_target: retval = zfcp_erp_strategy_check_target(erp_action, retval); zfcp_erp_action_dequeue(erp_action); retval = zfcp_erp_strategy_statechange(erp_action, retval); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 48a8f93..3b93239 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -96,6 +96,9 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ +extern void zfcp_fc_enqueue_event(struct zfcp_adapter *, + enum fc_host_event_code event_code, u32); +extern void zfcp_fc_post_event(struct work_struct *); extern void zfcp_fc_scan_ports(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fc_port_did_lookup(struct work_struct *); @@ -146,9 +149,10 @@ extern void zfcp_qdio_destroy(struct zfcp_qdio *); extern int zfcp_qdio_sbal_get(struct zfcp_qdio *); extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *); extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *, - struct scatterlist *, int); + struct scatterlist *); extern int zfcp_qdio_open(struct zfcp_qdio *); extern void zfcp_qdio_close(struct zfcp_qdio *); +extern void zfcp_qdio_siosl(struct zfcp_adapter *); /* zfcp_scsi.c */ extern struct zfcp_data zfcp_data; @@ -159,7 +163,10 @@ extern void zfcp_scsi_rport_work(struct work_struct *); extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); -extern void zfcp_scsi_scan(struct work_struct *); +extern void zfcp_scsi_scan(struct zfcp_unit *); +extern void zfcp_scsi_scan_work(struct work_struct *); +extern void zfcp_scsi_set_prot(struct zfcp_adapter *); +extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); /* zfcp_sysfs.c */ extern struct attribute_group zfcp_sysfs_unit_attrs; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6f8ab43..6f3ed2b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -23,6 +23,58 @@ static u32 zfcp_fc_rscn_range_mask[] = { [ELS_ADDR_FMT_FAB] = 0x000000, }; +/** + * zfcp_fc_post_event - post event to userspace via fc_transport + * @work: work struct with enqueued events + */ +void zfcp_fc_post_event(struct work_struct *work) +{ + struct zfcp_fc_event *event = NULL, *tmp = NULL; + LIST_HEAD(tmp_lh); + struct zfcp_fc_events *events = container_of(work, + struct zfcp_fc_events, work); + struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter, + events); + + spin_lock_bh(&events->list_lock); + list_splice_init(&events->list, &tmp_lh); + spin_unlock_bh(&events->list_lock); + + list_for_each_entry_safe(event, tmp, &tmp_lh, list) { + fc_host_post_event(adapter->scsi_host, fc_get_event_number(), + event->code, event->data); + list_del(&event->list); + kfree(event); + } + +} + +/** + * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context + * @adapter: The adapter where to enqueue the event + * @event_code: The event code (as defined in fc_host_event_code in + * scsi_transport_fc.h) + * @event_data: The event data (e.g. 
n_port page in case of els) + */ +void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter, + enum fc_host_event_code event_code, u32 event_data) +{ + struct zfcp_fc_event *event; + + event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC); + if (!event) + return; + + event->code = event_code; + event->data = event_data; + + spin_lock(&adapter->events.list_lock); + list_add_tail(&event->list, &adapter->events.list); + spin_unlock(&adapter->events.list_lock); + + queue_work(adapter->work_queue, &adapter->events.work); +} + static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) { if (mutex_lock_interruptible(&wka_port->mutex)) @@ -148,6 +200,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK; _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt], page); + zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN, + *(u32 *)page); } queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work); } diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 0747b08..938d503 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -30,6 +30,30 @@ #define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) /** + * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context + * @code: Event code + * @data: Event data + * @list: list_head for zfcp_fc_events list + */ +struct zfcp_fc_event { + enum fc_host_event_code code; + u32 data; + struct list_head list; +}; + +/** + * struct zfcp_fc_events - Infrastructure for posting FC events from irq context + * @list: List for queueing of events from irq context to workqueue + * @list_lock: Lock for event list + * @work: work_struct for forwarding events in workqueue +*/ +struct zfcp_fc_events { + struct list_head list; + spinlock_t list_lock; + struct work_struct work; +}; + +/** * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request * @ct_hdr: FC GS common transport header * @gid_pn: GID_PN request @@ -196,6 +220,9 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len); fcp->fc_dl = scsi_bufflen(scsi); + + if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) + fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8; } /** diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 71663fb..9d1d7d1 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -21,6 +21,7 @@ static void zfcp_fsf_request_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; + zfcp_qdio_siosl(adapter); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "fsrth_1", NULL); } @@ -274,6 +275,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); + zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0); break; case FSF_STATUS_READ_LINK_UP: dev_info(&adapter->ccw_device->dev, @@ -286,6 +288,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_COMMON_ERP_FAILED, "fssrh_2", req); + zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); + break; case FSF_STATUS_READ_NOTIFICATION_LOST: if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) @@ -323,6 +327,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) dev_err(&req->adapter->ccw_device->dev, "The FCP adapter reported a problem " 
"that cannot be recovered\n"); + zfcp_qdio_siosl(req->adapter); zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); break; } @@ -413,6 +418,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) dev_err(&adapter->ccw_device->dev, "0x%x is not a valid transfer protocol status\n", qtcb->prefix.prot_status); + zfcp_qdio_siosl(adapter); zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); } req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -495,7 +501,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; adapter->hydra_version = bottom->adapter_type; - adapter->timer_ticks = bottom->timer_interval; + adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)FSF_STATUS_READS_RECOM); @@ -523,6 +529,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) return -EIO; } + zfcp_scsi_set_prot(adapter); + return 0; } @@ -732,7 +740,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) zfcp_reqlist_add(adapter->req_list, req); - req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); + req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); req->issued = get_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { del_timer(&req->timer); @@ -959,8 +967,7 @@ static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio, static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, struct scatterlist *sg_req, - struct scatterlist *sg_resp, - int max_sbals) + struct scatterlist *sg_resp) { struct zfcp_adapter *adapter = req->adapter; u32 feat = adapter->adapter_features; @@ -983,18 +990,19 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, return 0; } - bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, - sg_req, max_sbals); + bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req); if (bytes <= 0) return -EIO; + zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); req->qtcb->bottom.support.req_buf_length = bytes; zfcp_qdio_skip_to_last_sbale(&req->qdio_req); bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, - sg_resp, max_sbals); + sg_resp); req->qtcb->bottom.support.resp_buf_length = bytes; if (bytes <= 0) return -EIO; + zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); return 0; } @@ -1002,11 +1010,11 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, struct scatterlist *sg_req, struct scatterlist *sg_resp, - int max_sbals, unsigned int timeout) + unsigned int timeout) { int ret; - ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); + ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp); if (ret) return ret; @@ -1046,8 +1054,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, - ZFCP_FSF_MAX_SBALS_PER_REQ, timeout); + ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); if (ret) goto failed_send; @@ -1143,7 +1150,10 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout); + + zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); + + ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); if (ret) goto failed_send; @@ -2025,7 +2035,7 @@ static void zfcp_fsf_req_trace(struct 
zfcp_fsf_req *req, struct scsi_cmnd *scsi) blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) blktrc.flags |= ZFCP_BLK_REQ_ERROR; - blktrc.inb_usage = req->qdio_req.qdio_inb_usage; + blktrc.inb_usage = 0; blktrc.outb_usage = req->qdio_req.qdio_outb_usage; if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && @@ -2035,9 +2045,13 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) blktrc.fabric_lat = lat_in->fabric_lat * ticks; switch (req->qtcb->bottom.io.data_direction) { + case FSF_DATADIR_DIF_READ_STRIP: + case FSF_DATADIR_DIF_READ_CONVERT: case FSF_DATADIR_READ: lat = &unit->latencies.read; break; + case FSF_DATADIR_DIF_WRITE_INSERT: + case FSF_DATADIR_DIF_WRITE_CONVERT: case FSF_DATADIR_WRITE: lat = &unit->latencies.write; break; @@ -2078,6 +2092,21 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) goto skip_fsfstatus; } + switch (req->qtcb->header.fsf_status) { + case FSF_INCONSISTENT_PROT_DATA: + case FSF_INVALID_PROT_PARM: + set_host_byte(scpnt, DID_ERROR); + goto skip_fsfstatus; + case FSF_BLOCK_GUARD_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x1); + goto skip_fsfstatus; + case FSF_APP_TAG_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x2); + goto skip_fsfstatus; + case FSF_REF_TAG_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x3); + goto skip_fsfstatus; + } fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); @@ -2187,6 +2216,44 @@ skip_fsfstatus: } } +static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) +{ + switch (scsi_get_prot_op(scsi_cmnd)) { + case SCSI_PROT_NORMAL: + switch (scsi_cmnd->sc_data_direction) { + case DMA_NONE: + *data_dir = FSF_DATADIR_CMND; + break; + case DMA_FROM_DEVICE: + *data_dir = FSF_DATADIR_READ; + break; + case DMA_TO_DEVICE: + *data_dir = FSF_DATADIR_WRITE; + break; + case DMA_BIDIRECTIONAL: + return -EINVAL; + } + break; + + case SCSI_PROT_READ_STRIP: + *data_dir = FSF_DATADIR_DIF_READ_STRIP; + break; + case SCSI_PROT_WRITE_INSERT: + *data_dir = FSF_DATADIR_DIF_WRITE_INSERT; + break; + case SCSI_PROT_READ_PASS: + *data_dir = FSF_DATADIR_DIF_READ_CONVERT; + break; + case SCSI_PROT_WRITE_PASS: + *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT; + break; + default: + return -EINVAL; + } + + return 0; +} + /** * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) * @unit: unit where command is sent to @@ -2198,16 +2265,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, struct zfcp_fsf_req *req; struct fcp_cmnd *fcp_cmnd; unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; - int real_bytes, retval = -EIO; + int real_bytes, retval = -EIO, dix_bytes = 0; struct zfcp_adapter *adapter = unit->port->adapter; struct zfcp_qdio *qdio = adapter->qdio; + struct fsf_qtcb_bottom_io *io; if (unlikely(!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return -EBUSY; spin_lock(&qdio->req_q_lock); - if (atomic_read(&qdio->req_q.count) <= 0) { + if (atomic_read(&qdio->req_q_free) <= 0) { atomic_inc(&qdio->req_q_full); goto out; } @@ -2223,56 +2291,45 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, goto out; } + scsi_cmnd->host_scribble = (unsigned char *) req->req_id; + + io = &req->qtcb->bottom.io; req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; req->unit = unit; req->data = scsi_cmnd; req->handler = zfcp_fsf_send_fcp_command_handler; req->qtcb->header.lun_handle = unit->handle; req->qtcb->header.port_handle = 
unit->port->handle; - req->qtcb->bottom.io.service_class = FSF_CLASS_3; - req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; + io->service_class = FSF_CLASS_3; + io->fcp_cmnd_length = FCP_CMND_LEN; - scsi_cmnd->host_scribble = (unsigned char *) req->req_id; - - /* - * set depending on data direction: - * data direction bits in SBALE (SB Type) - * data direction bits in QTCB - */ - switch (scsi_cmnd->sc_data_direction) { - case DMA_NONE: - req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; - break; - case DMA_FROM_DEVICE: - req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; - break; - case DMA_TO_DEVICE: - req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; - break; - case DMA_BIDIRECTIONAL: - goto failed_scsi_cmnd; + if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) { + io->data_block_length = scsi_cmnd->device->sector_size; + io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; } + zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); + get_device(&unit->dev); fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); + if (scsi_prot_sg_count(scsi_cmnd)) { + zfcp_qdio_set_data_div(qdio, &req->qdio_req, + scsi_prot_sg_count(scsi_cmnd)); + dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, + scsi_prot_sglist(scsi_cmnd)); + io->prot_data_length = dix_bytes; + } + real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, - scsi_sglist(scsi_cmnd), - ZFCP_FSF_MAX_SBALS_PER_REQ); - if (unlikely(real_bytes < 0)) { - if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) { - dev_err(&adapter->ccw_device->dev, - "Oversize data package, unit 0x%016Lx " - "on port 0x%016Lx closed\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req); - retval = -EINVAL; - } + scsi_sglist(scsi_cmnd)); + + if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0)) goto failed_scsi_cmnd; - } + + zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); retval = zfcp_fsf_req_send(req); if (unlikely(retval)) @@ -2391,13 +2448,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; bottom->option = fsf_cfdc->option; - bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, - fsf_cfdc->sg, - ZFCP_FSF_MAX_SBALS_PER_REQ); + bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg); + if (bytes != ZFCP_CFDC_MAX_SIZE) { zfcp_fsf_req_free(req); goto out; } + zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); @@ -2419,7 +2476,7 @@ out: void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) { struct zfcp_adapter *adapter = qdio->adapter; - struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; + struct qdio_buffer *sbal = qdio->res_q[sbal_idx]; struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req; unsigned long req_id; @@ -2431,17 +2488,17 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) req_id = (unsigned long) sbale->addr; fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); - if (!fsf_req) + if (!fsf_req) { /* * Unknown request means that we have potentially memory * corruption and must stop the machine immediately. 
*/ + zfcp_qdio_siosl(adapter); panic("error: unknown req_id (%lx) on adapter %s.\n", req_id, dev_name(&adapter->ccw_device->dev)); + } fsf_req->qdio_req.sbal_response = sbal_idx; - fsf_req->qdio_req.qdio_inb_usage = - atomic_read(&qdio->resp_q.count); zfcp_fsf_req_complete(fsf_req); if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 519083f..db8c853 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -80,11 +80,15 @@ #define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 #define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 #define FSF_SBAL_MISMATCH 0x00000063 +#define FSF_INCONSISTENT_PROT_DATA 0x00000070 +#define FSF_INVALID_PROT_PARM 0x00000071 +#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081 +#define FSF_APP_TAG_CHECK_FAILURE 0x00000082 +#define FSF_REF_TAG_CHECK_FAILURE 0x00000083 #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD #define FSF_UNKNOWN_COMMAND 0x000000E2 #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 #define FSF_INVALID_COMMAND_OPTION 0x000000E5 -/* #define FSF_ERROR 0x000000FF */ #define FSF_PROT_STATUS_QUAL_SIZE 16 #define FSF_STATUS_QUALIFIER_SIZE 16 @@ -147,18 +151,17 @@ #define FSF_DATADIR_WRITE 0x00000001 #define FSF_DATADIR_READ 0x00000002 #define FSF_DATADIR_CMND 0x00000004 +#define FSF_DATADIR_DIF_WRITE_INSERT 0x00000009 +#define FSF_DATADIR_DIF_READ_STRIP 0x0000000a +#define FSF_DATADIR_DIF_WRITE_CONVERT 0x0000000b +#define FSF_DATADIR_DIF_READ_CONVERT 0X0000000c + +/* data protection control flags */ +#define FSF_APP_TAG_CHECK_ENABLE 0x10 /* fc service class */ #define FSF_CLASS_3 0x00000003 -/* SBAL chaining */ -#define ZFCP_FSF_MAX_SBALS_PER_REQ 36 - -/* max. number of (data buffer) SBALEs in largest SBAL chain - * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ -#define ZFCP_FSF_MAX_SBALES_PER_REQ \ - (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2) - /* logging space behind QTCB */ #define FSF_QTCB_LOG_SIZE 1024 @@ -170,6 +173,8 @@ #define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 #define FSF_FEATURE_UPDATE_ALERT 0x00000100 #define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 +#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000 +#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000 /* host connection features */ #define FSF_FEATURE_NPIV_MODE 0x00000001 @@ -324,9 +329,14 @@ struct fsf_qtcb_header { struct fsf_qtcb_bottom_io { u32 data_direction; u32 service_class; - u8 res1[8]; + u8 res1; + u8 data_prot_flags; + u16 app_tag_value; + u32 ref_tag_value; u32 fcp_cmnd_length; - u8 res2[12]; + u32 data_block_length; + u32 prot_data_length; + u8 res2[4]; u8 fcp_cmnd[FSF_FCP_CMND_SIZE]; u8 fcp_rsp[FSF_FCP_RSP_SIZE]; u8 res3[64]; @@ -352,6 +362,8 @@ struct fsf_qtcb_bottom_support { u8 els[256]; } __attribute__ ((packed)); +#define ZFCP_FSF_TIMER_INT_MASK 0x3FFF + struct fsf_qtcb_bottom_config { u32 lic_version; u32 feature_selection; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 6fa5e04..b263575 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -30,12 +30,15 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) return 0; } -static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) +static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, + unsigned int qdio_err) { struct zfcp_adapter *adapter = qdio->adapter; dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); + if (qdio_err & QDIO_ERROR_SLSB_STATE) + zfcp_qdio_siosl(adapter); 
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); @@ -55,72 +58,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) { unsigned long long now, span; - int free, used; + int used; spin_lock(&qdio->stat_lock); now = get_clock_monotonic(); span = (now - qdio->req_q_time) >> 12; - free = atomic_read(&qdio->req_q.count); - used = QDIO_MAX_BUFFERS_PER_Q - free; + used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); qdio->req_q_util += used * span; qdio->req_q_time = now; spin_unlock(&qdio->stat_lock); } static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, - int queue_no, int first, int count, + int queue_no, int idx, int count, unsigned long parm) { struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; - struct zfcp_qdio_queue *queue = &qdio->req_q; if (unlikely(qdio_err)) { - zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, - count); - zfcp_qdio_handler_error(qdio, "qdireq1"); + zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count); + zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err); return; } /* cleanup all SBALs being program-owned now */ - zfcp_qdio_zero_sbals(queue->sbal, first, count); + zfcp_qdio_zero_sbals(qdio->req_q, idx, count); zfcp_qdio_account(qdio); - atomic_add(count, &queue->count); + atomic_add(count, &qdio->req_q_free); wake_up(&qdio->req_q_wq); } -static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) -{ - struct zfcp_qdio_queue *queue = &qdio->resp_q; - struct ccw_device *cdev = qdio->adapter->ccw_device; - u8 count, start = queue->first; - unsigned int retval; - - count = atomic_read(&queue->count) + processed; - - retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count); - - if (unlikely(retval)) { - atomic_set(&queue->count, count); - zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL); - } else { - queue->first += count; - queue->first %= QDIO_MAX_BUFFERS_PER_Q; - atomic_set(&queue->count, 0); - } -} - static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, - int queue_no, int first, int count, + int queue_no, int idx, int count, unsigned long parm) { struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; int sbal_idx, sbal_no; if (unlikely(qdio_err)) { - zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, - count); - zfcp_qdio_handler_error(qdio, "qdires1"); + zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count); + zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); return; } @@ -129,25 +107,16 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, * returned by QDIO layer */ for (sbal_no = 0; sbal_no < count; sbal_no++) { - sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; + sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; /* go through all SBALEs of SBAL */ zfcp_fsf_reqid_check(qdio, sbal_idx); } /* - * put range of SBALs back to response queue - * (including SBALs which have already been free before) + * put SBALs back to response queue */ - zfcp_qdio_resp_put_back(qdio, count); -} - -static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, - struct zfcp_qdio_req *q_req, int max_sbals) -{ - int count = atomic_read(&qdio->req_q.count); - count = min(count, max_sbals); - q_req->sbal_limit = (q_req->sbal_first + count - 1) - % QDIO_MAX_BUFFERS_PER_Q; + if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count)) + zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL); } 
static struct qdio_buffer_element * @@ -173,6 +142,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) /* keep this requests number of SBALs up-to-date */ q_req->sbal_number++; + BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ); /* start at first SBALE of new SBAL */ q_req->sbale_curr = 0; @@ -193,17 +163,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) return zfcp_qdio_sbale_curr(qdio, q_req); } -static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, - struct zfcp_qdio_req *q_req) -{ - struct qdio_buffer **sbal = qdio->req_q.sbal; - int first = q_req->sbal_first; - int last = q_req->sbal_last; - int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % - QDIO_MAX_BUFFERS_PER_Q + 1; - zfcp_qdio_zero_sbals(sbal, first, count); -} - /** * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list * @qdio: pointer to struct zfcp_qdio @@ -213,14 +172,11 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, * Returns: number of bytes, or error (negativ) */ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, - struct scatterlist *sg, int max_sbals) + struct scatterlist *sg) { struct qdio_buffer_element *sbale; int bytes = 0; - /* figure out last allowed SBAL */ - zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); - /* set storage-block type for this request */ sbale = zfcp_qdio_sbale_req(qdio, q_req); sbale->flags |= q_req->sbtype; @@ -229,7 +185,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, sbale = zfcp_qdio_sbale_next(qdio, q_req); if (!sbale) { atomic_inc(&qdio->req_q_full); - zfcp_qdio_undo_sbals(qdio, q_req); + zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first, + q_req->sbal_number); return -EINVAL; } @@ -239,19 +196,13 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, bytes += sg->length; } - /* assume that no other SBALEs are to follow in the same SBAL */ - sbale = zfcp_qdio_sbale_curr(qdio, q_req); - sbale->flags |= SBAL_FLAGS_LAST_ENTRY; - return bytes; } static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) { - struct zfcp_qdio_queue *req_q = &qdio->req_q; - spin_lock_bh(&qdio->req_q_lock); - if (atomic_read(&req_q->count) || + if (atomic_read(&qdio->req_q_free) || !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return 1; spin_unlock_bh(&qdio->req_q_lock); @@ -300,25 +251,25 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) */ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - struct zfcp_qdio_queue *req_q = &qdio->req_q; - int first = q_req->sbal_first; - int count = q_req->sbal_number; int retval; - unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; + u8 sbal_number = q_req->sbal_number; zfcp_qdio_account(qdio); - retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, - count); + retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, + q_req->sbal_first, sbal_number); + if (unlikely(retval)) { - zfcp_qdio_zero_sbals(req_q->sbal, first, count); + zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first, + sbal_number); return retval; } /* account for transferred buffers */ - atomic_sub(count, &req_q->count); - req_q->first += count; - req_q->first %= QDIO_MAX_BUFFERS_PER_Q; + atomic_sub(sbal_number, &qdio->req_q_free); + qdio->req_q_idx += sbal_number; + qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q; + return 0; } @@ -331,6 +282,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, id->q_format = QDIO_ZFCP_QFMT; memcpy(id->adapter_name, 
dev_name(&id->cdev->dev), 8); ASCEBC(id->adapter_name, 8); + id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; id->qib_param_field_format = 0; id->qib_param_field = NULL; id->input_slib_elements = NULL; @@ -340,10 +292,10 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, id->input_handler = zfcp_qdio_int_resp; id->output_handler = zfcp_qdio_int_req; id->int_parm = (unsigned long) qdio; - id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); - id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); - + id->input_sbal_addr_array = (void **) (qdio->res_q); + id->output_sbal_addr_array = (void **) (qdio->req_q); } + /** * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data * @adapter: pointer to struct zfcp_adapter @@ -354,8 +306,8 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) { struct qdio_initialize init_data; - if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || - zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) + if (zfcp_qdio_buffers_enqueue(qdio->req_q) || + zfcp_qdio_buffers_enqueue(qdio->res_q)) return -ENOMEM; zfcp_qdio_setup_init_data(&init_data, qdio); @@ -369,34 +321,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) */ void zfcp_qdio_close(struct zfcp_qdio *qdio) { - struct zfcp_qdio_queue *req_q; - int first, count; + struct zfcp_adapter *adapter = qdio->adapter; + int idx, count; - if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return; /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ - req_q = &qdio->req_q; spin_lock_bh(&qdio->req_q_lock); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); + atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); spin_unlock_bh(&qdio->req_q_lock); wake_up(&qdio->req_q_wq); - qdio_shutdown(qdio->adapter->ccw_device, - QDIO_FLAG_CLEANUP_USING_CLEAR); + qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); /* cleanup used outbound sbals */ - count = atomic_read(&req_q->count); + count = atomic_read(&qdio->req_q_free); if (count < QDIO_MAX_BUFFERS_PER_Q) { - first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; + idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q; count = QDIO_MAX_BUFFERS_PER_Q - count; - zfcp_qdio_zero_sbals(req_q->sbal, first, count); + zfcp_qdio_zero_sbals(qdio->req_q, idx, count); } - req_q->first = 0; - atomic_set(&req_q->count, 0); - qdio->resp_q.first = 0; - atomic_set(&qdio->resp_q.count, 0); + qdio->req_q_idx = 0; + atomic_set(&qdio->req_q_free, 0); } /** @@ -408,34 +356,45 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) { struct qdio_buffer_element *sbale; struct qdio_initialize init_data; - struct ccw_device *cdev = qdio->adapter->ccw_device; + struct zfcp_adapter *adapter = qdio->adapter; + struct ccw_device *cdev = adapter->ccw_device; + struct qdio_ssqd_desc ssqd; int cc; - if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) + if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) return -EIO; + atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, + &qdio->adapter->status); + zfcp_qdio_setup_init_data(&init_data, qdio); if (qdio_establish(&init_data)) goto failed_establish; + if (qdio_get_ssqd_desc(init_data.cdev, &ssqd)) + goto failed_qdio; + + if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED) + atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, + &qdio->adapter->status); + if (qdio_activate(cdev)) goto failed_qdio; for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { - sbale 
= &(qdio->resp_q.sbal[cc]->element[0]); + sbale = &(qdio->res_q[cc]->element[0]); sbale->length = 0; sbale->flags = SBAL_FLAGS_LAST_ENTRY; sbale->addr = NULL; } - if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, - QDIO_MAX_BUFFERS_PER_Q)) + if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) goto failed_qdio; /* set index of first avalable SBALS / number of available SBALS */ - qdio->req_q.first = 0; - atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); + qdio->req_q_idx = 0; + atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); return 0; @@ -449,7 +408,6 @@ failed_establish: void zfcp_qdio_destroy(struct zfcp_qdio *qdio) { - struct qdio_buffer **sbal_req, **sbal_resp; int p; if (!qdio) @@ -458,12 +416,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio) if (qdio->adapter->ccw_device) qdio_free(qdio->adapter->ccw_device); - sbal_req = qdio->req_q.sbal; - sbal_resp = qdio->resp_q.sbal; - for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { - free_page((unsigned long) sbal_req[p]); - free_page((unsigned long) sbal_resp[p]); + free_page((unsigned long) qdio->req_q[p]); + free_page((unsigned long) qdio->res_q[p]); } kfree(qdio); @@ -491,3 +446,26 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter) return 0; } +/** + * zfcp_qdio_siosl - Trigger logging in FCP channel + * @adapter: The zfcp_adapter where to trigger logging + * + * Call the cio siosl function to trigger hardware logging. This + * wrapper function sets a flag to ensure hardware logging is only + * triggered once before going through qdio shutdown. + * + * The triggers are always run from qdio tasklet context, so no + * additional synchronization is necessary. + */ +void zfcp_qdio_siosl(struct zfcp_adapter *adapter) +{ + int rc; + + if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED) + return; + + rc = ccw_device_siosl(adapter->ccw_device); + if (!rc) + atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, + &adapter->status); +} diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 138fba5..2297d8d 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h @@ -19,22 +19,20 @@ /* index of last SBALE (with respect to DMQ bug workaround) */ #define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1) -/** - * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count - * @sbal: qdio buffers - * @first: index of next free buffer in queue - * @count: number of free buffers in queue - */ -struct zfcp_qdio_queue { - struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; - u8 first; - atomic_t count; -}; +/* Max SBALS for chaining */ +#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36 + +/* max. 
number of (data buffer) SBALEs in largest SBAL chain + * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ +#define ZFCP_QDIO_MAX_SBALES_PER_REQ \ + (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2) /** * struct zfcp_qdio - basic qdio data structure - * @resp_q: response queue + * @res_q: response queue * @req_q: request queue + * @req_q_idx: index of next free buffer + * @req_q_free: number of free buffers in queue * @stat_lock: lock to protect req_q_util and req_q_time * @req_q_lock: lock to serialize access to request queue * @req_q_time: time of last fill level change @@ -44,8 +42,10 @@ struct zfcp_qdio_queue { * @adapter: adapter used in conjunction with this qdio structure */ struct zfcp_qdio { - struct zfcp_qdio_queue resp_q; - struct zfcp_qdio_queue req_q; + struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q]; + struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q]; + u8 req_q_idx; + atomic_t req_q_free; spinlock_t stat_lock; spinlock_t req_q_lock; unsigned long long req_q_time; @@ -65,7 +65,6 @@ struct zfcp_qdio { * @sbale_curr: current sbale at creation of this request * @sbal_response: sbal used in interrupt * @qdio_outb_usage: usage of outbound queue - * @qdio_inb_usage: usage of inbound queue */ struct zfcp_qdio_req { u32 sbtype; @@ -76,22 +75,9 @@ struct zfcp_qdio_req { u8 sbale_curr; u8 sbal_response; u16 qdio_outb_usage; - u16 qdio_inb_usage; }; /** - * zfcp_qdio_sbale - return pointer to sbale in qdio queue - * @q: queue where to find sbal - * @sbal_idx: sbal index in queue - * @sbale_idx: sbale index in sbal - */ -static inline struct qdio_buffer_element * -zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) -{ - return &q->sbal[sbal_idx]->element[sbale_idx]; -} - -/** * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request * @qdio: pointer to struct zfcp_qdio * @q_rec: pointer to struct zfcp_qdio_req @@ -100,7 +86,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) static inline struct qdio_buffer_element * zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); + return &qdio->req_q[q_req->sbal_last]->element[0]; } /** @@ -112,8 +98,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) static inline struct qdio_buffer_element * zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, - q_req->sbale_curr); + return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr]; } /** @@ -134,21 +119,25 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, unsigned long req_id, u32 sbtype, void *data, u32 len) { struct qdio_buffer_element *sbale; + int count = min(atomic_read(&qdio->req_q_free), + ZFCP_QDIO_MAX_SBALS_PER_REQ); - q_req->sbal_first = q_req->sbal_last = qdio->req_q.first; + q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx; q_req->sbal_number = 1; q_req->sbtype = sbtype; + q_req->sbale_curr = 1; + q_req->sbal_limit = (q_req->sbal_first + count - 1) + % QDIO_MAX_BUFFERS_PER_Q; sbale = zfcp_qdio_sbale_req(qdio, q_req); sbale->addr = (void *) req_id; - sbale->flags |= SBAL_FLAGS0_COMMAND; - sbale->flags |= sbtype; + sbale->flags = SBAL_FLAGS0_COMMAND | sbtype; - q_req->sbale_curr = 1; + if (unlikely(!data)) + return; sbale++; sbale->addr = data; - if (likely(data)) - sbale->length = len; + sbale->length = len; } /** @@ -210,4 +199,36 @@ void zfcp_qdio_skip_to_last_sbale(struct 
zfcp_qdio_req *q_req) q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL; } +/** + * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req + * @qdio: pointer to struct zfcp_qdio + * @q_req: The current zfcp_qdio_req + * @max_sbals: maximum number of SBALs allowed + */ +static inline +void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req, int max_sbals) +{ + int count = min(atomic_read(&qdio->req_q_free), max_sbals); + + q_req->sbal_limit = (q_req->sbal_first + count - 1) % + QDIO_MAX_BUFFERS_PER_Q; +} + +/** + * zfcp_qdio_set_data_div - set data division count + * @qdio: pointer to struct zfcp_qdio + * @q_req: The current zfcp_qdio_req + * @count: The data division count + */ +static inline +void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req, u32 count) +{ + struct qdio_buffer_element *sbale; + + sbale = &qdio->req_q[q_req->sbal_first]->element[0]; + sbale->length = count; +} + #endif /* ZFCP_QDIO_H */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index be5d2c6..cb000c9 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -12,6 +12,7 @@ #include <linux/types.h> #include <linux/slab.h> #include <scsi/fc/fc_fcp.h> +#include <scsi/scsi_eh.h> #include <asm/atomic.h> #include "zfcp_ext.h" #include "zfcp_dbf.h" @@ -22,6 +23,13 @@ static unsigned int default_depth = 32; module_param_named(queue_depth, default_depth, uint, 0600); MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); +static bool enable_dif; + +#ifdef CONFIG_ZFCP_DIF +module_param_named(dif, enable_dif, bool, 0600); +MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); +#endif + static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) { @@ -506,8 +514,10 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) * @rport: The FC rport where to teminate I/O * * Abort all pending SCSI commands for a port by closing the - * port. Using a reopen avoiding a conflict with a shutdown - * overwriting a reopen. + * port. Using a reopen avoids a conflict with a shutdown + * overwriting a reopen. The "forced" ensures that a disappeared port + * is not opened again as valid due to the cached plogi data in + * non-NPIV mode. 
*/ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) { @@ -519,11 +529,25 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) port = zfcp_get_port_by_wwpn(adapter, rport->port_name); if (port) { - zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); + zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL); put_device(&port->dev); } } +static void zfcp_scsi_queue_unit_register(struct zfcp_port *port) +{ + struct zfcp_unit *unit; + + read_lock_irq(&port->unit_list_lock); + list_for_each_entry(unit, &port->unit_list, list) { + get_device(&unit->dev); + if (scsi_queue_work(port->adapter->scsi_host, + &unit->scsi_work) <= 0) + put_device(&unit->dev); + } + read_unlock_irq(&port->unit_list_lock); +} + static void zfcp_scsi_rport_register(struct zfcp_port *port) { struct fc_rport_identifiers ids; @@ -548,6 +572,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) rport->maxframe_size = port->maxframe_size; rport->supported_classes = port->supported_classes; port->rport = rport; + port->starget_id = rport->scsi_target_id; + + zfcp_scsi_queue_unit_register(port); } static void zfcp_scsi_rport_block(struct zfcp_port *port) @@ -610,24 +637,74 @@ void zfcp_scsi_rport_work(struct work_struct *work) put_device(&port->dev); } - -void zfcp_scsi_scan(struct work_struct *work) +/** + * zfcp_scsi_scan - Register LUN with SCSI midlayer + * @unit: The LUN/unit to register + */ +void zfcp_scsi_scan(struct zfcp_unit *unit) { - struct zfcp_unit *unit = container_of(work, struct zfcp_unit, - scsi_work); - struct fc_rport *rport; - - flush_work(&unit->port->rport_work); - rport = unit->port->rport; + struct fc_rport *rport = unit->port->rport; if (rport && rport->port_state == FC_PORTSTATE_ONLINE) scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, scsilun_to_int((struct scsi_lun *) &unit->fcp_lun), 0); +} +void zfcp_scsi_scan_work(struct work_struct *work) +{ + struct zfcp_unit *unit = container_of(work, struct zfcp_unit, + scsi_work); + + zfcp_scsi_scan(unit); put_device(&unit->dev); } +/** + * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host + * @adapter: The adapter where to configure DIF/DIX for the SCSI host + */ +void zfcp_scsi_set_prot(struct zfcp_adapter *adapter) +{ + unsigned int mask = 0; + unsigned int data_div; + struct Scsi_Host *shost = adapter->scsi_host; + + data_div = atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED; + + if (enable_dif && + adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1) + mask |= SHOST_DIF_TYPE1_PROTECTION; + + if (enable_dif && data_div && + adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { + mask |= SHOST_DIX_TYPE1_PROTECTION; + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); + shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; + shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; + } + + scsi_host_set_prot(shost, mask); +} + +/** + * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error + * @scmd: The SCSI command to report the error for + * @ascq: The ASCQ to put in the sense buffer + * + * See the error handling in sd_done for the sense codes used here. + * Set DID_SOFT_ERROR to retry the request, if possible. 
+ */ +void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq) +{ + scsi_build_sense_buffer(1, scmd->sense_buffer, + ILLEGAL_REQUEST, 0x10, ascq); + set_driver_byte(scmd, DRIVER_SENSE); + scmd->result |= SAM_STAT_CHECK_CONDITION; + set_host_byte(scmd, DID_SOFT_ERROR); +} + struct fc_function_template zfcp_transport_functions = { .show_starget_port_id = 1, .show_starget_port_name = 1, @@ -677,11 +754,11 @@ struct zfcp_data zfcp_data = { .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, .can_queue = 4096, .this_id = -1, - .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ, + .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, .cmd_per_lun = 1, .use_clustering = 1, .sdev_attrs = zfcp_sysfs_sdev_attrs, - .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8), + .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, .shost_attrs = zfcp_sysfs_shost_attrs, }, diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index f5f6069..b4561c86 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -275,7 +275,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); zfcp_erp_wait(unit->port->adapter); - flush_work(&unit->scsi_work); + zfcp_scsi_scan(unit); out: put_device(&port->dev); return retval ? retval : (ssize_t) count; @@ -290,6 +290,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, struct zfcp_unit *unit; u64 fcp_lun; int retval = -EINVAL; + struct scsi_device *sdev; if (!(port && get_device(&port->dev))) return -EBUSY; @@ -303,8 +304,13 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, else retval = 0; - /* wait for possible timeout during SCSI probe */ - flush_work(&unit->scsi_work); + sdev = scsi_device_lookup(port->adapter->scsi_host, 0, + port->starget_id, + scsilun_to_int((struct scsi_lun *)&fcp_lun)); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } write_lock_irq(&port->unit_list_lock); list_del(&unit->list); |