author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 11:48:13 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 11:48:13 -0700
commit    | 6a454f71d795368c00d9c329b60cc4d58929e7bc (patch)
tree      | f85a7ed30ba1d25606a22cc07e7383e73fc49972 /drivers
parent    | d613839ef987d20f7c9347732b452efd921b97d9 (diff)
parent    | 155af2f95f905c830688dd0ca7c7cac4107334fd (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (33 commits)
[S390] s390: hibernation support for s390
[S390] pm: dcssblk power management callbacks.
[S390] pm: monreader power management callbacks.
[S390] pm: monwriter power management callbacks.
[S390] pm: memory hotplug power management callbacks
[S390] pm: con3270 power management callbacks.
[S390] pm: smsgiucv power management callbacks.
[S390] pm: hvc_iucv power management callbacks
[S390] PM: af_iucv power management callbacks.
[S390] pm: netiucv power management callbacks.
[S390] pm: iucv power management callbacks.
[S390] iucv: establish reboot notifier
[S390] pm: power management support for SCLP drivers.
[S390] pm: tape power management callbacks
[S390] pm: vmlogrdr power management callbacks
[S390] pm: vmur driver power management callbacks
[S390] pm: appldata power management callbacks
[S390] pm: vmwatchdog power management callbacks.
[S390] pm: zfcp driver power management callbacks
[S390] pm: claw driver power management callbacks
...
Diffstat (limited to 'drivers')
50 files changed, 2773 insertions, 456 deletions
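Every driver touched by this merge wires up the same hibernation hooks: a freeze callback that quiesces the device before the hibernation image is written, and thaw/restore callbacks that bring it back afterwards. Below is a minimal sketch of the platform-driver form of that pattern, the form used by dcssblk, xpram and monwriter in the patch; the "pmdemo" names are hypothetical and only illustrate the wiring. dasd, con3215 and raw3270 attach equivalent freeze/thaw/restore callbacks to their struct ccw_driver instead.

```c
/*
 * Minimal sketch (hypothetical "pmdemo" driver, not part of this merge):
 * freeze stops new work before the hibernation image is written, restore
 * revalidates and restarts the device, thaw reuses the restore path.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int pmdemo_freeze(struct device *dev)
{
	/* Quiesce the device; no new I/O while the image is written. */
	return 0;
}

static int pmdemo_restore(struct device *dev)
{
	/* Re-validate device state and resume normal operation. */
	return 0;
}

static struct dev_pm_ops pmdemo_pm_ops = {
	.freeze  = pmdemo_freeze,
	.thaw    = pmdemo_restore,
	.restore = pmdemo_restore,
};

static struct platform_driver pmdemo_pdrv = {
	.driver = {
		.name  = "pmdemo",
		.owner = THIS_MODULE,
		.pm    = &pmdemo_pm_ops,
	},
};

static struct platform_device *pmdemo_pdev;

static int __init pmdemo_init(void)
{
	int rc;

	rc = platform_driver_register(&pmdemo_pdrv);
	if (rc)
		return rc;
	pmdemo_pdev = platform_device_register_simple("pmdemo", -1, NULL, 0);
	if (IS_ERR(pmdemo_pdev)) {
		platform_driver_unregister(&pmdemo_pdrv);
		return PTR_ERR(pmdemo_pdev);
	}
	return 0;
}

static void __exit pmdemo_exit(void)
{
	platform_device_unregister(pmdemo_pdev);
	platform_driver_unregister(&pmdemo_pdrv);
}

module_init(pmdemo_init);
module_exit(pmdemo_exit);
MODULE_LICENSE("GPL");
```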
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index 54481a8..86105ef 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c @@ -4,7 +4,7 @@ * This HVC device driver provides terminal access using * z/VM IUCV communication paths. * - * Copyright IBM Corp. 2008 + * Copyright IBM Corp. 2008, 2009 * * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ @@ -15,6 +15,7 @@ #include <asm/ebcdic.h> #include <linux/ctype.h> #include <linux/delay.h> +#include <linux/device.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/moduleparam.h> @@ -74,6 +75,7 @@ struct hvc_iucv_private { wait_queue_head_t sndbuf_waitq; /* wait for send completion */ struct list_head tty_outqueue; /* outgoing IUCV messages */ struct list_head tty_inqueue; /* incoming IUCV messages */ + struct device *dev; /* device structure */ }; struct iucv_tty_buffer { @@ -542,7 +544,68 @@ static void flush_sndbuf_sync(struct hvc_iucv_private *priv) if (sync_wait) wait_event_timeout(priv->sndbuf_waitq, - tty_outqueue_empty(priv), HZ); + tty_outqueue_empty(priv), HZ/10); +} + +/** + * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up + * @priv: Pointer to hvc_iucv_private structure + * + * This routine severs an existing IUCV communication path and hangs + * up the underlying HVC terminal device. + * The hang-up occurs only if an IUCV communication path is established; + * otherwise there is no need to hang up the terminal device. + * + * The IUCV HVC hang-up is separated into two steps: + * 1. After the IUCV path has been severed, the iucv_state is set to + * IUCV_SEVERED. + * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the + * IUCV_SEVERED state causes the tty hang-up in the HVC layer. + * + * If the tty has not yet been opened, clean up the hvc_iucv_private + * structure to allow re-connects. + * If the tty has been opened, let get_chars() return -EPIPE to signal + * the HVC layer to hang up the tty and, if so, wake up the HVC thread + * to call get_chars()... + * + * Special notes on hanging up a HVC terminal instantiated as console: + * Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops) + * 2. do_tty_hangup() calls tty->ops->close() for console_filp + * => no hangup notifier is called by HVC (default) + * 2. hvc_close() returns because of tty_hung_up_p(filp) + * => no delete notifier is called! + * Finally, the back-end is not being notified, thus, the tty session is + * kept active (TTY_OPEN) to be ready for re-connects. + * + * Locking: spin_lock(&priv->lock) w/o disabling bh + */ +static void hvc_iucv_hangup(struct hvc_iucv_private *priv) +{ + struct iucv_path *path; + + path = NULL; + spin_lock(&priv->lock); + if (priv->iucv_state == IUCV_CONNECTED) { + path = priv->path; + priv->path = NULL; + priv->iucv_state = IUCV_SEVERED; + if (priv->tty_state == TTY_CLOSED) + hvc_iucv_cleanup(priv); + else + /* console is special (see above) */ + if (priv->is_console) { + hvc_iucv_cleanup(priv); + priv->tty_state = TTY_OPENED; + } else + hvc_kick(); + } + spin_unlock(&priv->lock); + + /* finally sever path (outside of priv->lock due to lock ordering) */ + if (path) { + iucv_path_sever(path, NULL); + iucv_path_free(path); + } } /** @@ -735,11 +798,8 @@ out_path_handled: * @ipuser: User specified data for this path * (AF_IUCV: port/service name and originator port) * - * The function also severs the path (as required by the IUCV protocol) and - * sets the iucv state to IUCV_SEVERED for the associated struct - * hvc_iucv_private instance. 
Later, the IUCV_SEVERED state triggers a tty - * hangup (hvc_iucv_get_chars() / hvc_iucv_write()). - * If tty portion of the HVC is closed, clean up the outqueue. + * This function calls the hvc_iucv_hangup() function for the + * respective IUCV HVC terminal. * * Locking: struct hvc_iucv_private->lock */ @@ -747,33 +807,7 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) { struct hvc_iucv_private *priv = path->private; - spin_lock(&priv->lock); - priv->iucv_state = IUCV_SEVERED; - - /* If the tty has not yet been opened, clean up the hvc_iucv_private - * structure to allow re-connects. - * This is also done for our console device because console hangups - * are handled specially and no notifier is called by HVC. - * The tty session is active (TTY_OPEN) and ready for re-connects... - * - * If it has been opened, let get_chars() return -EPIPE to signal the - * HVC layer to hang up the tty. - * If so, we need to wake up the HVC thread to call get_chars()... - */ - priv->path = NULL; - if (priv->tty_state == TTY_CLOSED) - hvc_iucv_cleanup(priv); - else - if (priv->is_console) { - hvc_iucv_cleanup(priv); - priv->tty_state = TTY_OPENED; - } else - hvc_kick(); - spin_unlock(&priv->lock); - - /* finally sever path (outside of priv->lock due to lock ordering) */ - iucv_path_sever(path, ipuser); - iucv_path_free(path); + hvc_iucv_hangup(priv); } /** @@ -853,6 +887,37 @@ static void hvc_iucv_msg_complete(struct iucv_path *path, destroy_tty_buffer_list(&list_remove); } +/** + * hvc_iucv_pm_freeze() - Freeze PM callback + * @dev: IUVC HVC terminal device + * + * Sever an established IUCV communication path and + * trigger a hang-up of the underlying HVC terminal. + */ +static int hvc_iucv_pm_freeze(struct device *dev) +{ + struct hvc_iucv_private *priv = dev_get_drvdata(dev); + + local_bh_disable(); + hvc_iucv_hangup(priv); + local_bh_enable(); + + return 0; +} + +/** + * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback + * @dev: IUVC HVC terminal device + * + * Wake up the HVC thread to trigger hang-up and respective + * HVC back-end notifier invocations. 
+ */ +static int hvc_iucv_pm_restore_thaw(struct device *dev) +{ + hvc_kick(); + return 0; +} + /* HVC operations */ static struct hv_ops hvc_iucv_ops = { @@ -863,6 +928,20 @@ static struct hv_ops hvc_iucv_ops = { .notifier_hangup = hvc_iucv_notifier_hangup, }; +/* Suspend / resume device operations */ +static struct dev_pm_ops hvc_iucv_pm_ops = { + .freeze = hvc_iucv_pm_freeze, + .thaw = hvc_iucv_pm_restore_thaw, + .restore = hvc_iucv_pm_restore_thaw, +}; + +/* IUCV HVC device driver */ +static struct device_driver hvc_iucv_driver = { + .name = KMSG_COMPONENT, + .bus = &iucv_bus, + .pm = &hvc_iucv_pm_ops, +}; + /** * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance * @id: hvc_iucv_table index @@ -897,14 +976,12 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console) /* set console flag */ priv->is_console = is_console; - /* finally allocate hvc */ + /* allocate hvc device */ priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */ HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256); if (IS_ERR(priv->hvc)) { rc = PTR_ERR(priv->hvc); - free_page((unsigned long) priv->sndbuf); - kfree(priv); - return rc; + goto out_error_hvc; } /* notify HVC thread instead of using polling */ @@ -915,8 +992,45 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console) memcpy(priv->srv_name, name, 8); ASCEBC(priv->srv_name, 8); + /* create and setup device */ + priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL); + if (!priv->dev) { + rc = -ENOMEM; + goto out_error_dev; + } + dev_set_name(priv->dev, "hvc_iucv%d", id); + dev_set_drvdata(priv->dev, priv); + priv->dev->bus = &iucv_bus; + priv->dev->parent = iucv_root; + priv->dev->driver = &hvc_iucv_driver; + priv->dev->release = (void (*)(struct device *)) kfree; + rc = device_register(priv->dev); + if (rc) { + kfree(priv->dev); + goto out_error_dev; + } + hvc_iucv_table[id] = priv; return 0; + +out_error_dev: + hvc_remove(priv->hvc); +out_error_hvc: + free_page((unsigned long) priv->sndbuf); + kfree(priv); + + return rc; +} + +/** + * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances + */ +static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv) +{ + hvc_remove(priv->hvc); + device_unregister(priv->dev); + free_page((unsigned long) priv->sndbuf); + kfree(priv); } /** @@ -1109,6 +1223,11 @@ static int __init hvc_iucv_init(void) goto out_error; } + /* register IUCV HVC device driver */ + rc = driver_register(&hvc_iucv_driver); + if (rc) + goto out_error; + /* parse hvc_iucv_allow string and create z/VM user ID filter list */ if (hvc_iucv_filter_string) { rc = hvc_iucv_setup_filter(hvc_iucv_filter_string); @@ -1183,15 +1302,14 @@ out_error_iucv: iucv_unregister(&hvc_iucv_handler, 0); out_error_hvc: for (i = 0; i < hvc_iucv_devices; i++) - if (hvc_iucv_table[i]) { - if (hvc_iucv_table[i]->hvc) - hvc_remove(hvc_iucv_table[i]->hvc); - kfree(hvc_iucv_table[i]); - } + if (hvc_iucv_table[i]) + hvc_iucv_destroy(hvc_iucv_table[i]); out_error_memory: mempool_destroy(hvc_iucv_mempool); kmem_cache_destroy(hvc_iucv_buffer_cache); out_error: + if (hvc_iucv_filter) + kfree(hvc_iucv_filter); hvc_iucv_devices = 0; /* ensure that we do not provide any device */ return rc; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 442bb98..e5b84db 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -5,8 +5,7 @@ * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung 
GmbH, 1999-2001 - * + * Copyright IBM Corp. 1999, 2009 */ #define KMSG_COMPONENT "dasd" @@ -61,6 +60,7 @@ static int dasd_flush_block_queue(struct dasd_block *); static void dasd_device_tasklet(struct dasd_device *); static void dasd_block_tasklet(struct dasd_block *); static void do_kick_device(struct work_struct *); +static void do_restore_device(struct work_struct *); static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); static void dasd_device_timeout(unsigned long); static void dasd_block_timeout(unsigned long); @@ -109,6 +109,7 @@ struct dasd_device *dasd_alloc_device(void) device->timer.function = dasd_device_timeout; device->timer.data = (unsigned long) device; INIT_WORK(&device->kick_work, do_kick_device); + INIT_WORK(&device->restore_device, do_restore_device); device->state = DASD_STATE_NEW; device->target = DASD_STATE_NEW; @@ -512,6 +513,25 @@ void dasd_kick_device(struct dasd_device *device) } /* + * dasd_restore_device will schedule a call do do_restore_device to the kernel + * event daemon. + */ +static void do_restore_device(struct work_struct *work) +{ + struct dasd_device *device = container_of(work, struct dasd_device, + restore_device); + device->cdev->drv->restore(device->cdev); + dasd_put_device(device); +} + +void dasd_restore_device(struct dasd_device *device) +{ + dasd_get_device(device); + /* queue call to dasd_restore_device to the kernel event daemon. */ + schedule_work(&device->restore_device); +} + +/* * Set the target state for a device and starts the state change. */ void dasd_set_target_state(struct dasd_device *device, int target) @@ -908,6 +928,12 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) DBF_DEV_EVENT(DBF_DEBUG, device, "%s", "start_IO: -EIO device gone, retry"); break; + case -EINVAL: + /* most likely caused in power management context */ + DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + "start_IO: -EINVAL device currently " + "not accessible"); + break; default: /* internal error 11 - unknown rc */ snprintf(errorstring, ERRORLENGTH, "11 %d", rc); @@ -2400,6 +2426,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) case CIO_OPER: /* FIXME: add a sanity check. 
*/ device->stopped &= ~DASD_STOPPED_DC_WAIT; + if (device->stopped & DASD_UNRESUMED_PM) { + device->stopped &= ~DASD_UNRESUMED_PM; + dasd_restore_device(device); + ret = 1; + break; + } dasd_schedule_device_bh(device); if (device->block) dasd_schedule_block_bh(device->block); @@ -2410,6 +2442,79 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) return ret; } +int dasd_generic_pm_freeze(struct ccw_device *cdev) +{ + struct dasd_ccw_req *cqr, *n; + int rc; + struct list_head freeze_queue; + struct dasd_device *device = dasd_device_from_cdev(cdev); + + if (IS_ERR(device)) + return PTR_ERR(device); + /* disallow new I/O */ + device->stopped |= DASD_STOPPED_PM; + /* clear active requests */ + INIT_LIST_HEAD(&freeze_queue); + spin_lock_irq(get_ccwdev_lock(cdev)); + rc = 0; + list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { + /* Check status and move request to flush_queue */ + if (cqr->status == DASD_CQR_IN_IO) { + rc = device->discipline->term_IO(cqr); + if (rc) { + /* unable to terminate requeust */ + dev_err(&device->cdev->dev, + "Unable to terminate request %p " + "on suspend\n", cqr); + spin_unlock_irq(get_ccwdev_lock(cdev)); + dasd_put_device(device); + return rc; + } + } + list_move_tail(&cqr->devlist, &freeze_queue); + } + + spin_unlock_irq(get_ccwdev_lock(cdev)); + + list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { + wait_event(dasd_flush_wq, + (cqr->status != DASD_CQR_CLEAR_PENDING)); + if (cqr->status == DASD_CQR_CLEARED) + cqr->status = DASD_CQR_QUEUED; + } + /* move freeze_queue to start of the ccw_queue */ + spin_lock_irq(get_ccwdev_lock(cdev)); + list_splice_tail(&freeze_queue, &device->ccw_queue); + spin_unlock_irq(get_ccwdev_lock(cdev)); + + if (device->discipline->freeze) + rc = device->discipline->freeze(device); + + dasd_put_device(device); + return rc; +} +EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); + +int dasd_generic_restore_device(struct ccw_device *cdev) +{ + struct dasd_device *device = dasd_device_from_cdev(cdev); + int rc = 0; + + if (IS_ERR(device)) + return PTR_ERR(device); + + dasd_schedule_device_bh(device); + if (device->block) + dasd_schedule_block_bh(device->block); + + if (device->discipline->restore) + rc = device->discipline->restore(device); + + dasd_put_device(device); + return rc; +} +EXPORT_SYMBOL_GPL(dasd_generic_restore_device); + static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, void *rdc_buffer, int rdc_buffer_size, diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index e77666c..4cac5b5 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -1098,6 +1098,7 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid) spin_unlock(&dasd_devmap_lock); return 0; } +EXPORT_SYMBOL_GPL(dasd_get_uid); /* * Register the given device unique identifier into devmap struct. diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index cf0cfdb..1c28ec3 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -5,10 +5,9 @@ * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * Copyright IBM Corp. 
1999, 2009 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008 * Author.........: Nigel Hislop <hislop_nigel@emc.com> - * */ #define KMSG_COMPONENT "dasd" @@ -104,17 +103,6 @@ dasd_eckd_set_online(struct ccw_device *cdev) return dasd_generic_set_online(cdev, &dasd_eckd_discipline); } -static struct ccw_driver dasd_eckd_driver = { - .name = "dasd-eckd", - .owner = THIS_MODULE, - .ids = dasd_eckd_ids, - .probe = dasd_eckd_probe, - .remove = dasd_generic_remove, - .set_offline = dasd_generic_set_offline, - .set_online = dasd_eckd_set_online, - .notify = dasd_generic_notify, -}; - static const int sizes_trk0[] = { 28, 148, 84 }; #define LABEL_SIZE 140 @@ -3236,6 +3224,98 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, dasd_eckd_dump_sense_ccw(device, req, irb); } +int dasd_eckd_pm_freeze(struct dasd_device *device) +{ + /* + * the device should be disconnected from our LCU structure + * on restore we will reconnect it and reread LCU specific + * information like PAV support that might have changed + */ + dasd_alias_remove_device(device); + dasd_alias_disconnect_device_from_lcu(device); + + return 0; +} + +int dasd_eckd_restore_device(struct dasd_device *device) +{ + struct dasd_eckd_private *private; + int is_known, rc; + struct dasd_uid temp_uid; + + /* allow new IO again */ + device->stopped &= ~DASD_STOPPED_PM; + + private = (struct dasd_eckd_private *) device->private; + + /* Read Configuration Data */ + rc = dasd_eckd_read_conf(device); + if (rc) + goto out_err; + + /* Generate device unique id and register in devmap */ + rc = dasd_eckd_generate_uid(device, &private->uid); + dasd_get_uid(device->cdev, &temp_uid); + if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) + dev_err(&device->cdev->dev, "The UID of the DASD has changed\n"); + if (rc) + goto out_err; + dasd_set_uid(device->cdev, &private->uid); + + /* register lcu with alias handling, enable PAV if this is a new lcu */ + is_known = dasd_alias_make_device_known_to_lcu(device); + if (is_known < 0) + return is_known; + if (!is_known) { + /* new lcu found */ + rc = dasd_eckd_validate_server(device); /* will switch pav on */ + if (rc) + goto out_err; + } + + /* Read Feature Codes */ + rc = dasd_eckd_read_features(device); + if (rc) + goto out_err; + + /* Read Device Characteristics */ + memset(&private->rdc_data, 0, sizeof(private->rdc_data)); + rc = dasd_generic_read_dev_chars(device, "ECKD", + &private->rdc_data, 64); + if (rc) { + DBF_EVENT(DBF_WARNING, + "Read device characteristics failed, rc=%d for " + "device: %s", rc, dev_name(&device->cdev->dev)); + goto out_err; + } + + /* add device to alias management */ + dasd_alias_add_device(device); + + return 0; + +out_err: + /* + * if the resume failed for the DASD we put it in + * an UNRESUMED stop state + */ + device->stopped |= DASD_UNRESUMED_PM; + return 0; +} + +static struct ccw_driver dasd_eckd_driver = { + .name = "dasd-eckd", + .owner = THIS_MODULE, + .ids = dasd_eckd_ids, + .probe = dasd_eckd_probe, + .remove = dasd_generic_remove, + .set_offline = dasd_generic_set_offline, + .set_online = dasd_eckd_set_online, + .notify = dasd_generic_notify, + .freeze = dasd_generic_pm_freeze, + .thaw = dasd_generic_restore_device, + .restore = dasd_generic_restore_device, +}; /* * max_blocks is dependent on the amount of storage that is available @@ -3274,6 +3354,8 @@ static struct dasd_discipline dasd_eckd_discipline = { .dump_sense_dbf = dasd_eckd_dump_sense_dbf, .fill_info = dasd_eckd_fill_info, .ioctl = dasd_eckd_ioctl, + .freeze = 
dasd_eckd_pm_freeze, + .restore = dasd_eckd_restore_device, }; static int __init diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 597c6ff..e21ee73 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -2,8 +2,7 @@ * File...........: linux/drivers/s390/block/dasd_fba.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 - * + * Copyright IBM Corp. 1999, 2009 */ #define KMSG_COMPONENT "dasd" @@ -75,6 +74,9 @@ static struct ccw_driver dasd_fba_driver = { .set_offline = dasd_generic_set_offline, .set_online = dasd_fba_set_online, .notify = dasd_generic_notify, + .freeze = dasd_generic_pm_freeze, + .thaw = dasd_generic_restore_device, + .restore = dasd_generic_restore_device, }; static void diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index f97ceb7..fd63b2f 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -4,8 +4,7 @@ * Horst Hummel <Horst.Hummel@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 - * + * Copyright IBM Corp. 1999, 2009 */ #ifndef DASD_INT_H @@ -295,6 +294,10 @@ struct dasd_discipline { int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); int (*ioctl) (struct dasd_block *, unsigned int, void __user *); + + /* suspend/resume functions */ + int (*freeze) (struct dasd_device *); + int (*restore) (struct dasd_device *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -367,6 +370,7 @@ struct dasd_device { atomic_t tasklet_scheduled; struct tasklet_struct tasklet; struct work_struct kick_work; + struct work_struct restore_device; struct timer_list timer; debug_info_t *debug_area; @@ -410,6 +414,8 @@ struct dasd_block { #define DASD_STOPPED_PENDING 4 /* long busy */ #define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */ #define DASD_STOPPED_SU 16 /* summary unit check handling */ +#define DASD_STOPPED_PM 32 /* pm state transition */ +#define DASD_UNRESUMED_PM 64 /* pm resume failed state */ /* per device flags */ #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ @@ -556,6 +562,7 @@ void dasd_free_block(struct dasd_block *); void dasd_enable_device(struct dasd_device *); void dasd_set_target_state(struct dasd_device *, int); void dasd_kick_device(struct dasd_device *); +void dasd_restore_device(struct dasd_device *); void dasd_add_request_head(struct dasd_ccw_req *); void dasd_add_request_tail(struct dasd_ccw_req *); @@ -578,6 +585,8 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); int dasd_generic_set_offline (struct ccw_device *cdev); int dasd_generic_notify(struct ccw_device *, int); void dasd_generic_handle_state_change(struct dasd_device *); +int dasd_generic_pm_freeze(struct ccw_device *); +int dasd_generic_restore_device(struct ccw_device *); int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); char *dasd_get_sense(struct irb *); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index b21caf1..016f9e9 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -14,10 +14,11 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/blkdev.h> -#include <asm/extmem.h> -#include <asm/io.h> #include 
<linux/completion.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <asm/extmem.h> +#include <asm/io.h> #define DCSSBLK_NAME "dcssblk" #define DCSSBLK_MINORS_PER_DISK 1 @@ -940,11 +941,94 @@ dcssblk_check_params(void) } /* + * Suspend / Resume + */ +static int dcssblk_freeze(struct device *dev) +{ + struct dcssblk_dev_info *dev_info; + int rc = 0; + + list_for_each_entry(dev_info, &dcssblk_devices, lh) { + switch (dev_info->segment_type) { + case SEG_TYPE_SR: + case SEG_TYPE_ER: + case SEG_TYPE_SC: + if (!dev_info->is_shared) + rc = -EINVAL; + break; + default: + rc = -EINVAL; + break; + } + if (rc) + break; + } + if (rc) + pr_err("Suspend failed because device %s is writeable.\n", + dev_info->segment_name); + return rc; +} + +static int dcssblk_restore(struct device *dev) +{ + struct dcssblk_dev_info *dev_info; + struct segment_info *entry; + unsigned long start, end; + int rc = 0; + + list_for_each_entry(dev_info, &dcssblk_devices, lh) { + list_for_each_entry(entry, &dev_info->seg_list, lh) { + segment_unload(entry->segment_name); + rc = segment_load(entry->segment_name, SEGMENT_SHARED, + &start, &end); + if (rc < 0) { +// TODO in_use check ? + segment_warning(rc, entry->segment_name); + goto out_panic; + } + if (start != entry->start || end != entry->end) { + pr_err("Mismatch of start / end address after " + "resuming device %s\n", + entry->segment_name); + goto out_panic; + } + } + } + return 0; +out_panic: + panic("fatal dcssblk resume error\n"); +} + +static int dcssblk_thaw(struct device *dev) +{ + return 0; +} + +static struct dev_pm_ops dcssblk_pm_ops = { + .freeze = dcssblk_freeze, + .thaw = dcssblk_thaw, + .restore = dcssblk_restore, +}; + +static struct platform_driver dcssblk_pdrv = { + .driver = { + .name = "dcssblk", + .owner = THIS_MODULE, + .pm = &dcssblk_pm_ops, + }, +}; + +static struct platform_device *dcssblk_pdev; + + +/* * The init/exit functions. 
*/ static void __exit dcssblk_exit(void) { + platform_device_unregister(dcssblk_pdev); + platform_driver_unregister(&dcssblk_pdrv); root_device_unregister(dcssblk_root_dev); unregister_blkdev(dcssblk_major, DCSSBLK_NAME); } @@ -954,30 +1038,44 @@ dcssblk_init(void) { int rc; - dcssblk_root_dev = root_device_register("dcssblk"); - if (IS_ERR(dcssblk_root_dev)) - return PTR_ERR(dcssblk_root_dev); - rc = device_create_file(dcssblk_root_dev, &dev_attr_add); - if (rc) { - root_device_unregister(dcssblk_root_dev); + rc = platform_driver_register(&dcssblk_pdrv); + if (rc) return rc; + + dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL, + 0); + if (IS_ERR(dcssblk_pdev)) { + rc = PTR_ERR(dcssblk_pdev); + goto out_pdrv; } - rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); - if (rc) { - root_device_unregister(dcssblk_root_dev); - return rc; + + dcssblk_root_dev = root_device_register("dcssblk"); + if (IS_ERR(dcssblk_root_dev)) { + rc = PTR_ERR(dcssblk_root_dev); + goto out_pdev; } + rc = device_create_file(dcssblk_root_dev, &dev_attr_add); + if (rc) + goto out_root; + rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); + if (rc) + goto out_root; rc = register_blkdev(0, DCSSBLK_NAME); - if (rc < 0) { - root_device_unregister(dcssblk_root_dev); - return rc; - } + if (rc < 0) + goto out_root; dcssblk_major = rc; init_rwsem(&dcssblk_devices_sem); dcssblk_check_params(); - return 0; + +out_root: + root_device_unregister(dcssblk_root_dev); +out_pdev: + platform_device_unregister(dcssblk_pdev); +out_pdrv: + platform_driver_unregister(&dcssblk_pdrv); + return rc; } module_init(dcssblk_init); diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 0ae0c83..2e9e1ec 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -39,7 +39,10 @@ #include <linux/hdreg.h> /* HDIO_GETGEO */ #include <linux/sysdev.h> #include <linux/bio.h> +#include <linux/suspend.h> +#include <linux/platform_device.h> #include <asm/uaccess.h> +#include <asm/checksum.h> #define XPRAM_NAME "xpram" #define XPRAM_DEVS 1 /* one partition */ @@ -48,6 +51,7 @@ typedef struct { unsigned int size; /* size of xpram segment in pages */ unsigned int offset; /* start page of xpram segment */ + unsigned int csum; /* partition checksum for suspend */ } xpram_device_t; static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; @@ -138,7 +142,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) /* * Check if xpram is available. */ -static int __init xpram_present(void) +static int xpram_present(void) { unsigned long mem_page; int rc; @@ -154,7 +158,7 @@ static int __init xpram_present(void) /* * Return index of the last available xpram page. */ -static unsigned long __init xpram_highest_page_index(void) +static unsigned long xpram_highest_page_index(void) { unsigned int page_index, add_bit; unsigned long mem_page; @@ -383,6 +387,106 @@ out: } /* + * Save checksums for all partitions. + */ +static int xpram_save_checksums(void) +{ + unsigned long mem_page; + int rc, i; + + rc = 0; + mem_page = (unsigned long) __get_free_page(GFP_KERNEL); + if (!mem_page) + return -ENOMEM; + for (i = 0; i < xpram_devs; i++) { + rc = xpram_page_in(mem_page, xpram_devices[i].offset); + if (rc) + goto fail; + xpram_devices[i].csum = csum_partial((const void *) mem_page, + PAGE_SIZE, 0); + } +fail: + free_page(mem_page); + return rc ? -ENXIO : 0; +} + +/* + * Verify checksums for all partitions. 
+ */ +static int xpram_validate_checksums(void) +{ + unsigned long mem_page; + unsigned int csum; + int rc, i; + + rc = 0; + mem_page = (unsigned long) __get_free_page(GFP_KERNEL); + if (!mem_page) + return -ENOMEM; + for (i = 0; i < xpram_devs; i++) { + rc = xpram_page_in(mem_page, xpram_devices[i].offset); + if (rc) + goto fail; + csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0); + if (xpram_devices[i].csum != csum) { + rc = -EINVAL; + goto fail; + } + } +fail: + free_page(mem_page); + return rc ? -ENXIO : 0; +} + +/* + * Resume failed: Print error message and call panic. + */ +static void xpram_resume_error(const char *message) +{ + pr_err("Resume error: %s\n", message); + panic("xpram resume error\n"); +} + +/* + * Check if xpram setup changed between suspend and resume. + */ +static int xpram_restore(struct device *dev) +{ + if (!xpram_pages) + return 0; + if (xpram_present() != 0) + xpram_resume_error("xpram disappeared"); + if (xpram_pages != xpram_highest_page_index() + 1) + xpram_resume_error("Size of xpram changed"); + if (xpram_validate_checksums()) + xpram_resume_error("Data of xpram changed"); + return 0; +} + +/* + * Save necessary state in suspend. + */ +static int xpram_freeze(struct device *dev) +{ + return xpram_save_checksums(); +} + +static struct dev_pm_ops xpram_pm_ops = { + .freeze = xpram_freeze, + .restore = xpram_restore, +}; + +static struct platform_driver xpram_pdrv = { + .driver = { + .name = XPRAM_NAME, + .owner = THIS_MODULE, + .pm = &xpram_pm_ops, + }, +}; + +static struct platform_device *xpram_pdev; + +/* * Finally, the init/exit functions. */ static void __exit xpram_exit(void) @@ -394,6 +498,8 @@ static void __exit xpram_exit(void) put_disk(xpram_disks[i]); } unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); + platform_device_unregister(xpram_pdev); + platform_driver_unregister(&xpram_pdrv); } static int __init xpram_init(void) @@ -411,7 +517,24 @@ static int __init xpram_init(void) rc = xpram_setup_sizes(xpram_pages); if (rc) return rc; - return xpram_setup_blkdev(); + rc = platform_driver_register(&xpram_pdrv); + if (rc) + return rc; + xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0); + if (IS_ERR(xpram_pdev)) { + rc = PTR_ERR(xpram_pdev); + goto fail_platform_driver_unregister; + } + rc = xpram_setup_blkdev(); + if (rc) + goto fail_platform_device_unregister; + return 0; + +fail_platform_device_unregister: + platform_device_unregister(xpram_pdev); +fail_platform_driver_unregister: + platform_driver_unregister(&xpram_pdrv); + return rc; } module_init(xpram_init); diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 9ab06e0..b79f31a 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -1,14 +1,12 @@ /* - * drivers/s390/char/con3215.c - * 3215 line mode terminal driver. + * 3215 line mode terminal driver. * - * S390 version - * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Copyright IBM Corp. 
1999, 2009 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * - * Updated: - * Aug-2000: Added tab support - * Dan Morrison, IBM Corporation (dmorriso@cse.buffalo.edu) + * Updated: + * Aug-2000: Added tab support + * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> */ #include <linux/module.h> @@ -56,6 +54,7 @@ #define RAW3215_CLOSING 32 /* set while in close process */ #define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */ #define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */ +#define RAW3215_FROZEN 256 /* set if 3215 is frozen for suspend */ #define TAB_STOP_SIZE 8 /* tab stop size */ @@ -111,8 +110,8 @@ static struct tty_driver *tty3215_driver; /* * Get a request structure from the free list */ -static inline struct raw3215_req * -raw3215_alloc_req(void) { +static inline struct raw3215_req *raw3215_alloc_req(void) +{ struct raw3215_req *req; unsigned long flags; @@ -126,8 +125,8 @@ raw3215_alloc_req(void) { /* * Put a request structure back to the free list */ -static inline void -raw3215_free_req(struct raw3215_req *req) { +static inline void raw3215_free_req(struct raw3215_req *req) +{ unsigned long flags; if (req->type == RAW3215_FREE) @@ -145,8 +144,7 @@ raw3215_free_req(struct raw3215_req *req) { * because a 3215 terminal won't accept a new read before the old one is * completed. */ -static void -raw3215_mk_read_req(struct raw3215_info *raw) +static void raw3215_mk_read_req(struct raw3215_info *raw) { struct raw3215_req *req; struct ccw1 *ccw; @@ -174,8 +172,7 @@ raw3215_mk_read_req(struct raw3215_info *raw) * buffer to the 3215 device. If a queued write exists it is replaced by * the new, probably lengthened request. */ -static void -raw3215_mk_write_req(struct raw3215_info *raw) +static void raw3215_mk_write_req(struct raw3215_info *raw) { struct raw3215_req *req; struct ccw1 *ccw; @@ -251,8 +248,7 @@ raw3215_mk_write_req(struct raw3215_info *raw) /* * Start a read or a write request */ -static void -raw3215_start_io(struct raw3215_info *raw) +static void raw3215_start_io(struct raw3215_info *raw) { struct raw3215_req *req; int res; @@ -290,8 +286,7 @@ raw3215_start_io(struct raw3215_info *raw) /* * Function to start a delayed output after RAW3215_TIMEOUT seconds */ -static void -raw3215_timeout(unsigned long __data) +static void raw3215_timeout(unsigned long __data) { struct raw3215_info *raw = (struct raw3215_info *) __data; unsigned long flags; @@ -300,8 +295,10 @@ raw3215_timeout(unsigned long __data) if (raw->flags & RAW3215_TIMER_RUNS) { del_timer(&raw->timer); raw->flags &= ~RAW3215_TIMER_RUNS; - raw3215_mk_write_req(raw); - raw3215_start_io(raw); + if (!(raw->flags & RAW3215_FROZEN)) { + raw3215_mk_write_req(raw); + raw3215_start_io(raw); + } } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } @@ -312,10 +309,9 @@ raw3215_timeout(unsigned long __data) * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not * done immediately a timer is started with a delay of RAW3215_TIMEOUT. 
*/ -static inline void -raw3215_try_io(struct raw3215_info *raw) +static inline void raw3215_try_io(struct raw3215_info *raw) { - if (!(raw->flags & RAW3215_ACTIVE)) + if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN)) return; if (raw->queued_read != NULL) raw3215_start_io(raw); @@ -359,8 +355,8 @@ static void raw3215_next_io(struct raw3215_info *raw) /* * Interrupt routine, called from common io layer */ -static void -raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) +static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) { struct raw3215_info *raw; struct raw3215_req *req; @@ -459,14 +455,40 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } /* + * Drop the oldest line from the output buffer. + */ +static void raw3215_drop_line(struct raw3215_info *raw) +{ + int ix; + char ch; + + BUG_ON(raw->written != 0); + ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1); + while (raw->count > 0) { + ch = raw->buffer[ix]; + ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1); + raw->count--; + if (ch == 0x15) + break; + } + raw->head = ix; +} + +/* * Wait until length bytes are available int the output buffer. * Has to be called with the s390irq lock held. Can be called * disabled. */ -static void -raw3215_make_room(struct raw3215_info *raw, unsigned int length) +static void raw3215_make_room(struct raw3215_info *raw, unsigned int length) { while (RAW3215_BUFFER_SIZE - raw->count < length) { + /* While console is frozen for suspend we have no other + * choice but to drop message from the buffer to make + * room for even more messages. */ + if (raw->flags & RAW3215_FROZEN) { + raw3215_drop_line(raw); + continue; + } /* there might be a request pending */ raw->flags |= RAW3215_FLUSHING; raw3215_mk_write_req(raw); @@ -488,8 +510,8 @@ raw3215_make_room(struct raw3215_info *raw, unsigned int length) /* * String write routine for 3215 devices */ -static void -raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length) +static void raw3215_write(struct raw3215_info *raw, const char *str, + unsigned int length) { unsigned long flags; int c, count; @@ -529,8 +551,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length) /* * Put character routine for 3215 devices */ -static void -raw3215_putchar(struct raw3215_info *raw, unsigned char ch) +static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch) { unsigned long flags; unsigned int length, i; @@ -566,8 +587,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch) * Flush routine, it simply sets the flush flag and tries to start * pending IO. */ -static void -raw3215_flush_buffer(struct raw3215_info *raw) +static void raw3215_flush_buffer(struct raw3215_info *raw) { unsigned long flags; @@ -583,8 +603,7 @@ raw3215_flush_buffer(struct raw3215_info *raw) /* * Fire up a 3215 device. */ -static int -raw3215_startup(struct raw3215_info *raw) +static int raw3215_startup(struct raw3215_info *raw) { unsigned long flags; @@ -602,8 +621,7 @@ raw3215_startup(struct raw3215_info *raw) /* * Shutdown a 3215 device. 
*/ -static void -raw3215_shutdown(struct raw3215_info *raw) +static void raw3215_shutdown(struct raw3215_info *raw) { DECLARE_WAITQUEUE(wait, current); unsigned long flags; @@ -628,8 +646,7 @@ raw3215_shutdown(struct raw3215_info *raw) spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } -static int -raw3215_probe (struct ccw_device *cdev) +static int raw3215_probe (struct ccw_device *cdev) { struct raw3215_info *raw; int line; @@ -675,8 +692,7 @@ raw3215_probe (struct ccw_device *cdev) return 0; } -static void -raw3215_remove (struct ccw_device *cdev) +static void raw3215_remove (struct ccw_device *cdev) { struct raw3215_info *raw; @@ -689,8 +705,7 @@ raw3215_remove (struct ccw_device *cdev) } } -static int -raw3215_set_online (struct ccw_device *cdev) +static int raw3215_set_online (struct ccw_device *cdev) { struct raw3215_info *raw; @@ -701,8 +716,7 @@ raw3215_set_online (struct ccw_device *cdev) return raw3215_startup(raw); } -static int -raw3215_set_offline (struct ccw_device *cdev) +static int raw3215_set_offline (struct ccw_device *cdev) { struct raw3215_info *raw; @@ -715,6 +729,36 @@ raw3215_set_offline (struct ccw_device *cdev) return 0; } +static int raw3215_pm_stop(struct ccw_device *cdev) +{ + struct raw3215_info *raw; + unsigned long flags; + + /* Empty the output buffer, then prevent new I/O. */ + raw = cdev->dev.driver_data; + spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); + raw3215_make_room(raw, RAW3215_BUFFER_SIZE); + raw->flags |= RAW3215_FROZEN; + spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); + return 0; +} + +static int raw3215_pm_start(struct ccw_device *cdev) +{ + struct raw3215_info *raw; + unsigned long flags; + + /* Allow I/O again and flush output buffer. */ + raw = cdev->dev.driver_data; + spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); + raw->flags &= ~RAW3215_FROZEN; + raw->flags |= RAW3215_FLUSHING; + raw3215_try_io(raw); + raw->flags &= ~RAW3215_FLUSHING; + spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); + return 0; +} + static struct ccw_device_id raw3215_id[] = { { CCW_DEVICE(0x3215, 0) }, { /* end of list */ }, @@ -728,14 +772,17 @@ static struct ccw_driver raw3215_ccw_driver = { .remove = &raw3215_remove, .set_online = &raw3215_set_online, .set_offline = &raw3215_set_offline, + .freeze = &raw3215_pm_stop, + .thaw = &raw3215_pm_start, + .restore = &raw3215_pm_start, }; #ifdef CONFIG_TN3215_CONSOLE /* * Write a string to the 3215 console */ -static void -con3215_write(struct console *co, const char *str, unsigned int count) +static void con3215_write(struct console *co, const char *str, + unsigned int count) { struct raw3215_info *raw; int i; @@ -768,13 +815,17 @@ static struct tty_driver *con3215_device(struct console *c, int *index) * panic() calls con3215_flush through a panic_notifier * before the system enters a disabled, endless loop. */ -static void -con3215_flush(void) +static void con3215_flush(void) { struct raw3215_info *raw; unsigned long flags; raw = raw3215[0]; /* console 3215 is the first one */ + if (raw->flags & RAW3215_FROZEN) + /* The console is still frozen for suspend. */ + if (ccw_device_force_console()) + /* Forcing didn't work, no panic message .. */ + return; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_make_room(raw, RAW3215_BUFFER_SIZE); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); @@ -811,8 +862,7 @@ static struct console con3215 = { * 3215 console initialization code called from console_init(). * NOTE: This is called before kmalloc is available. 
*/ -static int __init -con3215_init(void) +static int __init con3215_init(void) { struct ccw_device *cdev; struct raw3215_info *raw; @@ -875,8 +925,7 @@ console_initcall(con3215_init); * * This routine is called whenever a 3215 tty is opened. */ -static int -tty3215_open(struct tty_struct *tty, struct file * filp) +static int tty3215_open(struct tty_struct *tty, struct file * filp) { struct raw3215_info *raw; int retval, line; @@ -909,8 +958,7 @@ tty3215_open(struct tty_struct *tty, struct file * filp) * This routine is called when the 3215 tty is closed. We wait * for the remaining request to be completed. Then we clean up. */ -static void -tty3215_close(struct tty_struct *tty, struct file * filp) +static void tty3215_close(struct tty_struct *tty, struct file * filp) { struct raw3215_info *raw; @@ -927,8 +975,7 @@ tty3215_close(struct tty_struct *tty, struct file * filp) /* * Returns the amount of free space in the output buffer. */ -static int -tty3215_write_room(struct tty_struct *tty) +static int tty3215_write_room(struct tty_struct *tty) { struct raw3215_info *raw; @@ -944,9 +991,8 @@ tty3215_write_room(struct tty_struct *tty) /* * String write routine for 3215 ttys */ -static int -tty3215_write(struct tty_struct * tty, - const unsigned char *buf, int count) +static int tty3215_write(struct tty_struct * tty, + const unsigned char *buf, int count) { struct raw3215_info *raw; @@ -960,8 +1006,7 @@ tty3215_write(struct tty_struct * tty, /* * Put character routine for 3215 ttys */ -static int -tty3215_put_char(struct tty_struct *tty, unsigned char ch) +static int tty3215_put_char(struct tty_struct *tty, unsigned char ch) { struct raw3215_info *raw; @@ -972,16 +1017,14 @@ tty3215_put_char(struct tty_struct *tty, unsigned char ch) return 1; } -static void -tty3215_flush_chars(struct tty_struct *tty) +static void tty3215_flush_chars(struct tty_struct *tty) { } /* * Returns the number of characters in the output buffer */ -static int -tty3215_chars_in_buffer(struct tty_struct *tty) +static int tty3215_chars_in_buffer(struct tty_struct *tty) { struct raw3215_info *raw; @@ -989,8 +1032,7 @@ tty3215_chars_in_buffer(struct tty_struct *tty) return raw->count; } -static void -tty3215_flush_buffer(struct tty_struct *tty) +static void tty3215_flush_buffer(struct tty_struct *tty) { struct raw3215_info *raw; @@ -1002,9 +1044,8 @@ tty3215_flush_buffer(struct tty_struct *tty) /* * Currently we don't have any io controls for 3215 ttys */ -static int -tty3215_ioctl(struct tty_struct *tty, struct file * file, - unsigned int cmd, unsigned long arg) +static int tty3215_ioctl(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg) { if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; @@ -1019,8 +1060,7 @@ tty3215_ioctl(struct tty_struct *tty, struct file * file, /* * Disable reading from a 3215 tty */ -static void -tty3215_throttle(struct tty_struct * tty) +static void tty3215_throttle(struct tty_struct * tty) { struct raw3215_info *raw; @@ -1031,8 +1071,7 @@ tty3215_throttle(struct tty_struct * tty) /* * Enable reading from a 3215 tty */ -static void -tty3215_unthrottle(struct tty_struct * tty) +static void tty3215_unthrottle(struct tty_struct * tty) { struct raw3215_info *raw; unsigned long flags; @@ -1049,8 +1088,7 @@ tty3215_unthrottle(struct tty_struct * tty) /* * Disable writing to a 3215 tty */ -static void -tty3215_stop(struct tty_struct *tty) +static void tty3215_stop(struct tty_struct *tty) { struct raw3215_info *raw; @@ -1061,8 +1099,7 @@ tty3215_stop(struct tty_struct *tty) 
/* * Enable writing to a 3215 tty */ -static void -tty3215_start(struct tty_struct *tty) +static void tty3215_start(struct tty_struct *tty) { struct raw3215_info *raw; unsigned long flags; @@ -1096,8 +1133,7 @@ static const struct tty_operations tty3215_ops = { * 3215 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. */ -static int __init -tty3215_init(void) +static int __init tty3215_init(void) { struct tty_driver *driver; int ret; @@ -1142,8 +1178,7 @@ tty3215_init(void) return 0; } -static void __exit -tty3215_exit(void) +static void __exit tty3215_exit(void) { tty_unregister_driver(tty3215_driver); put_tty_driver(tty3215_driver); diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index ed5396d..44d02e3 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -1,11 +1,10 @@ /* - * drivers/s390/char/con3270.c - * IBM/3270 Driver - console view. + * IBM/3270 Driver - console view. * - * Author(s): - * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) - * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> - * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): + * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) + * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 2003, 2009 */ #include <linux/bootmem.h> @@ -530,6 +529,7 @@ con3270_flush(void) cp = condev; if (!cp->view.dev) return; + raw3270_pm_unfreeze(&cp->view); spin_lock_irqsave(&cp->view.lock, flags); con3270_wait_write(cp); cp->nr_up = 0; diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 40759c3..097d384 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -1,11 +1,10 @@ /* - * drivers/s390/char/fs3270.c - * IBM/3270 Driver - fullscreen driver. + * IBM/3270 Driver - fullscreen driver. * - * Author(s): - * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) - * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com> - * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): + * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) + * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 2003, 2009 */ #include <linux/bootmem.h> @@ -399,6 +398,11 @@ fs3270_free_view(struct raw3270_view *view) static void fs3270_release(struct raw3270_view *view) { + struct fs3270 *fp; + + fp = (struct fs3270 *) view; + if (fp->fs_pid) + kill_pid(fp->fs_pid, SIGHUP, 1); } /* View to a 3270 device. Can be console, tty or fullscreen. */ diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 97e63cf..75a8831 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -1,10 +1,9 @@ /* - * drivers/s390/char/monreader.c - * * Character device driver for reading z/VM *MONITOR service records. * - * Copyright IBM Corp. 2004, 2008 - * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> + * Copyright IBM Corp. 
2004, 2009 + * + * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ #define KMSG_COMPONENT "monreader" @@ -22,6 +21,7 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/poll.h> +#include <linux/device.h> #include <net/iucv/iucv.h> #include <asm/uaccess.h> #include <asm/ebcdic.h> @@ -78,6 +78,7 @@ static u8 user_data_sever[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; +static struct device *monreader_device; /****************************************************************************** * helper functions * @@ -319,11 +320,12 @@ static int mon_open(struct inode *inode, struct file *filp) goto out_path; } filp->private_data = monpriv; + monreader_device->driver_data = monpriv; unlock_kernel(); return nonseekable_open(inode, filp); out_path: - kfree(monpriv->path); + iucv_path_free(monpriv->path); out_priv: mon_free_mem(monpriv); out_use: @@ -341,10 +343,13 @@ static int mon_close(struct inode *inode, struct file *filp) /* * Close IUCV connection and unregister */ - rc = iucv_path_sever(monpriv->path, user_data_sever); - if (rc) - pr_warning("Disconnecting the z/VM *MONITOR system service " - "failed with rc=%i\n", rc); + if (monpriv->path) { + rc = iucv_path_sever(monpriv->path, user_data_sever); + if (rc) + pr_warning("Disconnecting the z/VM *MONITOR system " + "service failed with rc=%i\n", rc); + iucv_path_free(monpriv->path); + } atomic_set(&monpriv->iucv_severed, 0); atomic_set(&monpriv->iucv_connected, 0); @@ -452,6 +457,94 @@ static struct miscdevice mon_dev = { .minor = MISC_DYNAMIC_MINOR, }; + +/****************************************************************************** + * suspend / resume * + *****************************************************************************/ +static int monreader_freeze(struct device *dev) +{ + struct mon_private *monpriv = dev->driver_data; + int rc; + + if (!monpriv) + return 0; + if (monpriv->path) { + rc = iucv_path_sever(monpriv->path, user_data_sever); + if (rc) + pr_warning("Disconnecting the z/VM *MONITOR system " + "service failed with rc=%i\n", rc); + iucv_path_free(monpriv->path); + } + atomic_set(&monpriv->iucv_severed, 0); + atomic_set(&monpriv->iucv_connected, 0); + atomic_set(&monpriv->read_ready, 0); + atomic_set(&monpriv->msglim_count, 0); + monpriv->write_index = 0; + monpriv->read_index = 0; + monpriv->path = NULL; + return 0; +} + +static int monreader_thaw(struct device *dev) +{ + struct mon_private *monpriv = dev->driver_data; + int rc; + + if (!monpriv) + return 0; + rc = -ENOMEM; + monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); + if (!monpriv->path) + goto out; + rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, + MON_SERVICE, NULL, user_data_connect, monpriv); + if (rc) { + pr_err("Connecting to the z/VM *MONITOR system service " + "failed with rc=%i\n", rc); + goto out_path; + } + wait_event(mon_conn_wait_queue, + atomic_read(&monpriv->iucv_connected) || + atomic_read(&monpriv->iucv_severed)); + if (atomic_read(&monpriv->iucv_severed)) + goto out_path; + return 0; +out_path: + rc = -EIO; + iucv_path_free(monpriv->path); + monpriv->path = NULL; +out: + atomic_set(&monpriv->iucv_severed, 1); + return rc; +} + +static int monreader_restore(struct device *dev) +{ + int rc; + + segment_unload(mon_dcss_name); + rc = segment_load(mon_dcss_name, SEGMENT_SHARED, + &mon_dcss_start, &mon_dcss_end); + if (rc < 0) { + segment_warning(rc, mon_dcss_name); + panic("fatal monreader resume error: no monitor dcss\n"); + } + return monreader_thaw(dev); +} + 
+static struct dev_pm_ops monreader_pm_ops = { + .freeze = monreader_freeze, + .thaw = monreader_thaw, + .restore = monreader_restore, +}; + +static struct device_driver monreader_driver = { + .name = "monreader", + .bus = &iucv_bus, + .pm = &monreader_pm_ops, +}; + + /****************************************************************************** * module init/exit * *****************************************************************************/ @@ -475,16 +568,33 @@ static int __init mon_init(void) return rc; } + rc = driver_register(&monreader_driver); + if (rc) + goto out_iucv; + monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!monreader_device) + goto out_driver; + dev_set_name(monreader_device, "monreader-dev"); + monreader_device->bus = &iucv_bus; + monreader_device->parent = iucv_root; + monreader_device->driver = &monreader_driver; + monreader_device->release = (void (*)(struct device *))kfree; + rc = device_register(monreader_device); + if (rc) { + kfree(monreader_device); + goto out_driver; + } + rc = segment_type(mon_dcss_name); if (rc < 0) { segment_warning(rc, mon_dcss_name); - goto out_iucv; + goto out_device; } if (rc != SEG_TYPE_SC) { pr_err("The specified *MONITOR DCSS %s does not have the " "required type SC\n", mon_dcss_name); rc = -EINVAL; - goto out_iucv; + goto out_device; } rc = segment_load(mon_dcss_name, SEGMENT_SHARED, @@ -492,7 +602,7 @@ static int __init mon_init(void) if (rc < 0) { segment_warning(rc, mon_dcss_name); rc = -EINVAL; - goto out_iucv; + goto out_device; } dcss_mkname(mon_dcss_name, &user_data_connect[8]); @@ -503,6 +613,10 @@ static int __init mon_init(void) out: segment_unload(mon_dcss_name); +out_device: + device_unregister(monreader_device); +out_driver: + driver_unregister(&monreader_driver); out_iucv: iucv_unregister(&monreader_iucv_handler, 1); return rc; @@ -512,6 +626,8 @@ static void __exit mon_exit(void) { segment_unload(mon_dcss_name); WARN_ON(misc_deregister(&mon_dev) != 0); + device_unregister(monreader_device); + driver_unregister(&monreader_driver); iucv_unregister(&monreader_iucv_handler, 1); return; } diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index c7d7483..66fb8eb 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -1,9 +1,7 @@ /* - * drivers/s390/char/monwriter.c - * * Character device driver for writing z/VM *MONITOR service records. * - * Copyright (C) IBM Corp. 2006 + * Copyright IBM Corp. 
2006, 2009 * * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> */ @@ -22,6 +20,7 @@ #include <linux/ctype.h> #include <linux/poll.h> #include <linux/mutex.h> +#include <linux/platform_device.h> #include <asm/uaccess.h> #include <asm/ebcdic.h> #include <asm/io.h> @@ -40,7 +39,10 @@ struct mon_buf { char *data; }; +static LIST_HEAD(mon_priv_list); + struct mon_private { + struct list_head priv_list; struct list_head list; struct monwrite_hdr hdr; size_t hdr_to_read; @@ -188,6 +190,7 @@ static int monwrite_open(struct inode *inode, struct file *filp) monpriv->hdr_to_read = sizeof(monpriv->hdr); mutex_init(&monpriv->thread_mutex); filp->private_data = monpriv; + list_add_tail(&monpriv->priv_list, &mon_priv_list); unlock_kernel(); return nonseekable_open(inode, filp); } @@ -206,6 +209,7 @@ static int monwrite_close(struct inode *inode, struct file *filp) kfree(entry->data); kfree(entry); } + list_del(&monpriv->priv_list); kfree(monpriv); return 0; } @@ -281,20 +285,102 @@ static struct miscdevice mon_dev = { }; /* + * suspend/resume + */ + +static int monwriter_freeze(struct device *dev) +{ + struct mon_private *monpriv; + struct mon_buf *monbuf; + + list_for_each_entry(monpriv, &mon_priv_list, priv_list) { + list_for_each_entry(monbuf, &monpriv->list, list) { + if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT) + monwrite_diag(&monbuf->hdr, monbuf->data, + APPLDATA_STOP_REC); + } + } + return 0; +} + +static int monwriter_restore(struct device *dev) +{ + struct mon_private *monpriv; + struct mon_buf *monbuf; + + list_for_each_entry(monpriv, &mon_priv_list, priv_list) { + list_for_each_entry(monbuf, &monpriv->list, list) { + if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL) + monwrite_diag(&monbuf->hdr, monbuf->data, + APPLDATA_START_INTERVAL_REC); + if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG) + monwrite_diag(&monbuf->hdr, monbuf->data, + APPLDATA_START_CONFIG_REC); + } + } + return 0; +} + +static int monwriter_thaw(struct device *dev) +{ + return monwriter_restore(dev); +} + +static struct dev_pm_ops monwriter_pm_ops = { + .freeze = monwriter_freeze, + .thaw = monwriter_thaw, + .restore = monwriter_restore, +}; + +static struct platform_driver monwriter_pdrv = { + .driver = { + .name = "monwriter", + .owner = THIS_MODULE, + .pm = &monwriter_pm_ops, + }, +}; + +static struct platform_device *monwriter_pdev; + +/* * module init/exit */ static int __init mon_init(void) { - if (MACHINE_IS_VM) - return misc_register(&mon_dev); - else + int rc; + + if (!MACHINE_IS_VM) return -ENODEV; + + rc = platform_driver_register(&monwriter_pdrv); + if (rc) + return rc; + + monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL, + 0); + if (IS_ERR(monwriter_pdev)) { + rc = PTR_ERR(monwriter_pdev); + goto out_driver; + } + + rc = misc_register(&mon_dev); + if (rc) + goto out_device; + return 0; + +out_device: + platform_device_unregister(monwriter_pdev); +out_driver: + platform_driver_unregister(&monwriter_pdrv); + return rc; } static void __exit mon_exit(void) { WARN_ON(misc_deregister(&mon_dev) != 0); + platform_device_unregister(monwriter_pdev); + platform_driver_unregister(&monwriter_pdrv); } module_init(mon_init); diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 0b15cf1..81c151b 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -1,11 +1,10 @@ /* - * drivers/s390/char/raw3270.c - * IBM/3270 Driver - core functions. + * IBM/3270 Driver - core functions. 
* - * Author(s): - * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) - * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> - * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): + * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) + * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 2003, 2009 */ #include <linux/bootmem.h> @@ -61,6 +60,7 @@ struct raw3270 { #define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ #define RAW3270_FLAGS_READY 4 /* Device is useable by views */ #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ +#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */ /* Semaphore to protect global data of raw3270 (devices, views, etc). */ static DEFINE_MUTEX(raw3270_mutex); @@ -306,7 +306,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq) spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); rp = view->dev; - if (!rp || rp->view != view) + if (!rp || rp->view != view || + test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) rc = -ENODEV; @@ -323,7 +324,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq) int rc; rp = view->dev; - if (!rp || rp->view != view) + if (!rp || rp->view != view || + test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) rc = -ENODEV; @@ -764,7 +766,8 @@ raw3270_reset(struct raw3270_view *view) int rc; rp = view->dev; - if (!rp || rp->view != view) + if (!rp || rp->view != view || + test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) rc = -ENODEV; @@ -922,6 +925,8 @@ raw3270_activate_view(struct raw3270_view *view) rc = 0; else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) rc = -ENODEV; + else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) + rc = -EACCES; else { oldview = NULL; if (rp->view) { @@ -969,7 +974,8 @@ raw3270_deactivate_view(struct raw3270_view *view) list_del_init(&view->list); list_add_tail(&view->list, &rp->view_list); /* Try to activate another view. */ - if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { + if (test_bit(RAW3270_FLAGS_READY, &rp->flags) && + !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { list_for_each_entry(view, &rp->view_list, list) { rp->view = view; if (view->fn->activate(view) == 0) @@ -1068,7 +1074,8 @@ raw3270_del_view(struct raw3270_view *view) rp->view = NULL; } list_del_init(&view->list); - if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) { + if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) && + !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { /* Try to activate another view. */ list_for_each_entry(nv, &rp->view_list, list) { if (nv->fn->activate(nv) == 0) { @@ -1337,6 +1344,58 @@ raw3270_set_offline (struct ccw_device *cdev) return 0; } +static int raw3270_pm_stop(struct ccw_device *cdev) +{ + struct raw3270 *rp; + struct raw3270_view *view; + unsigned long flags; + + rp = cdev->dev.driver_data; + if (!rp) + return 0; + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); + if (rp->view) + rp->view->fn->deactivate(rp->view); + if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) { + /* + * Release tty and fullscreen for all non-console + * devices. 
+ */ + list_for_each_entry(view, &rp->view_list, list) { + if (view->fn->release) + view->fn->release(view); + } + } + set_bit(RAW3270_FLAGS_FROZEN, &rp->flags); + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); + return 0; +} + +static int raw3270_pm_start(struct ccw_device *cdev) +{ + struct raw3270 *rp; + unsigned long flags; + + rp = cdev->dev.driver_data; + if (!rp) + return 0; + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); + clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags); + if (rp->view) + rp->view->fn->activate(rp->view); + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); + return 0; +} + +void raw3270_pm_unfreeze(struct raw3270_view *view) +{ + struct raw3270 *rp; + + rp = view->dev; + if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) + ccw_device_force_console(); +} + static struct ccw_device_id raw3270_id[] = { { CCW_DEVICE(0x3270, 0) }, { CCW_DEVICE(0x3271, 0) }, @@ -1360,6 +1419,9 @@ static struct ccw_driver raw3270_ccw_driver = { .remove = &raw3270_remove, .set_online = &raw3270_set_online, .set_offline = &raw3270_set_offline, + .freeze = &raw3270_pm_stop, + .thaw = &raw3270_pm_start, + .restore = &raw3270_pm_start, }; static int diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 90beaa8..ed34eb2 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -1,11 +1,10 @@ /* - * drivers/s390/char/raw3270.h - * IBM/3270 Driver + * IBM/3270 Driver * - * Author(s): - * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) - * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> - * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): + * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) + * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 2003, 2009 */ #include <asm/idals.h> @@ -195,6 +194,7 @@ void raw3270_wait_cons_dev(struct raw3270 *); /* Notifier for device addition/removal */ int raw3270_register_notifier(void (*notifier)(int, int)); void raw3270_unregister_notifier(void (*notifier)(int, int)); +void raw3270_pm_unfreeze(struct raw3270_view *); /* * Little memory allocator for string objects. diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 4377e93..a983f50 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -1,11 +1,10 @@ /* - * drivers/s390/char/sclp.c - * core function to access sclp interface + * core function to access sclp interface * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke <mpeschke@de.ibm.com> - * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 
1999, 2009 + * + * Author(s): Martin Peschke <mpeschke@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/module.h> @@ -16,6 +15,9 @@ #include <linux/reboot.h> #include <linux/jiffies.h> #include <linux/init.h> +#include <linux/suspend.h> +#include <linux/completion.h> +#include <linux/platform_device.h> #include <asm/types.h> #include <asm/s390_ext.h> @@ -47,6 +49,16 @@ static struct sclp_req sclp_init_req; static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); +/* Suspend request */ +static DECLARE_COMPLETION(sclp_request_queue_flushed); + +static void sclp_suspend_req_cb(struct sclp_req *req, void *data) +{ + complete(&sclp_request_queue_flushed); +} + +static struct sclp_req sclp_suspend_req; + /* Timer for request retries. */ static struct timer_list sclp_request_timer; @@ -84,6 +96,12 @@ static volatile enum sclp_mask_state_t { sclp_mask_state_initializing } sclp_mask_state = sclp_mask_state_idle; +/* Internal state: is the driver suspended? */ +static enum sclp_suspend_state_t { + sclp_suspend_state_running, + sclp_suspend_state_suspended, +} sclp_suspend_state = sclp_suspend_state_running; + /* Maximum retry counts */ #define SCLP_INIT_RETRY 3 #define SCLP_MASK_RETRY 3 @@ -211,6 +229,8 @@ sclp_process_queue(void) del_timer(&sclp_request_timer); while (!list_empty(&sclp_req_queue)) { req = list_entry(sclp_req_queue.next, struct sclp_req, list); + if (!req->sccb) + goto do_post; rc = __sclp_start_request(req); if (rc == 0) break; @@ -222,6 +242,7 @@ sclp_process_queue(void) sclp_request_timeout, 0); break; } +do_post: /* Post-processing for aborted request */ list_del(&req->list); if (req->callback) { @@ -233,6 +254,19 @@ sclp_process_queue(void) spin_unlock_irqrestore(&sclp_lock, flags); } +static int __sclp_can_add_request(struct sclp_req *req) +{ + if (req == &sclp_suspend_req || req == &sclp_init_req) + return 1; + if (sclp_suspend_state != sclp_suspend_state_running) + return 0; + if (sclp_init_state != sclp_init_state_initialized) + return 0; + if (sclp_activation_state != sclp_activation_state_active) + return 0; + return 1; +} + /* Queue a new request. Return zero on success, non-zero otherwise. 
*/ int sclp_add_request(struct sclp_req *req) @@ -241,9 +275,7 @@ sclp_add_request(struct sclp_req *req) int rc; spin_lock_irqsave(&sclp_lock, flags); - if ((sclp_init_state != sclp_init_state_initialized || - sclp_activation_state != sclp_activation_state_active) && - req != &sclp_init_req) { + if (!__sclp_can_add_request(req)) { spin_unlock_irqrestore(&sclp_lock, flags); return -EIO; } @@ -254,10 +286,16 @@ sclp_add_request(struct sclp_req *req) /* Start if request is first in list */ if (sclp_running_state == sclp_running_state_idle && req->list.prev == &sclp_req_queue) { + if (!req->sccb) { + list_del(&req->list); + rc = -ENODATA; + goto out; + } rc = __sclp_start_request(req); if (rc) list_del(&req->list); } +out: spin_unlock_irqrestore(&sclp_lock, flags); return rc; } @@ -560,6 +598,7 @@ sclp_register(struct sclp_register *reg) /* Trigger initial state change callback */ reg->sclp_receive_mask = 0; reg->sclp_send_mask = 0; + reg->pm_event_posted = 0; list_add(®->list, &sclp_reg_list); spin_unlock_irqrestore(&sclp_lock, flags); rc = sclp_init_mask(1); @@ -880,20 +919,134 @@ static struct notifier_block sclp_reboot_notifier = { .notifier_call = sclp_reboot_event }; +/* + * Suspend/resume SCLP notifier implementation + */ + +static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback) +{ + struct sclp_register *reg; + unsigned long flags; + + if (!rollback) { + spin_lock_irqsave(&sclp_lock, flags); + list_for_each_entry(reg, &sclp_reg_list, list) + reg->pm_event_posted = 0; + spin_unlock_irqrestore(&sclp_lock, flags); + } + do { + spin_lock_irqsave(&sclp_lock, flags); + list_for_each_entry(reg, &sclp_reg_list, list) { + if (rollback && reg->pm_event_posted) + goto found; + if (!rollback && !reg->pm_event_posted) + goto found; + } + spin_unlock_irqrestore(&sclp_lock, flags); + return; +found: + spin_unlock_irqrestore(&sclp_lock, flags); + if (reg->pm_event_fn) + reg->pm_event_fn(reg, sclp_pm_event); + reg->pm_event_posted = rollback ? 
0 : 1; + } while (1); +} + +/* + * Susend/resume callbacks for platform device + */ + +static int sclp_freeze(struct device *dev) +{ + unsigned long flags; + int rc; + + sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0); + + spin_lock_irqsave(&sclp_lock, flags); + sclp_suspend_state = sclp_suspend_state_suspended; + spin_unlock_irqrestore(&sclp_lock, flags); + + /* Init supend data */ + memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req)); + sclp_suspend_req.callback = sclp_suspend_req_cb; + sclp_suspend_req.status = SCLP_REQ_FILLED; + init_completion(&sclp_request_queue_flushed); + + rc = sclp_add_request(&sclp_suspend_req); + if (rc == 0) + wait_for_completion(&sclp_request_queue_flushed); + else if (rc != -ENODATA) + goto fail_thaw; + + rc = sclp_deactivate(); + if (rc) + goto fail_thaw; + return 0; + +fail_thaw: + spin_lock_irqsave(&sclp_lock, flags); + sclp_suspend_state = sclp_suspend_state_running; + spin_unlock_irqrestore(&sclp_lock, flags); + sclp_pm_event(SCLP_PM_EVENT_THAW, 1); + return rc; +} + +static int sclp_undo_suspend(enum sclp_pm_event event) +{ + unsigned long flags; + int rc; + + rc = sclp_reactivate(); + if (rc) + return rc; + + spin_lock_irqsave(&sclp_lock, flags); + sclp_suspend_state = sclp_suspend_state_running; + spin_unlock_irqrestore(&sclp_lock, flags); + + sclp_pm_event(event, 0); + return 0; +} + +static int sclp_thaw(struct device *dev) +{ + return sclp_undo_suspend(SCLP_PM_EVENT_THAW); +} + +static int sclp_restore(struct device *dev) +{ + return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); +} + +static struct dev_pm_ops sclp_pm_ops = { + .freeze = sclp_freeze, + .thaw = sclp_thaw, + .restore = sclp_restore, +}; + +static struct platform_driver sclp_pdrv = { + .driver = { + .name = "sclp", + .owner = THIS_MODULE, + .pm = &sclp_pm_ops, + }, +}; + +static struct platform_device *sclp_pdev; + /* Initialize SCLP driver. Return zero if driver is operational, non-zero * otherwise. 
*/ static int sclp_init(void) { unsigned long flags; - int rc; + int rc = 0; spin_lock_irqsave(&sclp_lock, flags); /* Check for previous or running initialization */ - if (sclp_init_state != sclp_init_state_uninitialized) { - spin_unlock_irqrestore(&sclp_lock, flags); - return 0; - } + if (sclp_init_state != sclp_init_state_uninitialized) + goto fail_unlock; sclp_init_state = sclp_init_state_initializing; /* Set up variables */ INIT_LIST_HEAD(&sclp_req_queue); @@ -904,27 +1057,17 @@ sclp_init(void) spin_unlock_irqrestore(&sclp_lock, flags); rc = sclp_check_interface(); spin_lock_irqsave(&sclp_lock, flags); - if (rc) { - sclp_init_state = sclp_init_state_uninitialized; - spin_unlock_irqrestore(&sclp_lock, flags); - return rc; - } + if (rc) + goto fail_init_state_uninitialized; /* Register reboot handler */ rc = register_reboot_notifier(&sclp_reboot_notifier); - if (rc) { - sclp_init_state = sclp_init_state_uninitialized; - spin_unlock_irqrestore(&sclp_lock, flags); - return rc; - } + if (rc) + goto fail_init_state_uninitialized; /* Register interrupt handler */ rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler, &ext_int_info_hwc); - if (rc) { - unregister_reboot_notifier(&sclp_reboot_notifier); - sclp_init_state = sclp_init_state_uninitialized; - spin_unlock_irqrestore(&sclp_lock, flags); - return rc; - } + if (rc) + goto fail_unregister_reboot_notifier; sclp_init_state = sclp_init_state_initialized; spin_unlock_irqrestore(&sclp_lock, flags); /* Enable service-signal external interruption - needs to happen with @@ -932,11 +1075,56 @@ sclp_init(void) ctl_set_bit(0, 9); sclp_init_mask(1); return 0; + +fail_unregister_reboot_notifier: + unregister_reboot_notifier(&sclp_reboot_notifier); +fail_init_state_uninitialized: + sclp_init_state = sclp_init_state_uninitialized; +fail_unlock: + spin_unlock_irqrestore(&sclp_lock, flags); + return rc; } +/* + * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able + * to print the panic message. + */ +static int sclp_panic_notify(struct notifier_block *self, + unsigned long event, void *data) +{ + if (sclp_suspend_state == sclp_suspend_state_suspended) + sclp_undo_suspend(SCLP_PM_EVENT_THAW); + return NOTIFY_OK; +} + +static struct notifier_block sclp_on_panic_nb = { + .notifier_call = sclp_panic_notify, + .priority = SCLP_PANIC_PRIO, +}; + static __init int sclp_initcall(void) { + int rc; + + rc = platform_driver_register(&sclp_pdrv); + if (rc) + return rc; + sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); + rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; + if (rc) + goto fail_platform_driver_unregister; + rc = atomic_notifier_chain_register(&panic_notifier_list, + &sclp_on_panic_nb); + if (rc) + goto fail_platform_device_unregister; + return sclp_init(); + +fail_platform_device_unregister: + platform_device_unregister(sclp_pdev); +fail_platform_driver_unregister: + platform_driver_unregister(&sclp_pdrv); + return rc; } arch_initcall(sclp_initcall); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index bac80e8..60e7cb0 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -1,10 +1,8 @@ /* - * drivers/s390/char/sclp.h + * Copyright IBM Corp. 
1999, 2009 * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke <mpeschke@de.ibm.com> - * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Author(s): Martin Peschke <mpeschke@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #ifndef __SCLP_H__ @@ -17,7 +15,7 @@ /* maximum number of pages concerning our own memory management */ #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) -#define MAX_CONSOLE_PAGES 4 +#define MAX_CONSOLE_PAGES 6 #define EVTYP_OPCMD 0x01 #define EVTYP_MSG 0x02 @@ -68,6 +66,15 @@ typedef unsigned int sclp_cmdw_t; #define GDS_KEY_SELFDEFTEXTMSG 0x31 +enum sclp_pm_event { + SCLP_PM_EVENT_FREEZE, + SCLP_PM_EVENT_THAW, + SCLP_PM_EVENT_RESTORE, +}; + +#define SCLP_PANIC_PRIO 1 +#define SCLP_PANIC_PRIO_CLIENT 0 + typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ struct sccb_header { @@ -134,6 +141,10 @@ struct sclp_register { void (*state_change_fn)(struct sclp_register *); /* called for events in cp_receive_mask/sclp_receive_mask */ void (*receiver_fn)(struct evbuf_header *); + /* called for power management events */ + void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event); + /* pm event posted flag */ + int pm_event_posted; }; /* externals from sclp.c */ diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 77ab6e3..5cc11c6 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -1,9 +1,8 @@ /* - * drivers/s390/char/sclp_cmd.c + * Copyright IBM Corp. 2007, 2009 * - * Copyright IBM Corp. 2007 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, - * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #define KMSG_COMPONENT "sclp_cmd" @@ -12,11 +11,13 @@ #include <linux/completion.h> #include <linux/init.h> #include <linux/errno.h> +#include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/memory.h> +#include <linux/platform_device.h> #include <asm/chpid.h> #include <asm/sclp.h> #include <asm/setup.h> @@ -292,6 +293,7 @@ static DEFINE_MUTEX(sclp_mem_mutex); static LIST_HEAD(sclp_mem_list); static u8 sclp_max_storage_id; static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; +static int sclp_mem_state_changed; struct memory_increment { struct list_head list; @@ -450,6 +452,8 @@ static int sclp_mem_notifier(struct notifier_block *nb, rc = -EINVAL; break; } + if (!rc) + sclp_mem_state_changed = 1; mutex_unlock(&sclp_mem_mutex); return rc ? 
NOTIFY_BAD : NOTIFY_OK; } @@ -525,6 +529,14 @@ static void __init insert_increment(u16 rn, int standby, int assigned) list_add(&new_incr->list, prev); } +static int sclp_mem_freeze(struct device *dev) +{ + if (!sclp_mem_state_changed) + return 0; + pr_err("Memory hotplug state changed, suspend refused.\n"); + return -EPERM; +} + struct read_storage_sccb { struct sccb_header header; u16 max_id; @@ -534,8 +546,20 @@ struct read_storage_sccb { u32 entries[0]; } __packed; +static struct dev_pm_ops sclp_mem_pm_ops = { + .freeze = sclp_mem_freeze, +}; + +static struct platform_driver sclp_mem_pdrv = { + .driver = { + .name = "sclp_mem", + .pm = &sclp_mem_pm_ops, + }, +}; + static int __init sclp_detect_standby_memory(void) { + struct platform_device *sclp_pdev; struct read_storage_sccb *sccb; int i, id, assigned, rc; @@ -588,7 +612,17 @@ static int __init sclp_detect_standby_memory(void) rc = register_memory_notifier(&sclp_mem_nb); if (rc) goto out; + rc = platform_driver_register(&sclp_mem_pdrv); + if (rc) + goto out; + sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); + rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; + if (rc) + goto out_driver; sclp_add_standby_memory(); + goto out; +out_driver: + platform_driver_unregister(&sclp_mem_pdrv); out: free_page((unsigned long) sccb); return rc; diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 9a25c4b..336811a 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -1,11 +1,9 @@ /* - * drivers/s390/char/sclp_con.c - * SCLP line mode console driver + * SCLP line mode console driver * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke <mpeschke@de.ibm.com> - * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 1999, 2009 + * Author(s): Martin Peschke <mpeschke@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/kmod.h> @@ -32,13 +30,14 @@ static spinlock_t sclp_con_lock; static struct list_head sclp_con_pages; /* List of full struct sclp_buffer structures ready for output */ static struct list_head sclp_con_outqueue; -/* Counter how many buffers are emitted (max 1) and how many */ -/* are on the output queue. */ -static int sclp_con_buffer_count; /* Pointer to current console buffer */ static struct sclp_buffer *sclp_conbuf; /* Timer for delayed output of console messages */ static struct timer_list sclp_con_timer; +/* Suspend mode flag */ +static int sclp_con_suspended; +/* Flag that output queue is currently running */ +static int sclp_con_queue_running; /* Output format for console messages */ static unsigned short sclp_con_columns; @@ -53,42 +52,71 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) do { page = sclp_unmake_buffer(buffer); spin_lock_irqsave(&sclp_con_lock, flags); + /* Remove buffer from outqueue */ list_del(&buffer->list); - sclp_con_buffer_count--; list_add_tail((struct list_head *) page, &sclp_con_pages); + /* Check if there is a pending buffer on the out queue. 
*/ buffer = NULL; if (!list_empty(&sclp_con_outqueue)) - buffer = list_entry(sclp_con_outqueue.next, - struct sclp_buffer, list); + buffer = list_first_entry(&sclp_con_outqueue, + struct sclp_buffer, list); + if (!buffer || sclp_con_suspended) { + sclp_con_queue_running = 0; + spin_unlock_irqrestore(&sclp_con_lock, flags); + break; + } spin_unlock_irqrestore(&sclp_con_lock, flags); - } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); + } while (sclp_emit_buffer(buffer, sclp_conbuf_callback)); } -static void -sclp_conbuf_emit(void) +/* + * Finalize and emit first pending buffer. + */ +static void sclp_conbuf_emit(void) { struct sclp_buffer* buffer; unsigned long flags; - int count; int rc; spin_lock_irqsave(&sclp_con_lock, flags); - buffer = sclp_conbuf; + if (sclp_conbuf) + list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue); sclp_conbuf = NULL; - if (buffer == NULL) { - spin_unlock_irqrestore(&sclp_con_lock, flags); - return; - } - list_add_tail(&buffer->list, &sclp_con_outqueue); - count = sclp_con_buffer_count++; + if (sclp_con_queue_running || sclp_con_suspended) + goto out_unlock; + if (list_empty(&sclp_con_outqueue)) + goto out_unlock; + buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer, + list); + sclp_con_queue_running = 1; spin_unlock_irqrestore(&sclp_con_lock, flags); - if (count) - return; + rc = sclp_emit_buffer(buffer, sclp_conbuf_callback); if (rc) sclp_conbuf_callback(buffer, rc); + return; +out_unlock: + spin_unlock_irqrestore(&sclp_con_lock, flags); +} + +/* + * Wait until out queue is empty + */ +static void sclp_console_sync_queue(void) +{ + unsigned long flags; + + spin_lock_irqsave(&sclp_con_lock, flags); + if (timer_pending(&sclp_con_timer)) + del_timer_sync(&sclp_con_timer); + while (sclp_con_queue_running) { + spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_sync_wait(); + spin_lock_irqsave(&sclp_con_lock, flags); + } + spin_unlock_irqrestore(&sclp_con_lock, flags); } /* @@ -123,6 +151,8 @@ sclp_console_write(struct console *console, const char *message, /* make sure we have a console output buffer */ if (sclp_conbuf == NULL) { while (list_empty(&sclp_con_pages)) { + if (sclp_con_suspended) + goto out; spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); spin_lock_irqsave(&sclp_con_lock, flags); @@ -157,6 +187,7 @@ sclp_console_write(struct console *console, const char *message, sclp_con_timer.expires = jiffies + HZ/10; add_timer(&sclp_con_timer); } +out: spin_unlock_irqrestore(&sclp_con_lock, flags); } @@ -168,30 +199,43 @@ sclp_console_device(struct console *c, int *index) } /* - * This routine is called from panic when the kernel - * is going to give up. We have to make sure that all buffers - * will be flushed to the SCLP. + * Make sure that all buffers will be flushed to the SCLP. */ static void sclp_console_flush(void) { + sclp_conbuf_emit(); + sclp_console_sync_queue(); +} + +/* + * Resume console: If there are cached messages, emit them. 
+ */ +static void sclp_console_resume(void) +{ unsigned long flags; + spin_lock_irqsave(&sclp_con_lock, flags); + sclp_con_suspended = 0; + spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_conbuf_emit(); +} + +/* + * Suspend console: Set suspend flag and flush console + */ +static void sclp_console_suspend(void) +{ + unsigned long flags; + spin_lock_irqsave(&sclp_con_lock, flags); - if (timer_pending(&sclp_con_timer)) - del_timer(&sclp_con_timer); - while (sclp_con_buffer_count > 0) { - spin_unlock_irqrestore(&sclp_con_lock, flags); - sclp_sync_wait(); - spin_lock_irqsave(&sclp_con_lock, flags); - } + sclp_con_suspended = 1; spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_console_flush(); } -static int -sclp_console_notify(struct notifier_block *self, - unsigned long event, void *data) +static int sclp_console_notify(struct notifier_block *self, + unsigned long event, void *data) { sclp_console_flush(); return NOTIFY_OK; @@ -199,7 +243,7 @@ sclp_console_notify(struct notifier_block *self, static struct notifier_block on_panic_nb = { .notifier_call = sclp_console_notify, - .priority = 1, + .priority = SCLP_PANIC_PRIO_CLIENT, }; static struct notifier_block on_reboot_nb = { @@ -221,6 +265,22 @@ static struct console sclp_console = }; /* + * This function is called for SCLP suspend and resume events. + */ +void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) +{ + switch (sclp_pm_event) { + case SCLP_PM_EVENT_FREEZE: + sclp_console_suspend(); + break; + case SCLP_PM_EVENT_RESTORE: + case SCLP_PM_EVENT_THAW: + sclp_console_resume(); + break; + } +} + +/* * called by console_init() in drivers/char/tty_io.c at boot-time. */ static int __init @@ -243,7 +303,6 @@ sclp_console_init(void) } INIT_LIST_HEAD(&sclp_con_outqueue); spin_lock_init(&sclp_con_lock); - sclp_con_buffer_count = 0; sclp_conbuf = NULL; init_timer(&sclp_con_timer); diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 710af42..4be63be 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -1,11 +1,10 @@ /* - * drivers/s390/char/sclp_rw.c - * driver: reading from and writing to system console on S/390 via SCLP + * driver: reading from and writing to system console on S/390 via SCLP * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke <mpeschke@de.ibm.com> - * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 
1999, 2009 + * + * Author(s): Martin Peschke <mpeschke@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/kmod.h> @@ -26,9 +25,16 @@ */ #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) +static void sclp_rw_pm_event(struct sclp_register *reg, + enum sclp_pm_event sclp_pm_event) +{ + sclp_console_pm_event(sclp_pm_event); +} + /* Event type structure for write message and write priority message */ static struct sclp_register sclp_rw_event = { - .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK + .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK, + .pm_event_fn = sclp_rw_pm_event, }; /* diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h index 6aa7a69..85f491e 100644 --- a/drivers/s390/char/sclp_rw.h +++ b/drivers/s390/char/sclp_rw.h @@ -1,11 +1,10 @@ /* - * drivers/s390/char/sclp_rw.h - * interface to the SCLP-read/write driver + * interface to the SCLP-read/write driver * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke <mpeschke@de.ibm.com> - * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corporation 1999, 2009 + * + * Author(s): Martin Peschke <mpeschke@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #ifndef __SCLP_RW_H__ @@ -93,4 +92,5 @@ void sclp_set_columns(struct sclp_buffer *, unsigned short); void sclp_set_htab(struct sclp_buffer *, unsigned short); int sclp_chars_in_buffer(struct sclp_buffer *); +void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event); #endif /* __SCLP_RW_H__ */ diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index a839aa5..5518e24 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -1,10 +1,9 @@ /* - * drivers/s390/char/sclp_vt220.c - * SCLP VT220 terminal driver. + * SCLP VT220 terminal driver. * - * S390 version - * Copyright IBM Corp. 2003,2008 - * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com> + * Copyright IBM Corp. 
2003, 2009 + * + * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com> */ #include <linux/module.h> @@ -69,8 +68,11 @@ static struct list_head sclp_vt220_empty; /* List of pending requests */ static struct list_head sclp_vt220_outqueue; -/* Number of requests in outqueue */ -static int sclp_vt220_outqueue_count; +/* Suspend mode flag */ +static int sclp_vt220_suspended; + +/* Flag that output queue is currently running */ +static int sclp_vt220_queue_running; /* Timer used for delaying write requests to merge subsequent messages into * a single buffer */ @@ -92,6 +94,8 @@ static int __initdata sclp_vt220_init_count; static int sclp_vt220_flush_later; static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf); +static void sclp_vt220_pm_event_fn(struct sclp_register *reg, + enum sclp_pm_event sclp_pm_event); static int __sclp_vt220_emit(struct sclp_vt220_request *request); static void sclp_vt220_emit_current(void); @@ -100,7 +104,8 @@ static struct sclp_register sclp_vt220_register = { .send_mask = EVTYP_VT220MSG_MASK, .receive_mask = EVTYP_VT220MSG_MASK, .state_change_fn = NULL, - .receiver_fn = sclp_vt220_receiver_fn + .receiver_fn = sclp_vt220_receiver_fn, + .pm_event_fn = sclp_vt220_pm_event_fn, }; @@ -120,15 +125,19 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request) spin_lock_irqsave(&sclp_vt220_lock, flags); /* Move request from outqueue to empty queue */ list_del(&request->list); - sclp_vt220_outqueue_count--; list_add_tail((struct list_head *) page, &sclp_vt220_empty); /* Check if there is a pending buffer on the out queue. */ request = NULL; if (!list_empty(&sclp_vt220_outqueue)) request = list_entry(sclp_vt220_outqueue.next, struct sclp_vt220_request, list); + if (!request || sclp_vt220_suspended) { + sclp_vt220_queue_running = 0; + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + break; + } spin_unlock_irqrestore(&sclp_vt220_lock, flags); - } while (request && __sclp_vt220_emit(request)); + } while (__sclp_vt220_emit(request)); if (request == NULL && sclp_vt220_flush_later) sclp_vt220_emit_current(); /* Check if the tty needs a wake up call */ @@ -212,26 +221,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request) } /* - * Queue and emit given request. - */ -static void -sclp_vt220_emit(struct sclp_vt220_request *request) -{ - unsigned long flags; - int count; - - spin_lock_irqsave(&sclp_vt220_lock, flags); - list_add_tail(&request->list, &sclp_vt220_outqueue); - count = sclp_vt220_outqueue_count++; - spin_unlock_irqrestore(&sclp_vt220_lock, flags); - /* Emit only the first buffer immediately - callback takes care of - * the rest */ - if (count == 0 && __sclp_vt220_emit(request)) - sclp_vt220_process_queue(request); -} - -/* - * Queue and emit current request. Return zero on success, non-zero otherwise. + * Queue and emit current request. 
*/ static void sclp_vt220_emit_current(void) @@ -241,22 +231,33 @@ sclp_vt220_emit_current(void) struct sclp_vt220_sccb *sccb; spin_lock_irqsave(&sclp_vt220_lock, flags); - request = NULL; - if (sclp_vt220_current_request != NULL) { + if (sclp_vt220_current_request) { sccb = (struct sclp_vt220_sccb *) sclp_vt220_current_request->sclp_req.sccb; /* Only emit buffers with content */ if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) { - request = sclp_vt220_current_request; + list_add_tail(&sclp_vt220_current_request->list, + &sclp_vt220_outqueue); sclp_vt220_current_request = NULL; if (timer_pending(&sclp_vt220_timer)) del_timer(&sclp_vt220_timer); } sclp_vt220_flush_later = 0; } + if (sclp_vt220_queue_running || sclp_vt220_suspended) + goto out_unlock; + if (list_empty(&sclp_vt220_outqueue)) + goto out_unlock; + request = list_first_entry(&sclp_vt220_outqueue, + struct sclp_vt220_request, list); + sclp_vt220_queue_running = 1; + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + + if (__sclp_vt220_emit(request)) + sclp_vt220_process_queue(request); + return; +out_unlock: spin_unlock_irqrestore(&sclp_vt220_lock, flags); - if (request != NULL) - sclp_vt220_emit(request); } #define SCLP_NORMAL_WRITE 0x00 @@ -396,7 +397,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, if (sclp_vt220_current_request == NULL) { while (list_empty(&sclp_vt220_empty)) { spin_unlock_irqrestore(&sclp_vt220_lock, flags); - if (may_fail) + if (may_fail || sclp_vt220_suspended) goto out; else sclp_sync_wait(); @@ -531,7 +532,7 @@ sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) static void sclp_vt220_flush_chars(struct tty_struct *tty) { - if (sclp_vt220_outqueue_count == 0) + if (!sclp_vt220_queue_running) sclp_vt220_emit_current(); else sclp_vt220_flush_later = 1; @@ -635,7 +636,6 @@ static int __init __sclp_vt220_init(int num_pages) init_timer(&sclp_vt220_timer); sclp_vt220_current_request = NULL; sclp_vt220_buffered_chars = 0; - sclp_vt220_outqueue_count = 0; sclp_vt220_tty = NULL; sclp_vt220_flush_later = 0; @@ -736,7 +736,7 @@ static void __sclp_vt220_flush_buffer(void) spin_lock_irqsave(&sclp_vt220_lock, flags); if (timer_pending(&sclp_vt220_timer)) del_timer(&sclp_vt220_timer); - while (sclp_vt220_outqueue_count > 0) { + while (sclp_vt220_queue_running) { spin_unlock_irqrestore(&sclp_vt220_lock, flags); sclp_sync_wait(); spin_lock_irqsave(&sclp_vt220_lock, flags); @@ -744,6 +744,46 @@ static void __sclp_vt220_flush_buffer(void) spin_unlock_irqrestore(&sclp_vt220_lock, flags); } +/* + * Resume console: If there are cached messages, emit them. 
+ */ +static void sclp_vt220_resume(void) +{ + unsigned long flags; + + spin_lock_irqsave(&sclp_vt220_lock, flags); + sclp_vt220_suspended = 0; + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + sclp_vt220_emit_current(); +} + +/* + * Suspend console: Set suspend flag and flush console + */ +static void sclp_vt220_suspend(void) +{ + unsigned long flags; + + spin_lock_irqsave(&sclp_vt220_lock, flags); + sclp_vt220_suspended = 1; + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + __sclp_vt220_flush_buffer(); +} + +static void sclp_vt220_pm_event_fn(struct sclp_register *reg, + enum sclp_pm_event sclp_pm_event) +{ + switch (sclp_pm_event) { + case SCLP_PM_EVENT_FREEZE: + sclp_vt220_suspend(); + break; + case SCLP_PM_EVENT_RESTORE: + case SCLP_PM_EVENT_THAW: + sclp_vt220_resume(); + break; + } +} + static int sclp_vt220_notify(struct notifier_block *self, unsigned long event, void *data) diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 5469e09..a263337 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -3,7 +3,7 @@ * tape device driver for 3480/3490E/3590 tapes. * * S390 and zSeries version - * Copyright IBM Corp. 2001,2006 + * Copyright IBM Corp. 2001, 2009 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -286,6 +286,7 @@ extern void tape_state_set(struct tape_device *, enum tape_state); extern int tape_generic_online(struct tape_device *, struct tape_discipline *); extern int tape_generic_offline(struct ccw_device *); +extern int tape_generic_pm_suspend(struct ccw_device *); /* Externals from tape_devmap.c */ extern int tape_generic_probe(struct ccw_device *); diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 2d00a38..144d2a5 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -2,7 +2,7 @@ * drivers/s390/char/tape_34xx.c * tape device discipline for 3480/3490 tapes. * - * Copyright (C) IBM Corp. 2001,2006 + * Copyright IBM Corp. 2001, 2009 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -1302,6 +1302,7 @@ static struct ccw_driver tape_34xx_driver = { .remove = tape_generic_remove, .set_online = tape_34xx_online, .set_offline = tape_generic_offline, + .freeze = tape_generic_pm_suspend, }; static int diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index c453b2f..23e6598 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -2,7 +2,7 @@ * drivers/s390/char/tape_3590.c * tape device discipline for 3590 tapes. * - * Copyright IBM Corp. 2001,2006 + * Copyright IBM Corp. 2001, 2009 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -1715,6 +1715,7 @@ static struct ccw_driver tape_3590_driver = { .remove = tape_generic_remove, .set_offline = tape_generic_offline, .set_online = tape_3590_online, + .freeze = tape_generic_pm_suspend, }; /* diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 8a109f3..3ebaa8e 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -3,7 +3,7 @@ * basic function of the tape device driver * * S390 and zSeries version - * Copyright IBM Corp. 2001,2006 + * Copyright IBM Corp. 
2001, 2009 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> @@ -380,6 +380,55 @@ tape_cleanup_device(struct tape_device *device) } /* + * Suspend device. + * + * Called by the common I/O layer if the drive should be suspended on user + * request. We refuse to suspend if the device is loaded or in use for the + * following reason: + * While the Linux guest is suspended, it might be logged off which causes + * devices to be detached. Tape devices are automatically rewound and unloaded + * during DETACH processing (unless the tape device was attached with the + * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to + * resume the original state of the tape device, since we would need to + * manually re-load the cartridge which was active at suspend time. + */ +int tape_generic_pm_suspend(struct ccw_device *cdev) +{ + struct tape_device *device; + + device = cdev->dev.driver_data; + if (!device) { + return -ENODEV; + } + + DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n", + device->cdev_id, device); + + if (device->medium_state != MS_UNLOADED) { + pr_err("A cartridge is loaded in tape device %s, " + "refusing to suspend\n", dev_name(&cdev->dev)); + return -EBUSY; + } + + spin_lock_irq(get_ccwdev_lock(device->cdev)); + switch (device->tape_state) { + case TS_INIT: + case TS_NOT_OPER: + case TS_UNUSED: + spin_unlock_irq(get_ccwdev_lock(device->cdev)); + break; + default: + pr_err("Tape device %s is busy, refusing to " + "suspend\n", dev_name(&cdev->dev)); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); + return -EBUSY; + } + + DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id); + return 0; +} + +/* * Set device offline. * * Called by the common I/O layer if the drive should set offline on user @@ -1273,6 +1322,7 @@ EXPORT_SYMBOL(tape_generic_remove); EXPORT_SYMBOL(tape_generic_probe); EXPORT_SYMBOL(tape_generic_online); EXPORT_SYMBOL(tape_generic_offline); +EXPORT_SYMBOL(tape_generic_pm_suspend); EXPORT_SYMBOL(tape_put_device); EXPORT_SYMBOL(tape_get_device_reference); EXPORT_SYMBOL(tape_state_verbose); diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index d8a2289..e925808 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -3,7 +3,7 @@ * character device driver for reading z/VM system service records * * - * Copyright 2004 IBM Corporation + * Copyright IBM Corp. 2004, 2009 * character device driver for reading z/VM system service records, * Version 1.0 * Author(s): Xenia Tkatschow <xenia@us.ibm.com> @@ -660,6 +660,29 @@ static struct attribute *vmlogrdr_attrs[] = { NULL, }; +static int vmlogrdr_pm_prepare(struct device *dev) +{ + int rc; + struct vmlogrdr_priv_t *priv = dev->driver_data; + + rc = 0; + if (priv) { + spin_lock_bh(&priv->priv_lock); + if (priv->dev_in_use) + rc = -EBUSY; + spin_unlock_bh(&priv->priv_lock); + } + if (rc) + pr_err("vmlogrdr: device %s is busy. 
Refuse to suspend.\n", + dev_name(dev)); + return rc; +} + + +static struct dev_pm_ops vmlogrdr_pm_ops = { + .prepare = vmlogrdr_pm_prepare, +}; + static struct attribute_group vmlogrdr_attr_group = { .attrs = vmlogrdr_attrs, }; @@ -668,6 +691,7 @@ static struct class *vmlogrdr_class; static struct device_driver vmlogrdr_driver = { .name = "vmlogrdr", .bus = &iucv_bus, + .pm = &vmlogrdr_pm_ops, }; @@ -729,6 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) dev->bus = &iucv_bus; dev->parent = iucv_root; dev->driver = &vmlogrdr_driver; + dev->driver_data = priv; /* * The release function could be called after the * module has been unloaded. It's _only_ task is to diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 5dcef81..9245821 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -2,7 +2,7 @@ * Linux driver for System z and s390 unit record devices * (z/VM virtual punch, reader, printer) * - * Copyright IBM Corp. 2001, 2007 + * Copyright IBM Corp. 2001, 2009 * Authors: Malcolm Beattie <beattiem@uk.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Frank Munzert <munzert@de.ibm.com> @@ -60,6 +60,7 @@ static int ur_probe(struct ccw_device *cdev); static void ur_remove(struct ccw_device *cdev); static int ur_set_online(struct ccw_device *cdev); static int ur_set_offline(struct ccw_device *cdev); +static int ur_pm_suspend(struct ccw_device *cdev); static struct ccw_driver ur_driver = { .name = "vmur", @@ -69,6 +70,7 @@ static struct ccw_driver ur_driver = { .remove = ur_remove, .set_online = ur_set_online, .set_offline = ur_set_offline, + .freeze = ur_pm_suspend, }; static DEFINE_MUTEX(vmur_mutex); @@ -158,6 +160,28 @@ static void urdev_put(struct urdev *urd) } /* + * State and contents of ur devices can be changed by class D users issuing + * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended. + * Also the Linux guest might be logged off, which causes all active spool + * files to be closed. + * So we cannot guarantee that spool files are still the same when the Linux + * guest is resumed. In order to avoid unpredictable results at resume time + * we simply refuse to suspend if a ur device node is open. + */ +static int ur_pm_suspend(struct ccw_device *cdev) +{ + struct urdev *urd = cdev->dev.driver_data; + + TRACE("ur_pm_suspend: cdev=%p\n", cdev); + if (urd->open_flag) { + pr_err("Unit record device %s is busy, %s refusing to " + "suspend.\n", dev_name(&cdev->dev), ur_banner); + return -EBUSY; + } + return 0; +} + +/* * Low-level functions to do I/O to a ur device. * alloc_chan_prog * free_chan_prog diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 21a2a82..cb7854c 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -1,17 +1,23 @@ /* * Watchdog implementation based on z/VM Watchdog Timer API * + * Copyright IBM Corp. 2004,2009 + * * The user space watchdog daemon can use this driver as * /dev/vmwatchdog to have z/VM execute the specified CP * command when the timeout expires. The default command is * "IPL", which which cause an immediate reboot. 
*/ +#define KMSG_COMPONENT "vmwatchdog" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/init.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/suspend.h> #include <linux/watchdog.h> #include <linux/smp_lock.h> @@ -43,6 +49,9 @@ static unsigned int vmwdt_interval = 60; static unsigned long vmwdt_is_open; static int vmwdt_expect_close; +#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */ +#define VMWDT_RUNNING 1 /* The watchdog is armed */ + enum vmwdt_func { /* function codes */ wdt_init = 0, @@ -92,6 +101,7 @@ static int vmwdt_keepalive(void) EBC_TOUPPER(ebc_cmd, MAX_CMDLEN); func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; + set_bit(VMWDT_RUNNING, &vmwdt_is_open); ret = __diag288(func, vmwdt_interval, ebc_cmd, len); WARN_ON(ret != 0); kfree(ebc_cmd); @@ -102,6 +112,7 @@ static int vmwdt_disable(void) { int ret = __diag288(wdt_cancel, 0, "", 0); WARN_ON(ret != 0); + clear_bit(VMWDT_RUNNING, &vmwdt_is_open); return ret; } @@ -123,13 +134,13 @@ static int vmwdt_open(struct inode *i, struct file *f) { int ret; lock_kernel(); - if (test_and_set_bit(0, &vmwdt_is_open)) { + if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { unlock_kernel(); return -EBUSY; } ret = vmwdt_keepalive(); if (ret) - clear_bit(0, &vmwdt_is_open); + clear_bit(VMWDT_OPEN, &vmwdt_is_open); unlock_kernel(); return ret ? ret : nonseekable_open(i, f); } @@ -139,7 +150,7 @@ static int vmwdt_close(struct inode *i, struct file *f) if (vmwdt_expect_close == 42) vmwdt_disable(); vmwdt_expect_close = 0; - clear_bit(0, &vmwdt_is_open); + clear_bit(VMWDT_OPEN, &vmwdt_is_open); return 0; } @@ -223,6 +234,57 @@ static ssize_t vmwdt_write(struct file *f, const char __user *buf, return count; } +static int vmwdt_resume(void) +{ + clear_bit(VMWDT_OPEN, &vmwdt_is_open); + return NOTIFY_DONE; +} + +/* + * It makes no sense to go into suspend while the watchdog is running. + * Depending on the memory size, the watchdog might trigger, while we + * are still saving the memory. + * We reuse the open flag to ensure that suspend and watchdog open are + * exclusive operations + */ +static int vmwdt_suspend(void) +{ + if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { + pr_err("The watchdog is in use. " + "This prevents hibernation or suspend.\n"); + return NOTIFY_BAD; + } + if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { + clear_bit(VMWDT_OPEN, &vmwdt_is_open); + pr_err("The watchdog is running. " + "This prevents hibernation or suspend.\n"); + return NOTIFY_BAD; + } + return NOTIFY_DONE; +} + +/* + * This function is called for suspend and resume. 
+ */ +static int vmwdt_power_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + switch (event) { + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + return vmwdt_resume(); + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + return vmwdt_suspend(); + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block vmwdt_power_notifier = { + .notifier_call = vmwdt_power_event, +}; + static const struct file_operations vmwdt_fops = { .open = &vmwdt_open, .release = &vmwdt_close, @@ -244,12 +306,21 @@ static int __init vmwdt_init(void) ret = vmwdt_probe(); if (ret) return ret; - return misc_register(&vmwdt_dev); + ret = register_pm_notifier(&vmwdt_power_notifier); + if (ret) + return ret; + ret = misc_register(&vmwdt_dev); + if (ret) { + unregister_pm_notifier(&vmwdt_power_notifier); + return ret; + } + return 0; } module_init(vmwdt_init); static void __exit vmwdt_exit(void) { - WARN_ON(misc_deregister(&vmwdt_dev) != 0); + unregister_pm_notifier(&vmwdt_power_notifier); + misc_deregister(&vmwdt_dev); } module_exit(vmwdt_exit); diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 22ce765..a5a62f1 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -1,11 +1,10 @@ /* - * drivers/s390/cio/ccwgroup.c * bus driver for ccwgroup * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Arnd Bergmann (arndb@de.ibm.com) - * Cornelia Huck (cornelia.huck@de.ibm.com) + * Copyright IBM Corp. 2002, 2009 + * + * Author(s): Arnd Bergmann (arndb@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) */ #include <linux/module.h> #include <linux/errno.h> @@ -501,6 +500,74 @@ static void ccwgroup_shutdown(struct device *dev) gdrv->shutdown(gdev); } +static int ccwgroup_pm_prepare(struct device *dev) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + + /* Fail while device is being set online/offline. */ + if (atomic_read(&gdev->onoff)) + return -EAGAIN; + + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; + + return gdrv->prepare ? gdrv->prepare(gdev) : 0; +} + +static void ccwgroup_pm_complete(struct device *dev) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); + + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return; + + if (gdrv->complete) + gdrv->complete(gdev); +} + +static int ccwgroup_pm_freeze(struct device *dev) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; + + return gdrv->freeze ? gdrv->freeze(gdev) : 0; +} + +static int ccwgroup_pm_thaw(struct device *dev) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; + + return gdrv->thaw ? gdrv->thaw(gdev) : 0; +} + +static int ccwgroup_pm_restore(struct device *dev) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; + + return gdrv->restore ? 
gdrv->restore(gdev) : 0; +} + +static struct dev_pm_ops ccwgroup_pm_ops = { + .prepare = ccwgroup_pm_prepare, + .complete = ccwgroup_pm_complete, + .freeze = ccwgroup_pm_freeze, + .thaw = ccwgroup_pm_thaw, + .restore = ccwgroup_pm_restore, +}; + static struct bus_type ccwgroup_bus_type = { .name = "ccwgroup", .match = ccwgroup_bus_match, @@ -508,6 +575,7 @@ static struct bus_type ccwgroup_bus_type = { .probe = ccwgroup_probe, .remove = ccwgroup_remove, .shutdown = ccwgroup_shutdown, + .pm = &ccwgroup_pm_ops, }; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 883f16f..1ecd3e5 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -549,8 +549,7 @@ cleanup: return ret; } -static int -__chsc_do_secm(struct channel_subsystem *css, int enable, void *page) +int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) { struct { struct chsc_header request; diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index ba59bce..425e8f8 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -90,6 +90,7 @@ extern void chsc_free_sei_area(void); extern int chsc_enable_facility(int); struct channel_subsystem; extern int chsc_secm(struct channel_subsystem *, int); +int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); int chsc_chp_vary(struct chp_id chpid, int on); int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 93eca17..cc5144b 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -1,7 +1,8 @@ /* * Driver for s390 chsc subchannels * - * Copyright IBM Corp. 2008 + * Copyright IBM Corp. 2008, 2009 + * * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * */ @@ -112,6 +113,31 @@ static void chsc_subchannel_shutdown(struct subchannel *sch) cio_disable_subchannel(sch); } +static int chsc_subchannel_prepare(struct subchannel *sch) +{ + int cc; + struct schib schib; + /* + * Don't allow suspend while the subchannel is not idle + * since we don't have a way to clear the subchannel and + * cannot disable it with a request running. + */ + cc = stsch(sch->schid, &schib); + if (!cc && scsw_stctl(&schib.scsw)) + return -EAGAIN; + return 0; +} + +static int chsc_subchannel_freeze(struct subchannel *sch) +{ + return cio_disable_subchannel(sch); +} + +static int chsc_subchannel_restore(struct subchannel *sch) +{ + return cio_enable_subchannel(sch, (u32)(unsigned long)sch); +} + static struct css_device_id chsc_subchannel_ids[] = { { .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, }, { /* end of list */ }, @@ -125,6 +151,10 @@ static struct css_driver chsc_subchannel_driver = { .probe = chsc_subchannel_probe, .remove = chsc_subchannel_remove, .shutdown = chsc_subchannel_shutdown, + .prepare = chsc_subchannel_prepare, + .freeze = chsc_subchannel_freeze, + .thaw = chsc_subchannel_restore, + .restore = chsc_subchannel_restore, .name = "chsc_subchannel", }; diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index dc98b2c..30f5161 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -1204,6 +1204,11 @@ static ssize_t cmb_enable_store(struct device *dev, DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store); +int ccw_set_cmf(struct ccw_device *cdev, int enable) +{ + return cmbops->set(cdev, enable ? 
2 : 0); +} + /** * enable_cmf() - switch on the channel measurement for a specific device * @cdev: The ccw device to be enabled diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 0085d89..85d43c6 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1,10 +1,10 @@ /* - * drivers/s390/cio/css.c - * driver for channel subsystem + * driver for channel subsystem * - * Copyright IBM Corp. 2002,2008 - * Author(s): Arnd Bergmann (arndb@de.ibm.com) - * Cornelia Huck (cornelia.huck@de.ibm.com) + * Copyright IBM Corp. 2002, 2009 + * + * Author(s): Arnd Bergmann (arndb@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) */ #define KMSG_COMPONENT "cio" @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/list.h> #include <linux/reboot.h> +#include <linux/suspend.h> #include <asm/isc.h> #include <asm/crw.h> @@ -780,6 +781,79 @@ static struct notifier_block css_reboot_notifier = { }; /* + * Since the css devices are neither on a bus nor have a class + * nor have a special device type, we cannot stop/restart channel + * path measurements via the normal suspend/resume callbacks, but have + * to use notifiers. + */ +static int css_power_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + void *secm_area; + int ret, i; + + switch (event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + ret = NOTIFY_DONE; + for (i = 0; i <= __MAX_CSSID; i++) { + struct channel_subsystem *css; + + css = channel_subsystems[i]; + mutex_lock(&css->mutex); + if (!css->cm_enabled) { + mutex_unlock(&css->mutex); + continue; + } + secm_area = (void *)get_zeroed_page(GFP_KERNEL | + GFP_DMA); + if (secm_area) { + if (__chsc_do_secm(css, 0, secm_area)) + ret = NOTIFY_BAD; + free_page((unsigned long)secm_area); + } else + ret = NOTIFY_BAD; + + mutex_unlock(&css->mutex); + } + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + ret = NOTIFY_DONE; + for (i = 0; i <= __MAX_CSSID; i++) { + struct channel_subsystem *css; + + css = channel_subsystems[i]; + mutex_lock(&css->mutex); + if (!css->cm_enabled) { + mutex_unlock(&css->mutex); + continue; + } + secm_area = (void *)get_zeroed_page(GFP_KERNEL | + GFP_DMA); + if (secm_area) { + if (__chsc_do_secm(css, 1, secm_area)) + ret = NOTIFY_BAD; + free_page((unsigned long)secm_area); + } else + ret = NOTIFY_BAD; + + mutex_unlock(&css->mutex); + } + /* search for subchannels, which appeared during hibernation */ + css_schedule_reprobe(); + break; + default: + ret = NOTIFY_DONE; + } + return ret; + +} +static struct notifier_block css_power_notifier = { + .notifier_call = css_power_event, +}; + +/* * Now that the driver core is running, we can setup our channel subsystem. * The struct subchannel's are created during probing (except for the * static console subchannel). @@ -852,6 +926,11 @@ init_channel_subsystem (void) ret = register_reboot_notifier(&css_reboot_notifier); if (ret) goto out_unregister; + ret = register_pm_notifier(&css_power_notifier); + if (ret) { + unregister_reboot_notifier(&css_reboot_notifier); + goto out_unregister; + } css_init_done = 1; /* Enable default isc for I/O subchannels. 
*/ @@ -953,6 +1032,73 @@ static int css_uevent(struct device *dev, struct kobj_uevent_env *env) return ret; } +static int css_pm_prepare(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (mutex_is_locked(&sch->reg_mutex)) + return -EAGAIN; + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + /* Notify drivers that they may not register children. */ + return drv->prepare ? drv->prepare(sch) : 0; +} + +static void css_pm_complete(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (!sch->dev.driver) + return; + drv = to_cssdriver(sch->dev.driver); + if (drv->complete) + drv->complete(sch); +} + +static int css_pm_freeze(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + return drv->freeze ? drv->freeze(sch) : 0; +} + +static int css_pm_thaw(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + return drv->thaw ? drv->thaw(sch) : 0; +} + +static int css_pm_restore(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + return drv->restore ? drv->restore(sch) : 0; +} + +static struct dev_pm_ops css_pm_ops = { + .prepare = css_pm_prepare, + .complete = css_pm_complete, + .freeze = css_pm_freeze, + .thaw = css_pm_thaw, + .restore = css_pm_restore, +}; + struct bus_type css_bus_type = { .name = "css", .match = css_bus_match, @@ -960,6 +1106,7 @@ struct bus_type css_bus_type = { .remove = css_remove, .shutdown = css_shutdown, .uevent = css_uevent, + .pm = &css_pm_ops, }; /** diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 57ebf12..9763eee 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -70,6 +70,11 @@ struct chp_link; * @probe: function called on probe * @remove: function called on remove * @shutdown: called at device shutdown + * @prepare: prepare for pm state transition + * @complete: undo work done in @prepare + * @freeze: callback for freezing during hibernation snapshotting + * @thaw: undo work done in @freeze + * @restore: callback for restoring after hibernation * @name: name of the device driver */ struct css_driver { @@ -82,6 +87,11 @@ struct css_driver { int (*probe)(struct subchannel *); int (*remove)(struct subchannel *); void (*shutdown)(struct subchannel *); + int (*prepare) (struct subchannel *); + void (*complete) (struct subchannel *); + int (*freeze)(struct subchannel *); + int (*thaw) (struct subchannel *); + int (*restore)(struct subchannel *); const char *name; }; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 35441fa..3c57c1a 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -138,6 +138,19 @@ static struct css_device_id io_subchannel_ids[] = { }; MODULE_DEVICE_TABLE(css, io_subchannel_ids); +static int io_subchannel_prepare(struct subchannel *sch) +{ + struct ccw_device *cdev; + /* + * Don't allow suspend while a ccw device registration + * is still outstanding. 
+ */ + cdev = sch_get_cdev(sch); + if (cdev && !device_is_registered(&cdev->dev)) + return -EAGAIN; + return 0; +} + static struct css_driver io_subchannel_driver = { .owner = THIS_MODULE, .subchannel_type = io_subchannel_ids, @@ -148,6 +161,7 @@ static struct css_driver io_subchannel_driver = { .probe = io_subchannel_probe, .remove = io_subchannel_remove, .shutdown = io_subchannel_shutdown, + .prepare = io_subchannel_prepare, }; struct workqueue_struct *ccw_device_work; @@ -1775,6 +1789,15 @@ ccw_device_probe_console(void) return &console_cdev; } +static int ccw_device_pm_restore(struct device *dev); + +int ccw_device_force_console(void) +{ + if (!console_cdev_in_use) + return -ENODEV; + return ccw_device_pm_restore(&console_cdev.dev); +} +EXPORT_SYMBOL_GPL(ccw_device_force_console); const char *cio_get_console_cdev_name(struct subchannel *sch) { @@ -1895,6 +1918,242 @@ static void ccw_device_shutdown(struct device *dev) disable_cmf(cdev); } +static int ccw_device_pm_prepare(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + + if (work_pending(&cdev->private->kick_work)) + return -EAGAIN; + /* Fail while device is being set online/offline. */ + if (atomic_read(&cdev->private->onoff)) + return -EAGAIN; + + if (cdev->online && cdev->drv && cdev->drv->prepare) + return cdev->drv->prepare(cdev); + + return 0; +} + +static void ccw_device_pm_complete(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + + if (cdev->online && cdev->drv && cdev->drv->complete) + cdev->drv->complete(cdev); +} + +static int ccw_device_pm_freeze(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int ret, cm_enabled; + + /* Fail suspend while device is in transistional state. */ + if (!dev_fsm_final_state(cdev)) + return -EAGAIN; + if (!cdev->online) + return 0; + if (cdev->drv && cdev->drv->freeze) { + ret = cdev->drv->freeze(cdev); + if (ret) + return ret; + } + + spin_lock_irq(sch->lock); + cm_enabled = cdev->private->cmb != NULL; + spin_unlock_irq(sch->lock); + if (cm_enabled) { + /* Don't have the css write on memory. */ + ret = ccw_set_cmf(cdev, 0); + if (ret) + return ret; + } + /* From here on, disallow device driver I/O. */ + spin_lock_irq(sch->lock); + ret = cio_disable_subchannel(sch); + spin_unlock_irq(sch->lock); + + return ret; +} + +static int ccw_device_pm_thaw(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int ret, cm_enabled; + + if (!cdev->online) + return 0; + + spin_lock_irq(sch->lock); + /* Allow device driver I/O again. */ + ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); + cm_enabled = cdev->private->cmb != NULL; + spin_unlock_irq(sch->lock); + if (ret) + return ret; + + if (cm_enabled) { + ret = ccw_set_cmf(cdev, 1); + if (ret) + return ret; + } + + if (cdev->drv && cdev->drv->thaw) + ret = cdev->drv->thaw(cdev); + + return ret; +} + +static void __ccw_device_pm_restore(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int ret; + + if (cio_is_console(sch->schid)) + goto out; + /* + * While we were sleeping, devices may have gone or become + * available again. Kick re-detection. 
+ */ + spin_lock_irq(sch->lock); + cdev->private->flags.resuming = 1; + ret = ccw_device_recognition(cdev); + spin_unlock_irq(sch->lock); + if (ret) { + CIO_MSG_EVENT(0, "Couldn't start recognition for device " + "%s (ret=%d)\n", dev_name(&cdev->dev), ret); + spin_lock_irq(sch->lock); + cdev->private->state = DEV_STATE_DISCONNECTED; + spin_unlock_irq(sch->lock); + /* notify driver after the resume cb */ + goto out; + } + wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || + cdev->private->state == DEV_STATE_DISCONNECTED); + +out: + cdev->private->flags.resuming = 0; +} + +static int resume_handle_boxed(struct ccw_device *cdev) +{ + cdev->private->state = DEV_STATE_BOXED; + if (ccw_device_notify(cdev, CIO_BOXED)) + return 0; + ccw_device_schedule_sch_unregister(cdev); + return -ENODEV; +} + +static int resume_handle_disc(struct ccw_device *cdev) +{ + cdev->private->state = DEV_STATE_DISCONNECTED; + if (ccw_device_notify(cdev, CIO_GONE)) + return 0; + ccw_device_schedule_sch_unregister(cdev); + return -ENODEV; +} + +static int ccw_device_pm_restore(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int ret = 0, cm_enabled; + + __ccw_device_pm_restore(cdev); + spin_lock_irq(sch->lock); + if (cio_is_console(sch->schid)) { + cio_enable_subchannel(sch, (u32)(addr_t)sch); + spin_unlock_irq(sch->lock); + goto out_restore; + } + cdev->private->flags.donotify = 0; + /* check recognition results */ + switch (cdev->private->state) { + case DEV_STATE_OFFLINE: + break; + case DEV_STATE_BOXED: + ret = resume_handle_boxed(cdev); + spin_unlock_irq(sch->lock); + if (ret) + goto out; + goto out_restore; + case DEV_STATE_DISCONNECTED: + goto out_disc_unlock; + default: + goto out_unreg_unlock; + } + /* check if the device id has changed */ + if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { + CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from " + "%04x to %04x)\n", dev_name(&sch->dev), + cdev->private->dev_id.devno, + sch->schib.pmcw.dev); + goto out_unreg_unlock; + } + /* check if the device type has changed */ + if (!ccw_device_test_sense_data(cdev)) { + ccw_device_update_sense_data(cdev); + PREPARE_WORK(&cdev->private->kick_work, + ccw_device_do_unbind_bind); + queue_work(ccw_device_work, &cdev->private->kick_work); + ret = -ENODEV; + goto out_unlock; + } + if (!cdev->online) { + ret = 0; + goto out_unlock; + } + ret = ccw_device_online(cdev); + if (ret) + goto out_disc_unlock; + + cm_enabled = cdev->private->cmb != NULL; + spin_unlock_irq(sch->lock); + + wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); + if (cdev->private->state != DEV_STATE_ONLINE) { + spin_lock_irq(sch->lock); + goto out_disc_unlock; + } + if (cm_enabled) { + ret = ccw_set_cmf(cdev, 1); + if (ret) { + CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed " + "(rc=%d)\n", dev_name(&cdev->dev), ret); + ret = 0; + } + } + +out_restore: + if (cdev->online && cdev->drv && cdev->drv->restore) + ret = cdev->drv->restore(cdev); +out: + return ret; + +out_disc_unlock: + ret = resume_handle_disc(cdev); + spin_unlock_irq(sch->lock); + if (ret) + return ret; + goto out_restore; + +out_unreg_unlock: + ccw_device_schedule_sch_unregister(cdev); + ret = -ENODEV; +out_unlock: + spin_unlock_irq(sch->lock); + return ret; +} + +static struct dev_pm_ops ccw_pm_ops = { + .prepare = ccw_device_pm_prepare, + .complete = ccw_device_pm_complete, + .freeze = ccw_device_pm_freeze, + .thaw = ccw_device_pm_thaw, + .restore = ccw_device_pm_restore, +}; + 
struct bus_type ccw_bus_type = { .name = "ccw", .match = ccw_bus_match, @@ -1902,6 +2161,7 @@ struct bus_type ccw_bus_type = { .probe = ccw_device_probe, .remove = ccw_device_remove, .shutdown = ccw_device_shutdown, + .pm = &ccw_pm_ops, }; /** diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index f1cbbd9..e397510 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -87,6 +87,8 @@ int ccw_device_is_orphan(struct ccw_device *); int ccw_device_recognition(struct ccw_device *); int ccw_device_online(struct ccw_device *); int ccw_device_offline(struct ccw_device *); +void ccw_device_update_sense_data(struct ccw_device *); +int ccw_device_test_sense_data(struct ccw_device *); void ccw_device_schedule_sch_unregister(struct ccw_device *); int ccw_purge_blacklisted(void); @@ -133,5 +135,6 @@ extern struct bus_type ccw_bus_type; void retry_set_schib(struct ccw_device *cdev); void cmf_retry_copy_block(struct ccw_device *); int cmf_reenable(struct ccw_device *); +int ccw_set_cmf(struct ccw_device *cdev, int enable); extern struct device_attribute dev_attr_cmb_enable; #endif diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index e460492..3db88c5 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -177,29 +177,21 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) panic("Can't stop i/o on subchannel.\n"); } -static int -ccw_device_handle_oper(struct ccw_device *cdev) +void ccw_device_update_sense_data(struct ccw_device *cdev) { - struct subchannel *sch; + memset(&cdev->id, 0, sizeof(cdev->id)); + cdev->id.cu_type = cdev->private->senseid.cu_type; + cdev->id.cu_model = cdev->private->senseid.cu_model; + cdev->id.dev_type = cdev->private->senseid.dev_type; + cdev->id.dev_model = cdev->private->senseid.dev_model; +} - sch = to_subchannel(cdev->dev.parent); - cdev->private->flags.recog_done = 1; - /* - * Check if cu type and device type still match. If - * not, it is certainly another device and we have to - * de- and re-register. - */ - if (cdev->id.cu_type != cdev->private->senseid.cu_type || - cdev->id.cu_model != cdev->private->senseid.cu_model || - cdev->id.dev_type != cdev->private->senseid.dev_type || - cdev->id.dev_model != cdev->private->senseid.dev_model) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_do_unbind_bind); - queue_work(ccw_device_work, &cdev->private->kick_work); - return 0; - } - cdev->private->flags.donotify = 1; - return 1; +int ccw_device_test_sense_data(struct ccw_device *cdev) +{ + return cdev->id.cu_type == cdev->private->senseid.cu_type && + cdev->id.cu_model == cdev->private->senseid.cu_model && + cdev->id.dev_type == cdev->private->senseid.dev_type && + cdev->id.dev_model == cdev->private->senseid.dev_model; } /* @@ -233,7 +225,7 @@ static void ccw_device_recog_done(struct ccw_device *cdev, int state) { struct subchannel *sch; - int notify, old_lpm, same_dev; + int old_lpm; sch = to_subchannel(cdev->dev.parent); @@ -263,8 +255,12 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) wake_up(&cdev->private->wait_q); return; } - notify = 0; - same_dev = 0; /* Keep the compiler quiet... 
*/ + if (cdev->private->flags.resuming) { + cdev->private->state = state; + cdev->private->flags.recog_done = 1; + wake_up(&cdev->private->wait_q); + return; + } switch (state) { case DEV_STATE_NOT_OPER: CIO_MSG_EVENT(2, "SenseID : unknown device %04x on " @@ -273,34 +269,31 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) sch->schid.ssid, sch->schid.sch_no); break; case DEV_STATE_OFFLINE: - if (cdev->online) { - same_dev = ccw_device_handle_oper(cdev); - notify = 1; + if (!cdev->online) { + ccw_device_update_sense_data(cdev); + /* Issue device info message. */ + CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: " + "CU Type/Mod = %04X/%02X, Dev Type/Mod " + "= %04X/%02X\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, + cdev->id.cu_type, cdev->id.cu_model, + cdev->id.dev_type, cdev->id.dev_model); + break; } - /* fill out sense information */ - memset(&cdev->id, 0, sizeof(cdev->id)); - cdev->id.cu_type = cdev->private->senseid.cu_type; - cdev->id.cu_model = cdev->private->senseid.cu_model; - cdev->id.dev_type = cdev->private->senseid.dev_type; - cdev->id.dev_model = cdev->private->senseid.dev_model; - if (notify) { - cdev->private->state = DEV_STATE_OFFLINE; - if (same_dev) { - /* Get device online again. */ - ccw_device_online(cdev); - wake_up(&cdev->private->wait_q); - } - return; + cdev->private->state = DEV_STATE_OFFLINE; + cdev->private->flags.recog_done = 1; + if (ccw_device_test_sense_data(cdev)) { + cdev->private->flags.donotify = 1; + ccw_device_online(cdev); + wake_up(&cdev->private->wait_q); + } else { + ccw_device_update_sense_data(cdev); + PREPARE_WORK(&cdev->private->kick_work, + ccw_device_do_unbind_bind); + queue_work(ccw_device_work, &cdev->private->kick_work); } - /* Issue device info message. */ - CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: " - "CU Type/Mod = %04X/%02X, Dev Type/Mod = " - "%04X/%02X\n", - cdev->private->dev_id.ssid, - cdev->private->dev_id.devno, - cdev->id.cu_type, cdev->id.cu_model, - cdev->id.dev_type, cdev->id.dev_model); - break; + return; case DEV_STATE_BOXED: CIO_MSG_EVENT(0, "SenseID : boxed device %04x on " " subchannel 0.%x.%04x\n", @@ -502,9 +495,6 @@ ccw_device_recognition(struct ccw_device *cdev) struct subchannel *sch; int ret; - if ((cdev->private->state != DEV_STATE_NOT_OPER) && - (cdev->private->state != DEV_STATE_BOXED)) - return -EINVAL; sch = to_subchannel(cdev->dev.parent); ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); if (ret != 0) diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index bf0a24a..2d0efee 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -1,10 +1,8 @@ /* - * drivers/s390/cio/device_ops.c + * Copyright IBM Corp. 
2002, 2009 * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - * Cornelia Huck (cornelia.huck@de.ibm.com) + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) */ #include <linux/module.h> #include <linux/init.h> @@ -116,12 +114,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) if (!cdev || !cdev->dev.parent) return -ENODEV; + sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE && cdev->private->state != DEV_STATE_W4SENSE) return -EINVAL; - sch = to_subchannel(cdev->dev.parent); + ret = cio_clear(sch); if (ret == 0) cdev->private->intparm = intparm; @@ -162,6 +163,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, if (!cdev || !cdev->dev.parent) return -ENODEV; sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state == DEV_STATE_VERIFY || @@ -337,12 +340,15 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) if (!cdev || !cdev->dev.parent) return -ENODEV; + sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE && cdev->private->state != DEV_STATE_W4SENSE) return -EINVAL; - sch = to_subchannel(cdev->dev.parent); + ret = cio_halt(sch); if (ret == 0) cdev->private->intparm = intparm; @@ -369,6 +375,8 @@ int ccw_device_resume(struct ccw_device *cdev) if (!cdev || !cdev->dev.parent) return -ENODEV; sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE || @@ -580,6 +588,8 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, int rc; sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state != DEV_STATE_ONLINE) return -EIO; /* Adjust requested path mask to excluded varied off paths. */ @@ -669,6 +679,8 @@ int ccw_device_tm_intrg(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state != DEV_STATE_ONLINE) return -EIO; if (!scsw_is_tm(&sch->schib.scsw) || diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index c4f3e7c..0b8f381 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -107,6 +107,7 @@ struct ccw_device_private { unsigned int recog_done:1; /* dev. recog. complete */ unsigned int fake_irb:1; /* deliver faked irb */ unsigned int intretry:1; /* retry internal operation */ + unsigned int resuming:1; /* recognition while resume */ } __attribute__((packed)) flags; unsigned long intparm; /* user interruption parameter */ struct qdio_irq *qdio_data; diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 7b6f46d..d40f7a9 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -3,12 +3,12 @@ * ESCON CLAW network driver * * Linux for zSeries version - * Copyright (C) 2002,2005 IBM Corporation + * Copyright IBM Corp. 
2002, 2009 * Author(s) Original code written by: - * Kazuo Iimura (iimura@jp.ibm.com) + * Kazuo Iimura <iimura@jp.ibm.com> * Rewritten by - * Andy Richter (richtera@us.ibm.com) - * Marc Price (mwprice@us.ibm.com) + * Andy Richter <richtera@us.ibm.com> + * Marc Price <mwprice@us.ibm.com> * * sysfs parms: * group x.x.rrrr,x.x.wwww @@ -253,6 +253,11 @@ static void claw_free_wrt_buf(struct net_device *dev); /* Functions for unpack reads */ static void unpack_read(struct net_device *dev); +static int claw_pm_prepare(struct ccwgroup_device *gdev) +{ + return -EPERM; +} + /* ccwgroup table */ static struct ccwgroup_driver claw_group_driver = { @@ -264,6 +269,7 @@ static struct ccwgroup_driver claw_group_driver = { .remove = claw_remove_device, .set_online = claw_new_device, .set_offline = claw_shutdown_device, + .prepare = claw_pm_prepare, }; /* diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 54c4649..222e473 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1,7 +1,7 @@ /* * drivers/s390/net/ctcm_main.c * - * Copyright IBM Corp. 2001, 2007 + * Copyright IBM Corp. 2001, 2009 * Author(s): * Original CTC driver(s): * Fritz Elfert (felfert@millenux.com) @@ -1688,6 +1688,38 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev) put_device(&cgdev->dev); } +static int ctcm_pm_suspend(struct ccwgroup_device *gdev) +{ + struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); + + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + netif_device_detach(priv->channel[READ]->netdev); + ctcm_close(priv->channel[READ]->netdev); + ccw_device_set_offline(gdev->cdev[1]); + ccw_device_set_offline(gdev->cdev[0]); + return 0; +} + +static int ctcm_pm_resume(struct ccwgroup_device *gdev) +{ + struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); + int rc; + + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + rc = ccw_device_set_online(gdev->cdev[1]); + if (rc) + goto err_out; + rc = ccw_device_set_online(gdev->cdev[0]); + if (rc) + goto err_out; + ctcm_open(priv->channel[READ]->netdev); +err_out: + netif_device_attach(priv->channel[READ]->netdev); + return rc; +} + static struct ccwgroup_driver ctcm_group_driver = { .owner = THIS_MODULE, .name = CTC_DRIVER_NAME, @@ -1697,6 +1729,9 @@ static struct ccwgroup_driver ctcm_group_driver = { .remove = ctcm_remove_device, .set_online = ctcm_new_device, .set_offline = ctcm_shutdown_device, + .freeze = ctcm_pm_suspend, + .thaw = ctcm_pm_resume, + .restore = ctcm_pm_resume, }; diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index a45bc24..07a25c3 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1,15 +1,12 @@ /* - * linux/drivers/s390/net/lcs.c - * * Linux for S/390 Lan Channel Station Network Driver * - * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Original Code written by - * DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) - * Rewritten by - * Frank Pavlic (fpavlic@de.ibm.com) and - * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Copyright IBM Corp. 
1999, 2009 + * Author(s): Original Code written by + * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com> + * Rewritten by + * Frank Pavlic <fpavlic@de.ibm.com> and + * Martin Schwidefsky <schwidefsky@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -2313,6 +2310,60 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev) put_device(&ccwgdev->dev); } +static int lcs_pm_suspend(struct lcs_card *card) +{ + if (card->dev) + netif_device_detach(card->dev); + lcs_set_allowed_threads(card, 0); + lcs_wait_for_threads(card, 0xffffffff); + if (card->state != DEV_STATE_DOWN) + __lcs_shutdown_device(card->gdev, 1); + return 0; +} + +static int lcs_pm_resume(struct lcs_card *card) +{ + int rc = 0; + + if (card->state == DEV_STATE_RECOVER) + rc = lcs_new_device(card->gdev); + if (card->dev) + netif_device_attach(card->dev); + if (rc) { + dev_warn(&card->gdev->dev, "The lcs device driver " + "failed to recover the device\n"); + } + return rc; +} + +static int lcs_prepare(struct ccwgroup_device *gdev) +{ + return 0; +} + +static void lcs_complete(struct ccwgroup_device *gdev) +{ + return; +} + +static int lcs_freeze(struct ccwgroup_device *gdev) +{ + struct lcs_card *card = dev_get_drvdata(&gdev->dev); + return lcs_pm_suspend(card); +} + +static int lcs_thaw(struct ccwgroup_device *gdev) +{ + struct lcs_card *card = dev_get_drvdata(&gdev->dev); + return lcs_pm_resume(card); +} + +static int lcs_restore(struct ccwgroup_device *gdev) +{ + struct lcs_card *card = dev_get_drvdata(&gdev->dev); + return lcs_pm_resume(card); +} + /** * LCS ccwgroup driver registration */ @@ -2325,6 +2376,11 @@ static struct ccwgroup_driver lcs_group_driver = { .remove = lcs_remove_device, .set_online = lcs_new_device, .set_offline = lcs_shutdown_device, + .prepare = lcs_prepare, + .complete = lcs_complete, + .freeze = lcs_freeze, + .thaw = lcs_thaw, + .restore = lcs_restore, }; /** diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index aec9e5d..fdb02d0 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1,11 +1,15 @@ /* * IUCV network driver * - * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * Copyright IBM Corp. 
2001, 2009 * - * Sysfs integration and all bugs therein by Cornelia Huck - * (cornelia.huck@de.ibm.com) + * Author(s): + * Original netiucv driver: + * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * Sysfs integration and all bugs therein: + * Cornelia Huck (cornelia.huck@de.ibm.com) + * PM functions: + * Ursula Braun (ursula.braun@de.ibm.com) * * Documentation used: * the source of the original IUCV driver by: @@ -149,10 +153,27 @@ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ #define PRINTK_HEADER " iucv: " /* for debugging */ +/* dummy device to make sure netiucv_pm functions are called */ +static struct device *netiucv_dev; + +static int netiucv_pm_prepare(struct device *); +static void netiucv_pm_complete(struct device *); +static int netiucv_pm_freeze(struct device *); +static int netiucv_pm_restore_thaw(struct device *); + +static struct dev_pm_ops netiucv_pm_ops = { + .prepare = netiucv_pm_prepare, + .complete = netiucv_pm_complete, + .freeze = netiucv_pm_freeze, + .thaw = netiucv_pm_restore_thaw, + .restore = netiucv_pm_restore_thaw, +}; + static struct device_driver netiucv_driver = { .owner = THIS_MODULE, .name = "netiucv", .bus = &iucv_bus, + .pm = &netiucv_pm_ops, }; static int netiucv_callback_connreq(struct iucv_path *, @@ -233,6 +254,7 @@ struct netiucv_priv { fsm_instance *fsm; struct iucv_connection *conn; struct device *dev; + int pm_state; }; /** @@ -1265,6 +1287,72 @@ static int netiucv_close(struct net_device *dev) return 0; } +static int netiucv_pm_prepare(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + return 0; +} + +static void netiucv_pm_complete(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + return; +} + +/** + * netiucv_pm_freeze() - Freeze PM callback + * @dev: netiucv device + * + * close open netiucv interfaces + */ +static int netiucv_pm_freeze(struct device *dev) +{ + struct netiucv_priv *priv = dev->driver_data; + struct net_device *ndev = NULL; + int rc = 0; + + IUCV_DBF_TEXT(trace, 3, __func__); + if (priv && priv->conn) + ndev = priv->conn->netdev; + if (!ndev) + goto out; + netif_device_detach(ndev); + priv->pm_state = fsm_getstate(priv->fsm); + rc = netiucv_close(ndev); +out: + return rc; +} + +/** + * netiucv_pm_restore_thaw() - Thaw and restore PM callback + * @dev: netiucv device + * + * re-open netiucv interfaces closed during freeze + */ +static int netiucv_pm_restore_thaw(struct device *dev) +{ + struct netiucv_priv *priv = dev->driver_data; + struct net_device *ndev = NULL; + int rc = 0; + + IUCV_DBF_TEXT(trace, 3, __func__); + if (priv && priv->conn) + ndev = priv->conn->netdev; + if (!ndev) + goto out; + switch (priv->pm_state) { + case DEV_STATE_RUNNING: + case DEV_STATE_STARTWAIT: + rc = netiucv_open(ndev); + break; + default: + break; + } + netif_device_attach(ndev); +out: + return rc; +} + /** * Start transmission of a packet. * Called from generic network device layer. 
@@ -1731,7 +1819,6 @@ static int netiucv_register_device(struct net_device *ndev) struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); int ret; - IUCV_DBF_TEXT(trace, 3, __func__); if (dev) { @@ -2100,6 +2187,7 @@ static void __exit netiucv_exit(void) netiucv_unregister_device(dev); } + device_unregister(netiucv_dev); driver_unregister(&netiucv_driver); iucv_unregister(&netiucv_handler, 1); iucv_unregister_dbf_views(); @@ -2125,10 +2213,25 @@ static int __init netiucv_init(void) IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); goto out_iucv; } - + /* establish dummy device */ + netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!netiucv_dev) { + rc = -ENOMEM; + goto out_driver; + } + dev_set_name(netiucv_dev, "netiucv"); + netiucv_dev->bus = &iucv_bus; + netiucv_dev->parent = iucv_root; + netiucv_dev->release = (void (*)(struct device *))kfree; + netiucv_dev->driver = &netiucv_driver; + rc = device_register(netiucv_dev); + if (rc) + goto out_driver; netiucv_banner(); return rc; +out_driver: + driver_unregister(&netiucv_driver); out_iucv: iucv_unregister(&netiucv_handler, 1); out_dbf: diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 74c49d9..d53621c 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1,7 +1,7 @@ /* * drivers/s390/net/qeth_core_main.c * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, @@ -4195,6 +4195,50 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev) card->discipline.ccwgdriver->shutdown(gdev); } +static int qeth_core_prepare(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline.ccwgdriver && + card->discipline.ccwgdriver->prepare) + return card->discipline.ccwgdriver->prepare(gdev); + return 0; +} + +static void qeth_core_complete(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline.ccwgdriver && + card->discipline.ccwgdriver->complete) + card->discipline.ccwgdriver->complete(gdev); +} + +static int qeth_core_freeze(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline.ccwgdriver && + card->discipline.ccwgdriver->freeze) + return card->discipline.ccwgdriver->freeze(gdev); + return 0; +} + +static int qeth_core_thaw(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline.ccwgdriver && + card->discipline.ccwgdriver->thaw) + return card->discipline.ccwgdriver->thaw(gdev); + return 0; +} + +static int qeth_core_restore(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline.ccwgdriver && + card->discipline.ccwgdriver->restore) + return card->discipline.ccwgdriver->restore(gdev); + return 0; +} + static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .owner = THIS_MODULE, .name = "qeth", @@ -4204,6 +4248,11 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, .shutdown = qeth_core_shutdown, + .prepare = qeth_core_prepare, + .complete = qeth_core_complete, + .freeze = qeth_core_freeze, + .thaw = qeth_core_thaw, + .restore = qeth_core_restore, }; static ssize_t diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c 
index ecd3d06..81d7f26 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1,7 +1,7 @@ /* * drivers/s390/net/qeth_l2_main.c * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, @@ -1141,12 +1141,62 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev) qeth_clear_qdio_buffers(card); } +static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + if (card->dev) + netif_device_detach(card->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + if (card->state == CARD_STATE_UP) { + card->use_hard_stop = 1; + __qeth_l2_set_offline(card->gdev, 1); + } else + __qeth_l2_set_offline(card->gdev, 0); + return 0; +} + +static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + + if (gdev->state == CCWGROUP_OFFLINE) + goto out; + + if (card->state == CARD_STATE_RECOVER) { + rc = __qeth_l2_set_online(card->gdev, 1); + if (rc) { + if (card->dev) { + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + } + } + } else + rc = __qeth_l2_set_online(card->gdev, 0); +out: + qeth_set_allowed_threads(card, 0xffffffff, 0); + if (card->dev) + netif_device_attach(card->dev); + if (rc) + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + return rc; +} + struct ccwgroup_driver qeth_l2_ccwgroup_driver = { .probe = qeth_l2_probe_device, .remove = qeth_l2_remove_device, .set_online = qeth_l2_set_online, .set_offline = qeth_l2_set_offline, .shutdown = qeth_l2_shutdown, + .freeze = qeth_l2_pm_suspend, + .thaw = qeth_l2_pm_resume, + .restore = qeth_l2_pm_resume, }; EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6f2386e..5487240 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1,7 +1,7 @@ /* * drivers/s390/net/qeth_l3_main.c * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 
2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, @@ -3283,12 +3283,62 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev) qeth_clear_qdio_buffers(card); } +static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + if (card->dev) + netif_device_detach(card->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + if (card->state == CARD_STATE_UP) { + card->use_hard_stop = 1; + __qeth_l3_set_offline(card->gdev, 1); + } else + __qeth_l3_set_offline(card->gdev, 0); + return 0; +} + +static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + + if (gdev->state == CCWGROUP_OFFLINE) + goto out; + + if (card->state == CARD_STATE_RECOVER) { + rc = __qeth_l3_set_online(card->gdev, 1); + if (rc) { + if (card->dev) { + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + } + } + } else + rc = __qeth_l3_set_online(card->gdev, 0); +out: + qeth_set_allowed_threads(card, 0xffffffff, 0); + if (card->dev) + netif_device_attach(card->dev); + if (rc) + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + return rc; +} + struct ccwgroup_driver qeth_l3_ccwgroup_driver = { .probe = qeth_l3_probe_device, .remove = qeth_l3_remove_device, .set_online = qeth_l3_set_online, .set_offline = qeth_l3_set_offline, .shutdown = qeth_l3_shutdown, + .freeze = qeth_l3_pm_suspend, + .thaw = qeth_l3_pm_resume, + .restore = qeth_l3_pm_resume, }; EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver); diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 164e090..e76a320 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -1,7 +1,8 @@ /* * IUCV special message driver * - * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
2003, 2009 + * * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) * * This program is free software; you can redistribute it and/or modify @@ -40,6 +41,8 @@ MODULE_AUTHOR MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); static struct iucv_path *smsg_path; +/* dummy device used as trigger for PM functions */ +static struct device *smsg_dev; static DEFINE_SPINLOCK(smsg_list_lock); static LIST_HEAD(smsg_list); @@ -132,14 +135,51 @@ void smsg_unregister_callback(char *prefix, kfree(cb); } +static int smsg_pm_freeze(struct device *dev) +{ +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "smsg_pm_freeze\n"); +#endif + if (smsg_path) + iucv_path_sever(smsg_path, NULL); + return 0; +} + +static int smsg_pm_restore_thaw(struct device *dev) +{ + int rc; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "smsg_pm_restore_thaw\n"); +#endif + if (smsg_path) { + memset(smsg_path, 0, sizeof(*smsg_path)); + smsg_path->msglim = 255; + smsg_path->flags = 0; + rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", + NULL, NULL, NULL); + printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); + } + return 0; +} + +static struct dev_pm_ops smsg_pm_ops = { + .freeze = smsg_pm_freeze, + .thaw = smsg_pm_restore_thaw, + .restore = smsg_pm_restore_thaw, +}; + static struct device_driver smsg_driver = { + .owner = THIS_MODULE, .name = "SMSGIUCV", .bus = &iucv_bus, + .pm = &smsg_pm_ops, }; static void __exit smsg_exit(void) { cpcmd("SET SMSG IUCV", NULL, 0, NULL); + device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); } @@ -166,12 +206,29 @@ static int __init smsg_init(void) rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", NULL, NULL, NULL); if (rc) - goto out_free; + goto out_free_path; + smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!smsg_dev) { + rc = -ENOMEM; + goto out_free_path; + } + dev_set_name(smsg_dev, "smsg_iucv"); + smsg_dev->bus = &iucv_bus; + smsg_dev->parent = iucv_root; + smsg_dev->release = (void (*)(struct device *))kfree; + smsg_dev->driver = &smsg_driver; + rc = device_register(smsg_dev); + if (rc) + goto out_free_dev; + cpcmd("SET SMSG IUCV", NULL, 0, NULL); return 0; -out_free: +out_free_dev: + kfree(smsg_dev); +out_free_path: iucv_path_free(smsg_path); + smsg_path = NULL; out_register: iucv_unregister(&smsg_handler, 1); out_driver: diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index b2fe5cd..d9da5c4 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -13,6 +13,36 @@ #define ZFCP_MODEL_PRIV 0x4 +static int zfcp_ccw_suspend(struct ccw_device *cdev) + +{ + struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + + down(&zfcp_data.config_sema); + + zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); + zfcp_erp_wait(adapter); + + up(&zfcp_data.config_sema); + + return 0; +} + +static int zfcp_ccw_activate(struct ccw_device *cdev) + +{ + struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + + zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, + ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, + "ccresu2", NULL); + zfcp_erp_wait(adapter); + flush_work(&adapter->scan_work); + + return 0; +} + static struct ccw_device_id zfcp_ccw_device_id[] = { { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) }, @@ -227,6 +257,9 @@ static struct ccw_driver zfcp_ccw_driver = { .set_offline = zfcp_ccw_set_offline, .notify = 
zfcp_ccw_notify, .shutdown = zfcp_ccw_shutdown, + .freeze = zfcp_ccw_suspend, + .thaw = zfcp_ccw_activate, + .restore = zfcp_ccw_activate, }; /**
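
For illustration, a minimal sketch (not part of the commit above) of how a CCW device driver could wire up the freeze/thaw/restore entries that this series adds to struct ccw_driver, following the zfcp pattern of quiescing on freeze and re-activating on thaw/restore. The "foo" driver name, its device ID and the empty callback bodies are hypothetical placeholders; .probe, .set_online and the other usual entries are omitted for brevity.

#include <linux/module.h>
#include <asm/ccwdev.h>

/* Quiesce the device before the hibernation image is written. */
static int foo_ccw_freeze(struct ccw_device *cdev)
{
	/* driver-specific shutdown of outstanding I/O would go here */
	return 0;
}

/* Bring the device back into operation after thaw or restore. */
static int foo_ccw_activate(struct ccw_device *cdev)
{
	/* driver-specific re-initialization would go here */
	return 0;
}

static struct ccw_device_id foo_ccw_ids[] = {
	{ CCW_DEVICE(0x1731, 0x3) },	/* hypothetical control unit type/model */
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, foo_ccw_ids);

static struct ccw_driver foo_ccw_driver = {
	.owner   = THIS_MODULE,
	.name    = "foo",
	.ids     = foo_ccw_ids,
	.freeze  = foo_ccw_freeze,
	.thaw    = foo_ccw_activate,
	.restore = foo_ccw_activate,
};

static int __init foo_init(void)
{
	return ccw_driver_register(&foo_ccw_driver);
}

static void __exit foo_exit(void)
{
	ccw_driver_unregister(&foo_ccw_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

With this in place, the ccw bus dev_pm_ops added in device.c above call foo_ccw_freeze() for online devices before the hibernation image is written, and foo_ccw_activate() on thaw or restore.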
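
The netiucv and smsgiucv changes use a different trick: because those drivers own no real devices, each registers a dummy device on the IUCV bus purely so that its dev_pm_ops callbacks are invoked during suspend and resume. A condensed sketch of that pattern follows, assuming the real iucv_bus and iucv_root objects used by the patch; the "bar" names and empty callback bodies are hypothetical.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>	/* declares iucv_bus and iucv_root */

static int bar_pm_freeze(struct device *dev)
{
	/* sever IUCV paths / stop activity before hibernation */
	return 0;
}

static int bar_pm_restore_thaw(struct device *dev)
{
	/* re-establish IUCV paths after resume */
	return 0;
}

static struct dev_pm_ops bar_pm_ops = {
	.freeze  = bar_pm_freeze,
	.thaw    = bar_pm_restore_thaw,
	.restore = bar_pm_restore_thaw,
};

static struct device_driver bar_driver = {
	.owner = THIS_MODULE,
	.name  = "bar",
	.bus   = &iucv_bus,
	.pm    = &bar_pm_ops,
};

/* dummy device so the PM core has something to invoke the callbacks on */
static struct device *bar_dev;

static int bar_setup_pm_device(void)
{
	int rc;

	bar_dev = kzalloc(sizeof(*bar_dev), GFP_KERNEL);
	if (!bar_dev)
		return -ENOMEM;
	dev_set_name(bar_dev, "bar");
	bar_dev->bus = &iucv_bus;
	bar_dev->parent = iucv_root;
	bar_dev->release = (void (*)(struct device *))kfree;
	bar_dev->driver = &bar_driver;
	rc = device_register(bar_dev);
	if (rc)
		kfree(bar_dev);
	return rc;
}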