Diffstat (limited to 'net')
-rw-r--r--  net/core/skbuff.c              |   8
-rw-r--r--  net/core/sock.c                |   2
-rw-r--r--  net/ipv4/inet_timewait_sock.c  |   3
-rw-r--r--  net/iucv/af_iucv.c             | 147
-rw-r--r--  net/iucv/iucv.c                | 268
5 files changed, 423 insertions(+), 5 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1a94a30..5c93435 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
@@ -201,6 +202,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
 	skb->end = skb->tail + size;
+	kmemcheck_annotate_bitfield(skb, flags1);
+	kmemcheck_annotate_bitfield(skb, flags2);
 	/* make sure we initialize shinfo sequentially */
 	shinfo = skb_shinfo(skb);
 	atomic_set(&shinfo->dataref, 1);
@@ -217,6 +220,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		struct sk_buff *child = skb + 1;
 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+		kmemcheck_annotate_bitfield(child, flags1);
+		kmemcheck_annotate_bitfield(child, flags2);
 		skb->fclone = SKB_FCLONE_ORIG;
 		atomic_set(fclone_ref, 1);
 
@@ -635,6 +640,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 		if (!n)
 			return NULL;
+
+		kmemcheck_annotate_bitfield(n, flags1);
+		kmemcheck_annotate_bitfield(n, flags2);
 		n->fclone = SKB_FCLONE_UNAVAILABLE;
 	}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 06e26b7..b0ba569 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -945,6 +945,8 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 		sk = kmalloc(prot->obj_size, priority);
 
 	if (sk != NULL) {
+		kmemcheck_annotate_bitfield(sk, flags);
+
 		if (security_sk_alloc(sk, family, priority))
 			goto out_free;
 
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 68a8d89..61283f9 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <net/inet_hashtables.h>
 #include <net/inet_timewait_sock.h>
 #include <net/ip.h>
@@ -120,6 +121,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 	if (tw != NULL) {
 		const struct inet_sock *inet = inet_sk(sk);
 
+		kmemcheck_annotate_bitfield(tw, flags);
+
 		/* Give us an identity. */
 		tw->tw_daddr = inet->daddr;
 		tw->tw_rcv_saddr = inet->rcv_saddr;
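The three hunks above apply one idiom: structures whose bitfields share a machine word (sk_buff, sock, inet_timewait_sock) are annotated immediately after allocation, so kmemcheck treats the whole word as initialized instead of flagging a read of one flag because a neighbouring flag was never written. A minimal sketch of the idiom, assuming the historical <linux/kmemcheck.h> API of this era; the structure and allocator below are illustrative, not part of the patch:

#include <linux/kmemcheck.h>
#include <linux/slab.h>

/* Bitfields sharing one word are bracketed by kmemcheck markers so the
 * word can be annotated as a unit (mirrors flags1/flags2 in sk_buff). */
struct demo_flags {
	kmemcheck_bitfield_begin(flags);
	unsigned int	active:1,
			urgent:1,
			seen:1;
	kmemcheck_bitfield_end(flags);
};

static struct demo_flags *demo_alloc(gfp_t gfp)
{
	struct demo_flags *d = kmalloc(sizeof(*d), gfp);

	if (d)
		/* Mark the whole bitfield word initialized so a later read
		 * of one flag is not reported as an uninitialized access. */
		kmemcheck_annotate_bitfield(d, flags);
	return d;
}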
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a9b3a6f..656cbd1 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1,11 +1,12 @@
 /*
- *  linux/net/iucv/af_iucv.c
- *
  *  IUCV protocol stack for Linux on zSeries
  *
- *  Copyright 2006 IBM Corporation
+ *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
+ *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *  PM functions:
+ *		Ursula Braun <ursula.braun@de.ibm.com>
 */
 
 #define KMSG_COMPONENT "af_iucv"
@@ -90,6 +91,122 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
 	memcpy(&dst[8], src, 8);
 }
 
+static int afiucv_pm_prepare(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_prepare\n");
+#endif
+	return 0;
+}
+
+static void afiucv_pm_complete(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_complete\n");
+#endif
+	return;
+}
+
+/**
+ * afiucv_pm_freeze() - Freeze PM callback
+ * @dev:	AFIUCV dummy device
+ *
+ * Sever all established IUCV communication pathes
+ */
+static int afiucv_pm_freeze(struct device *dev)
+{
+	struct iucv_sock *iucv;
+	struct sock *sk;
+	struct hlist_node *node;
+	int err = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_freeze\n");
+#endif
+	read_lock(&iucv_sk_list.lock);
+	sk_for_each(sk, node, &iucv_sk_list.head) {
+		iucv = iucv_sk(sk);
+		skb_queue_purge(&iucv->send_skb_q);
+		skb_queue_purge(&iucv->backlog_skb_q);
+		switch (sk->sk_state) {
+		case IUCV_SEVERED:
+		case IUCV_DISCONN:
+		case IUCV_CLOSING:
+		case IUCV_CONNECTED:
+			if (iucv->path) {
+				err = iucv_path_sever(iucv->path, NULL);
+				iucv_path_free(iucv->path);
+				iucv->path = NULL;
+			}
+			break;
+		case IUCV_OPEN:
+		case IUCV_BOUND:
+		case IUCV_LISTEN:
+		case IUCV_CLOSED:
+		default:
+			break;
+		}
+	}
+	read_unlock(&iucv_sk_list.lock);
+	return err;
+}
+
+/**
+ * afiucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev:	AFIUCV dummy device
+ *
+ * socket clean up after freeze
+ */
+static int afiucv_pm_restore_thaw(struct device *dev)
+{
+	struct iucv_sock *iucv;
+	struct sock *sk;
+	struct hlist_node *node;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
+#endif
+	read_lock(&iucv_sk_list.lock);
+	sk_for_each(sk, node, &iucv_sk_list.head) {
+		iucv = iucv_sk(sk);
+		switch (sk->sk_state) {
+		case IUCV_CONNECTED:
+			sk->sk_err = EPIPE;
+			sk->sk_state = IUCV_DISCONN;
+			sk->sk_state_change(sk);
+			break;
+		case IUCV_DISCONN:
+		case IUCV_SEVERED:
+		case IUCV_CLOSING:
+		case IUCV_LISTEN:
+		case IUCV_BOUND:
+		case IUCV_OPEN:
+		default:
+			break;
+		}
+	}
+	read_unlock(&iucv_sk_list.lock);
+	return 0;
+}
+
+static struct dev_pm_ops afiucv_pm_ops = {
+	.prepare = afiucv_pm_prepare,
+	.complete = afiucv_pm_complete,
+	.freeze = afiucv_pm_freeze,
+	.thaw = afiucv_pm_restore_thaw,
+	.restore = afiucv_pm_restore_thaw,
+};
+
+static struct device_driver af_iucv_driver = {
+	.owner = THIS_MODULE,
+	.name = "afiucv",
+	.bus  = &iucv_bus,
+	.pm   = &afiucv_pm_ops,
+};
+
+/* dummy device used as trigger for PM functions */
+static struct device *af_iucv_dev;
+
 /**
  * iucv_msg_length() - Returns the length of an iucv message.
  * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
@@ -1556,8 +1673,30 @@ static int __init afiucv_init(void)
 	err = sock_register(&iucv_sock_family_ops);
 	if (err)
 		goto out_proto;
+	/* establish dummy device */
+	err = driver_register(&af_iucv_driver);
+	if (err)
+		goto out_sock;
+	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!af_iucv_dev) {
+		err = -ENOMEM;
+		goto out_driver;
+	}
+	dev_set_name(af_iucv_dev, "af_iucv");
+	af_iucv_dev->bus = &iucv_bus;
+	af_iucv_dev->parent = iucv_root;
+	af_iucv_dev->release = (void (*)(struct device *))kfree;
+	af_iucv_dev->driver = &af_iucv_driver;
+	err = device_register(af_iucv_dev);
+	if (err)
+		goto out_driver;
+
 	return 0;
 
+out_driver:
+	driver_unregister(&af_iucv_driver);
+out_sock:
+	sock_unregister(PF_IUCV);
 out_proto:
 	proto_unregister(&iucv_proto);
 out_iucv:
@@ -1568,6 +1707,8 @@ out:
 
 static void __exit afiucv_exit(void)
 {
+	device_unregister(af_iucv_dev);
+	driver_unregister(&af_iucv_driver);
 	sock_unregister(PF_IUCV);
 	proto_unregister(&iucv_proto);
 	iucv_unregister(&af_iucv_handler, 0);
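af_iucv manages no physical device, so the patch registers a driver and a dummy device on the iucv bus purely to give the PM core an object through which to deliver the afiucv_pm_* callbacks. A condensed sketch of that pattern under hypothetical names (demo_* stands in for afiucv_pm_ops, af_iucv_driver and af_iucv_dev):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/slab.h>

extern struct bus_type demo_bus;		/* stands in for iucv_bus */

static int demo_pm_freeze(struct device *dev)	{ return 0; }
static int demo_pm_thaw(struct device *dev)	{ return 0; }

static struct dev_pm_ops demo_pm_ops = {
	.freeze	 = demo_pm_freeze,
	.thaw	 = demo_pm_thaw,
	.restore = demo_pm_thaw,	/* thaw and restore share a handler */
};

static struct device_driver demo_driver = {
	.owner	= THIS_MODULE,
	.name	= "demo",
	.bus	= &demo_bus,
	.pm	= &demo_pm_ops,
};

static struct device *demo_dev;

static int __init demo_init(void)
{
	int err = driver_register(&demo_driver);

	if (err)
		return err;
	demo_dev = kzalloc(sizeof(*demo_dev), GFP_KERNEL);
	if (!demo_dev) {
		driver_unregister(&demo_driver);
		return -ENOMEM;
	}
	dev_set_name(demo_dev, "demo");
	demo_dev->bus = &demo_bus;
	/* kfree doubles as the release hook, as in afiucv_init() above */
	demo_dev->release = (void (*)(struct device *))kfree;
	demo_dev->driver = &demo_driver;
	err = device_register(demo_dev);
	if (err) {
		put_device(demo_dev);
		driver_unregister(&demo_driver);
	}
	return err;
}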
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 61e8038..c833481d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1,7 +1,8 @@
 /*
  * IUCV base infrastructure.
  *
- * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001, 2009
+ *
  * Author(s):
  *	Original source:
  *		Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
@@ -10,6 +11,8 @@
  *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  *	Rewritten for af_iucv:
  *		Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	PM functions:
+ *		Ursula Braun (ursula.braun@de.ibm.com)
  *
  * Documentation used:
  *    The original source
@@ -45,6 +48,7 @@
 #include <linux/err.h>
 #include <linux/device.h>
 #include <linux/cpu.h>
+#include <linux/reboot.h>
 #include <net/iucv/iucv.h>
 #include <asm/atomic.h>
 #include <asm/ebcdic.h>
@@ -75,9 +79,24 @@ static int iucv_bus_match(struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
+static int iucv_pm_prepare(struct device *);
+static void iucv_pm_complete(struct device *);
+static int iucv_pm_freeze(struct device *);
+static int iucv_pm_thaw(struct device *);
+static int iucv_pm_restore(struct device *);
+
+static struct dev_pm_ops iucv_pm_ops = {
+	.prepare = iucv_pm_prepare,
+	.complete = iucv_pm_complete,
+	.freeze = iucv_pm_freeze,
+	.thaw = iucv_pm_thaw,
+	.restore = iucv_pm_restore,
+};
+
 struct bus_type iucv_bus = {
 	.name = "iucv",
 	.match = iucv_bus_match,
+	.pm = &iucv_pm_ops,
 };
 EXPORT_SYMBOL(iucv_bus);
 
@@ -147,6 +166,7 @@ enum iucv_command_codes {
 	IUCV_RESUME = 14,
 	IUCV_SEVER = 15,
 	IUCV_SETMASK = 16,
+	IUCV_SETCONTROLMASK = 17,
 };
 
 /*
@@ -364,6 +384,18 @@ static void iucv_allow_cpu(void *data)
 	parm->set_mask.ipmask = 0xf8;
 	iucv_call_b2f0(IUCV_SETMASK, parm);
 
+	/*
+	 * Enable all iucv control interrupts.
+	 * ipmask contains bits for the different interrupts
+	 *	0x80 - Flag to allow pending connections interrupts
+	 *	0x40 - Flag to allow connection complete interrupts
+	 *	0x20 - Flag to allow connection severed interrupts
+	 *	0x10 - Flag to allow connection quiesced interrupts
+	 *	0x08 - Flag to allow connection resumed interrupts
+	 */
+	memset(parm, 0, sizeof(union iucv_param));
+	parm->set_mask.ipmask = 0xf8;
+	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
 	/* Set indication that iucv interrupts are allowed for this cpu. */
 	cpu_set(cpu, iucv_irq_cpumask);
 }
@@ -389,6 +421,31 @@ static void iucv_block_cpu(void *data)
 }
 
 /**
+ * iucv_block_cpu_almost
+ * @data: unused
+ *
+ * Allow connection-severed interrupts only on this cpu.
+ */
+static void iucv_block_cpu_almost(void *data)
+{
+	int cpu = smp_processor_id();
+	union iucv_param *parm;
+
+	/* Allow iucv control interrupts only */
+	parm = iucv_param_irq[cpu];
+	memset(parm, 0, sizeof(union iucv_param));
+	parm->set_mask.ipmask = 0x08;
+	iucv_call_b2f0(IUCV_SETMASK, parm);
+	/* Allow iucv-severed interrupt only */
+	memset(parm, 0, sizeof(union iucv_param));
+	parm->set_mask.ipmask = 0x20;
+	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
+
+	/* Clear indication that iucv interrupts are allowed for this cpu. */
+	cpu_clear(cpu, iucv_irq_cpumask);
+}
+
+/**
  * iucv_declare_cpu
  * @data: unused
  *
@@ -758,6 +815,28 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
 }
 EXPORT_SYMBOL(iucv_unregister);
 
+static int iucv_reboot_event(struct notifier_block *this,
+			     unsigned long event, void *ptr)
+{
+	int i, rc;
+
+	get_online_cpus();
+	on_each_cpu(iucv_block_cpu, NULL, 1);
+	preempt_disable();
+	for (i = 0; i < iucv_max_pathid; i++) {
+		if (iucv_path_table[i])
+			rc = iucv_sever_pathid(i, NULL);
+	}
+	preempt_enable();
+	put_online_cpus();
+	iucv_disable();
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iucv_reboot_notifier = {
+	.notifier_call = iucv_reboot_event,
+};
+
 /**
  * iucv_path_accept
  * @path: address of iucv path structure
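Severing every active path at reboot rides on the kernel's standard reboot-notifier chain: register a notifier_block whose callback quiesces the subsystem before the reboot proceeds, which is exactly what iucv_reboot_event() above does. The mechanism in isolation, with an illustrative callback body:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int demo_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	/* quiesce communication state here; iucv_reboot_event() blocks
	 * interrupts on all CPUs and severs every pathid at this point */
	return NOTIFY_DONE;
}

static struct notifier_block demo_reboot_notifier = {
	.notifier_call = demo_reboot_event,
};

static int __init demo_init(void)
{
	return register_reboot_notifier(&demo_reboot_notifier);
}

static void __exit demo_exit(void)
{
	unregister_reboot_notifier(&demo_reboot_notifier);
}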
@@ -777,6 +856,10 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	/* Prepare parameter block. */
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
@@ -792,6 +875,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
 		path->msglim = parm->ctrl.ipmsglim;
 		path->flags = parm->ctrl.ipflags1;
 	}
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -821,6 +905,10 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 
 	spin_lock_bh(&iucv_table_lock);
 	iucv_cleanup_queue();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->ctrl.ipmsglim = path->msglim;
@@ -855,6 +943,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 			rc = -EIO;
 		}
 	}
+out:
 	spin_unlock_bh(&iucv_table_lock);
 	return rc;
 }
@@ -876,12 +965,17 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (userdata)
 		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
 	parm->ctrl.ippathid = path->pathid;
 	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -903,12 +997,17 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (userdata)
 		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
 	parm->ctrl.ippathid = path->pathid;
 	rc = iucv_call_b2f0(IUCV_RESUME, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -927,6 +1026,10 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	preempt_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	if (iucv_active_cpu != smp_processor_id())
 		spin_lock_bh(&iucv_table_lock);
 	rc = iucv_sever_pathid(path->pathid, userdata);
@@ -934,6 +1037,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 	list_del_init(&path->list);
 	if (iucv_active_cpu != smp_processor_id())
 		spin_unlock_bh(&iucv_table_lock);
+out:
 	preempt_enable();
 	return rc;
 }
@@ -956,6 +1060,10 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->purge.ippathid = path->pathid;
@@ -967,6 +1075,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
 		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
 		msg->tag = parm->purge.ipmsgtag;
 	}
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -1043,6 +1152,10 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
 	if (msg->flags & IUCV_IPRMDATA)
 		return iucv_message_receive_iprmdata(path, msg, flags,
 						     buffer, size, residual);
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->db.ipbfadr1 = (u32)(addr_t) buffer;
@@ -1058,6 +1171,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
 		if (residual)
 			*residual = parm->db.ipbfln1f;
 	}
+out:
 	return rc;
 }
 EXPORT_SYMBOL(__iucv_message_receive);
@@ -1111,6 +1225,10 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->db.ippathid = path->pathid;
@@ -1118,6 +1236,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
 	parm->db.iptrgcls = msg->class;
 	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
 	rc = iucv_call_b2f0(IUCV_REJECT, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -1145,6 +1264,10 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (flags & IUCV_IPRMDATA) {
@@ -1162,6 +1285,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
 		parm->db.iptrgcls = msg->class;
 	}
 	rc = iucv_call_b2f0(IUCV_REPLY, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -1190,6 +1314,10 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
 	union iucv_param *parm;
 	int rc;
 
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (flags & IUCV_IPRMDATA) {
@@ -1212,6 +1340,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
 	rc = iucv_call_b2f0(IUCV_SEND, parm);
 	if (!rc)
 		msg->id = parm->db.ipmsgid;
+out:
 	return rc;
 }
 EXPORT_SYMBOL(__iucv_message_send);
@@ -1272,6 +1401,10 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (flags & IUCV_IPRMDATA) {
@@ -1297,6 +1430,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
 	rc = iucv_call_b2f0(IUCV_SEND, parm);
 	if (!rc)
 		msg->id = parm->db.ipmsgid;
+out:
 	local_bh_enable();
 	return rc;
 }
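The guard hunks from iucv_path_accept() through iucv_message_send2way() all add the same preamble: if the current CPU is not in iucv_buffer_cpumask, no interrupt buffer is declared there (for instance after a PM freeze, or before iucv_enable() has run), so the hypervisor call must be skipped and the primitive fails with -EIO. Distilled into one illustrative wrapper in the bh-disabled form most of the primitives use (demo_iucv_call() is not a function from the patch):

static int demo_iucv_call(int cmd, union iucv_param *parm)
{
	int rc;

	local_bh_disable();
	/* no buffer declared on this CPU means IUCV is unusable here */
	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	rc = iucv_call_b2f0(cmd, parm);
out:
	local_bh_enable();
	return rc;
}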
@@ -1687,6 +1821,130 @@ static void iucv_external_interrupt(u16 code)
 	spin_unlock(&iucv_queue_lock);
 }
 
+static int iucv_pm_prepare(struct device *dev)
+{
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_INFO "iucv_pm_prepare\n");
+#endif
+	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
+		rc = dev->driver->pm->prepare(dev);
+	return rc;
+}
+
+static void iucv_pm_complete(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_INFO "iucv_pm_complete\n");
+#endif
+	if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
+		dev->driver->pm->complete(dev);
+}
+
+/**
+ * iucv_path_table_empty() - determine if iucv path table is empty
+ *
+ * Returns 0 if there are still iucv pathes defined
+ *	   1 if there are no iucv pathes defined
+ */
+int iucv_path_table_empty(void)
+{
+	int i;
+
+	for (i = 0; i < iucv_max_pathid; i++) {
+		if (iucv_path_table[i])
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ * iucv_pm_freeze() - Freeze PM callback
+ * @dev:	iucv-based device
+ *
+ * disable iucv interrupts
+ * invoke callback function of the iucv-based driver
+ * shut down iucv, if no iucv-pathes are established anymore
+ */
+static int iucv_pm_freeze(struct device *dev)
+{
+	int cpu;
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "iucv_pm_freeze\n");
+#endif
+	for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
+		smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
+	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
+		rc = dev->driver->pm->freeze(dev);
+	if (iucv_path_table_empty())
+		iucv_disable();
+	return rc;
+}
+
+/**
+ * iucv_pm_thaw() - Thaw PM callback
+ * @dev:	iucv-based device
+ *
+ * make iucv ready for use again: allocate path table, declare interrupt buffers
+ *				  and enable iucv interrupts
+ * invoke callback function of the iucv-based driver
+ */
+static int iucv_pm_thaw(struct device *dev)
+{
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "iucv_pm_thaw\n");
+#endif
+	if (!iucv_path_table) {
+		rc = iucv_enable();
+		if (rc)
+			goto out;
+	}
+	if (cpus_empty(iucv_irq_cpumask)) {
+		if (iucv_nonsmp_handler)
+			/* enable interrupts on one cpu */
+			iucv_allow_cpu(NULL);
+		else
+			/* enable interrupts on all cpus */
+			iucv_setmask_mp();
+	}
+	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
+		rc = dev->driver->pm->thaw(dev);
+out:
+	return rc;
+}
+
+/**
+ * iucv_pm_restore() - Restore PM callback
+ * @dev:	iucv-based device
+ *
+ * make iucv ready for use again: allocate path table, declare interrupt buffers
+ *				  and enable iucv interrupts
+ * invoke callback function of the iucv-based driver
+ */
+static int iucv_pm_restore(struct device *dev)
+{
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
+#endif
+	if (cpus_empty(iucv_irq_cpumask)) {
+		rc = iucv_query_maxconn();
+		rc = iucv_enable();
+		if (rc)
+			goto out;
+	}
+	if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
+		rc = dev->driver->pm->restore(dev);
+out:
+	return rc;
+}
+
 /**
  * iucv_init
  *
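iucv_pm_freeze() above first runs iucv_block_cpu_almost() on every CPU that still has IUCV interrupts enabled, waiting for each call, and only then invokes the bound driver's own freeze callback. The cross-CPU piece of that sequence, isolated under illustrative names (the cpumask helpers are the same pre-2.6.30-era ones the patch itself uses):

#include <linux/cpumask.h>
#include <linux/smp.h>

static cpumask_t demo_cpumask;		/* CPUs still needing quiescing */

static void demo_quiesce_cpu(void *unused)
{
	/* per-CPU work, e.g. rewriting this CPU's interrupt masks */
}

static void demo_quiesce_all(void)
{
	int cpu;

	/* run demo_quiesce_cpu() on each CPU in the mask; the final
	 * argument 1 makes each call synchronous */
	for_each_cpu_mask_nr(cpu, demo_cpumask)
		smp_call_function_single(cpu, demo_quiesce_cpu, NULL, 1);
}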
@@ -1740,15 +1998,20 @@ static int __init iucv_init(void)
 	rc = register_hotcpu_notifier(&iucv_cpu_notifier);
 	if (rc)
 		goto out_free;
+	rc = register_reboot_notifier(&iucv_reboot_notifier);
+	if (rc)
+		goto out_cpu;
 	ASCEBC(iucv_error_no_listener, 16);
 	ASCEBC(iucv_error_no_memory, 16);
 	ASCEBC(iucv_error_pathid, 16);
 	iucv_available = 1;
 	rc = bus_register(&iucv_bus);
 	if (rc)
-		goto out_cpu;
+		goto out_reboot;
 	return 0;
 
+out_reboot:
+	unregister_reboot_notifier(&iucv_reboot_notifier);
 out_cpu:
 	unregister_hotcpu_notifier(&iucv_cpu_notifier);
 out_free:
@@ -1783,6 +2046,7 @@ static void __exit iucv_exit(void)
 	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
 		kfree(p);
 	spin_unlock_irq(&iucv_queue_lock);
+	unregister_reboot_notifier(&iucv_reboot_notifier);
 	unregister_hotcpu_notifier(&iucv_cpu_notifier);
 	for_each_possible_cpu(cpu) {
 		kfree(iucv_param_irq[cpu]);