Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig               |   2 +
-rw-r--r--  drivers/Makefile              |   2 +
-rw-r--r--  drivers/connector/Kconfig     |  13 +
-rw-r--r--  drivers/connector/Makefile    |   3 +
-rw-r--r--  drivers/connector/cn_queue.c  | 173 ++++++++++++
-rw-r--r--  drivers/connector/connector.c | 486 ++++++++++++++++++++++++++++++
6 files changed, 679 insertions(+), 0 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 46d655f..48f446d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -4,6 +4,8 @@ menu "Device Drivers"
 
 source "drivers/base/Kconfig"
 
+source "drivers/connector/Kconfig"
+
 source "drivers/mtd/Kconfig"
 
 source "drivers/parport/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 86c8654..1a109a6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,8 @@ obj-$(CONFIG_PNP) += pnp/
 # default.
 obj-y += char/
 
+obj-$(CONFIG_CONNECTOR) += connector/
+
 # i810fb and intelfb depend on char/agp/
 obj-$(CONFIG_FB_I810) += video/i810/
 obj-$(CONFIG_FB_INTEL) += video/intelfb/
diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig
new file mode 100644
index 0000000..0bc2059
--- /dev/null
+++ b/drivers/connector/Kconfig
@@ -0,0 +1,13 @@
+menu "Connector - unified userspace <-> kernelspace linker"
+
+config CONNECTOR
+ tristate "Connector - unified userspace <-> kernelspace linker"
+ depends on NET
+ ---help---
+ This is the unified userspace <-> kernelspace connector, working
+ on top of the netlink socket protocol.
+
+ Connector support can also be built as a module. If so, the module
+ will be called cn.ko.
+
+endmenu
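
As the help text notes, the connector sits on top of netlink. For orientation only, a minimal userspace listener for this interface might look like the sketch below; it is not part of this patch, it assumes the NETLINK_CONNECTOR protocol number and struct cn_msg definitions added by the include/linux/connector.h header elsewhere in this series, and the 0x123 group/idx value is purely illustrative.

/* Hypothetical example, not part of the patch: listen for connector
 * messages broadcast to a given netlink group (group == cb_id.idx). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>

int main(void)
{
	struct sockaddr_nl addr;
	char buf[1024];
	ssize_t len;
	int s;

	s = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (s < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_pid = getpid();
	addr.nl_groups = 0x123;		/* illustrative idx/group value */

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* Each datagram carries an nlmsghdr followed by a struct cn_msg. */
	while ((len = recv(s, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
		struct cn_msg *msg = NLMSG_DATA(nlh);

		printf("idx=0x%x val=0x%x seq=%u ack=%u len=%u\n",
		       msg->id.idx, msg->id.val, msg->seq, msg->ack, msg->len);
	}

	close(s);
	return 0;
}
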
diff --git a/drivers/connector/Makefile b/drivers/connector/Makefile
new file mode 100644
index 0000000..12ca79e
--- /dev/null
+++ b/drivers/connector/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CONNECTOR) += cn.o
+
+cn-y += cn_queue.o connector.o
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
new file mode 100644
index 0000000..96663218
--- /dev/null
+++ b/drivers/connector/cn_queue.c
@@ -0,0 +1,173 @@
+/*
+ * cn_queue.c
+ *
+ * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/suspend.h>
+#include <linux/connector.h>
+#include <linux/delay.h>
+
+static void cn_queue_wrapper(void *data)
+{
+ struct cn_callback_entry *cbq = data;
+
+ cbq->cb->callback(cbq->cb->priv);
+ cbq->destruct_data(cbq->ddata);
+ cbq->ddata = NULL;
+}
+
+static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb)
+{
+ struct cn_callback_entry *cbq;
+
+ cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
+ if (!cbq) {
+ printk(KERN_ERR "Failed to create new callback queue.\n");
+ return NULL;
+ }
+
+ cbq->cb = cb;
+ INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq);
+ return cbq;
+}
+
+static void cn_queue_free_callback(struct cn_callback_entry *cbq)
+{
+ cancel_delayed_work(&cbq->work);
+ flush_workqueue(cbq->pdev->cn_queue);
+
+ kfree(cbq);
+}
+
+int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
+{
+ return ((i1->idx == i2->idx) && (i1->val == i2->val));
+}
+
+int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
+{
+ struct cn_callback_entry *cbq, *__cbq;
+ int found = 0;
+
+ cbq = cn_queue_alloc_callback_entry(cb);
+ if (!cbq)
+ return -ENOMEM;
+
+ atomic_inc(&dev->refcnt);
+ cbq->pdev = dev;
+
+ spin_lock_bh(&dev->queue_lock);
+ list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
+ if (cn_cb_equal(&__cbq->cb->id, &cb->id)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ list_add_tail(&cbq->callback_entry, &dev->queue_list);
+ spin_unlock_bh(&dev->queue_lock);
+
+ if (found) {
+ atomic_dec(&dev->refcnt);
+ cn_queue_free_callback(cbq);
+ return -EINVAL;
+ }
+
+ cbq->nls = dev->nls;
+ cbq->seq = 0;
+ cbq->group = cbq->cb->id.idx;
+
+ return 0;
+}
+
+void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
+{
+ struct cn_callback_entry *cbq, *n;
+ int found = 0;
+
+ spin_lock_bh(&dev->queue_lock);
+ list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
+ if (cn_cb_equal(&cbq->cb->id, id)) {
+ list_del(&cbq->callback_entry);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&dev->queue_lock);
+
+ if (found) {
+ cn_queue_free_callback(cbq);
+ atomic_dec_and_test(&dev->refcnt);
+ }
+}
+
+struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
+{
+ struct cn_queue_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ snprintf(dev->name, sizeof(dev->name), "%s", name);
+ atomic_set(&dev->refcnt, 0);
+ INIT_LIST_HEAD(&dev->queue_list);
+ spin_lock_init(&dev->queue_lock);
+
+ dev->nls = nls;
+ dev->netlink_groups = 0;
+
+ dev->cn_queue = create_workqueue(dev->name);
+ if (!dev->cn_queue) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+void cn_queue_free_dev(struct cn_queue_dev *dev)
+{
+ struct cn_callback_entry *cbq, *n;
+
+ flush_workqueue(dev->cn_queue);
+ destroy_workqueue(dev->cn_queue);
+
+ spin_lock_bh(&dev->queue_lock);
+ list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
+ list_del(&cbq->callback_entry);
+ spin_unlock_bh(&dev->queue_lock);
+
+ while (atomic_read(&dev->refcnt)) {
+ printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
+ dev->name, atomic_read(&dev->refcnt));
+ msleep(1000);
+ }
+
+ kfree(dev);
+ dev = NULL;
+}
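
cn_queue.c only maintains the per-device callback list and workqueue; in-kernel users are meant to go through cn_add_callback()/cn_del_callback(), which connector.c below layers on top of cn_queue_add_callback()/cn_queue_del_callback(). A minimal in-kernel user might look like the following sketch; it is not part of this patch, and the cb_id values and names are illustrative only.

/* Hypothetical example module, not part of the patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/connector.h>

/* Illustrative id; a real user must pick idx/val that do not clash
 * with other registered connector users. */
static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

/* In this version of the API the callback receives the incoming
 * struct cn_msg through the opaque priv pointer. */
static void example_callback(void *data)
{
	struct cn_msg *msg = data;

	printk(KERN_INFO "cn_example: seq=%u ack=%u len=%u\n",
	       msg->seq, msg->ack, msg->len);
}

static int __init example_init(void)
{
	return cn_add_callback(&example_id, "cn_example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
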
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
new file mode 100644
index 0000000..aaf6d46
--- /dev/null
+++ b/drivers/connector/connector.c
@@ -0,0 +1,486 @@
+/*
+ * connector.c
+ *
+ * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/moduleparam.h>
+#include <linux/connector.h>
+
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
+
+static u32 cn_idx = CN_IDX_CONNECTOR;
+static u32 cn_val = CN_VAL_CONNECTOR;
+
+module_param(cn_idx, uint, 0);
+module_param(cn_val, uint, 0);
+MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
+MODULE_PARM_DESC(cn_val, "Connector's main device val.");
+
+static DECLARE_MUTEX(notify_lock);
+static LIST_HEAD(notify_list);
+
+static struct cn_dev cdev;
+
+int cn_already_initialized = 0;
+
+/*
+ * msg->seq and msg->ack are used to determine message genealogy.
+ * When someone sends a message, it sets a locally unique sequence
+ * number and a random acknowledge number there. The sequence
+ * number may also be copied into nlmsghdr->nlmsg_seq.
+ *
+ * The sequence number is incremented with each message sent.
+ *
+ * If we expect a reply to our message, then the sequence number in
+ * the received message MUST be the same as in the original message,
+ * and the acknowledge number MUST be the original one + 1.
+ *
+ * If we receive a message and its sequence number is not equal to
+ * the one we are expecting, then it is a new message.
+ *
+ * If we receive a message and its sequence number is the same as
+ * the one we are expecting, but its acknowledge number is not equal
+ * to the acknowledge number in the original message + 1, then it is
+ * a new message.
+ *
+ */
+int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
+{
+ struct cn_callback_entry *__cbq;
+ unsigned int size;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct cn_msg *data;
+ struct cn_dev *dev = &cdev;
+ u32 group = 0;
+ int found = 0;
+
+ if (!__group) {
+ spin_lock_bh(&dev->cbdev->queue_lock);
+ list_for_each_entry(__cbq, &dev->cbdev->queue_list,
+ callback_entry) {
+ if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
+ found = 1;
+ group = __cbq->group;
+ }
+ }
+ spin_unlock_bh(&dev->cbdev->queue_lock);
+
+ if (!found)
+ return -ENODEV;
+ } else {
+ group = __group;
+ }
+
+ size = NLMSG_SPACE(sizeof(*msg) + msg->len);
+
+ skb = alloc_skb(size, gfp_mask);
+ if (!skb)
+ return -ENOMEM;
+
+ nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));
+
+ data = NLMSG_DATA(nlh);
+
+ memcpy(data, msg, sizeof(*data) + msg->len);
+
+ NETLINK_CB(skb).dst_group = group;
+
+ netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
+
+ return 0;
+
+nlmsg_failure:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+/*
+ * Callback helper - queues work and sets up the destructor for the given data.
+ */
+static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
+{
+ struct cn_callback_entry *__cbq;
+ struct cn_dev *dev = &cdev;
+ int found = 0;
+
+ spin_lock_bh(&dev->cbdev->queue_lock);
+ list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
+ if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
+ /*
+ * Let's scream if there is some magic and the
+ * data will arrive asynchronously here.
+ * [i.e. netlink messages will be queued].
+ * After the first warning I will fix it
+ * quickly, but now I think it is
+ * impossible. --zbr (2004_04_27).
+ */
+ if (likely(!test_bit(0, &__cbq->work.pending) &&
+ __cbq->ddata == NULL)) {
+ __cbq->cb->priv = msg;
+
+ __cbq->ddata = data;
+ __cbq->destruct_data = destruct_data;
+
+ if (queue_work(dev->cbdev->cn_queue,
+ &__cbq->work))
+ found = 1;
+ } else {
+ printk("%s: cbq->data=%p, "
+ "work->pending=%08lx.\n",
+ __func__, __cbq->ddata,
+ __cbq->work.pending);
+ WARN_ON(1);
+ }
+ break;
+ }
+ }
+ spin_unlock_bh(&dev->cbdev->queue_lock);
+
+ return found ? 0 : -ENODEV;
+}
+
+/*
+ * Skb receive helper - checks skb and msg size and calls callback
+ * helper.
+ */
+static int __cn_rx_skb(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ u32 pid, uid, seq, group;
+ struct cn_msg *msg;
+
+ pid = NETLINK_CREDS(skb)->pid;
+ uid = NETLINK_CREDS(skb)->uid;
+ seq = nlh->nlmsg_seq;
+ group = NETLINK_CB((skb)).dst_group;
+ msg = NLMSG_DATA(nlh);
+
+ return cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
+}
+
+/*
+ * Main netlink receiving function.
+ *
+ * It checks skb and netlink header sizes and calls the skb receive
+ * helper with a shared skb.
+ */
+static void cn_rx_skb(struct sk_buff *__skb)
+{
+ struct nlmsghdr *nlh;
+ u32 len;
+ int err;
+ struct sk_buff *skb;
+
+ skb = skb_get(__skb);
+
+ if (skb->len >= NLMSG_SPACE(0)) {
+ nlh = (struct nlmsghdr *)skb->data;
+
+ if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+ skb->len < nlh->nlmsg_len ||
+ nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ len = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (len > skb->len)
+ len = skb->len;
+
+ err = __cn_rx_skb(skb, nlh);
+ if (err < 0)
+ kfree_skb(skb);
+ }
+
+out:
+ kfree_skb(__skb);
+}
+
+/*
+ * Netlink socket input callback - dequeues the skbs and calls the
+ * main netlink receiving function.
+ */
+static void cn_input(struct sock *sk, int len)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL)
+ cn_rx_skb(skb);
+}
+
+/*
+ * Notification routing.
+ *
+ * Takes an id and checks whether there are notification requests for
+ * its idx and val. If there are such requests, the listeners are
+ * notified with the given notify event.
+ *
+ */
+static void cn_notify(struct cb_id *id, u32 notify_event)
+{
+ struct cn_ctl_entry *ent;
+
+ down(&notify_lock);
+ list_for_each_entry(ent, &notify_list, notify_entry) {
+ int i;
+ struct cn_notify_req *req;
+ struct cn_ctl_msg *ctl = ent->msg;
+ int idx_found, val_found;
+
+ idx_found = val_found = 0;
+
+ req = (struct cn_notify_req *)ctl->data;
+ for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
+ if (id->idx >= req->first &&
+ id->idx < req->first + req->range) {
+ idx_found = 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
+ if (id->val >= req->first &&
+ id->val < req->first + req->range) {
+ val_found = 1;
+ break;
+ }
+ }
+
+ if (idx_found && val_found) {
+ struct cn_msg m = { .ack = notify_event, };
+
+ memcpy(&m.id, id, sizeof(m.id));
+ cn_netlink_send(&m, ctl->group, GFP_KERNEL);
+ }
+ }
+ up(&notify_lock);
+}
+
+/*
+ * Callback add routing - adds a callback with the given ID and name.
+ * If a callback with the same ID is already registered, it is not added.
+ *
+ * May sleep.
+ */
+int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
+{
+ int err;
+ struct cn_dev *dev = &cdev;
+ struct cn_callback *cb;
+
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+ if (!cb)
+ return -ENOMEM;
+
+ scnprintf(cb->name, sizeof(cb->name), "%s", name);
+
+ memcpy(&cb->id, id, sizeof(cb->id));
+ cb->callback = callback;
+
+ err = cn_queue_add_callback(dev->cbdev, cb);
+ if (err) {
+ kfree(cb);
+ return err;
+ }
+
+ cn_notify(id, 0);
+
+ return 0;
+}
+
+/*
+ * Callback remove routing - removes the callback
+ * with the given ID.
+ * If there is no registered callback with the given
+ * ID, nothing happens.
+ *
+ * May sleep while waiting for the reference counter to become zero.
+ */
+void cn_del_callback(struct cb_id *id)
+{
+ struct cn_dev *dev = &cdev;
+
+ cn_queue_del_callback(dev->cbdev, id);
+ cn_notify(id, 1);
+}
+
+/*
+ * Checks whether two connector control messages are the same.
+ * Returns 1 if they are the same or if the first one is corrupted.
+ */
+static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
+{
+ int i;
+ struct cn_notify_req *req1, *req2;
+
+ if (m1->idx_notify_num != m2->idx_notify_num)
+ return 0;
+
+ if (m1->val_notify_num != m2->val_notify_num)
+ return 0;
+
+ if (m1->len != m2->len)
+ return 0;
+
+ if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
+ m1->len)
+ return 1;
+
+ req1 = (struct cn_notify_req *)m1->data;
+ req2 = (struct cn_notify_req *)m2->data;
+
+ for (i = 0; i < m1->idx_notify_num; ++i) {
+ if (req1->first != req2->first || req1->range != req2->range)
+ return 0;
+ req1++;
+ req2++;
+ }
+
+ for (i = 0; i < m1->val_notify_num; ++i) {
+ if (req1->first != req2->first || req1->range != req2->range)
+ return 0;
+ req1++;
+ req2++;
+ }
+
+ return 1;
+}
+
+/*
+ * The main connector device's callback.
+ *
+ * Used to process notification registration and removal requests.
+ */
+static void cn_callback(void *data)
+{
+ struct cn_msg *msg = data;
+ struct cn_ctl_msg *ctl;
+ struct cn_ctl_entry *ent;
+ u32 size;
+
+ if (msg->len < sizeof(*ctl))
+ return;
+
+ ctl = (struct cn_ctl_msg *)msg->data;
+
+ size = (sizeof(*ctl) + ((ctl->idx_notify_num +
+ ctl->val_notify_num) *
+ sizeof(struct cn_notify_req)));
+
+ if (msg->len != size)
+ return;
+
+ if (ctl->len + sizeof(*ctl) != msg->len)
+ return;
+
+ /*
+ * Remove notification.
+ */
+ if (ctl->group == 0) {
+ struct cn_ctl_entry *n;
+
+ down(&notify_lock);
+ list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
+ if (cn_ctl_msg_equals(ent->msg, ctl)) {
+ list_del(&ent->notify_entry);
+ kfree(ent);
+ }
+ }
+ up(&notify_lock);
+
+ return;
+ }
+
+ size += sizeof(*ent);
+
+ ent = kzalloc(size, GFP_KERNEL);
+ if (!ent)
+ return;
+
+ ent->msg = (struct cn_ctl_msg *)(ent + 1);
+
+ memcpy(ent->msg, ctl, size - sizeof(*ent));
+
+ down(&notify_lock);
+ list_add(&ent->notify_entry, &notify_list);
+ up(&notify_lock);
+}
+
+static int __init cn_init(void)
+{
+ struct cn_dev *dev = &cdev;
+ int err;
+
+ dev->input = cn_input;
+ dev->id.idx = cn_idx;
+ dev->id.val = cn_val;
+
+ dev->nls = netlink_kernel_create(NETLINK_CONNECTOR,
+ CN_NETLINK_USERS + 0xf,
+ dev->input, THIS_MODULE);
+ if (!dev->nls)
+ return -EIO;
+
+ dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
+ if (!dev->cbdev) {
+ if (dev->nls->sk_socket)
+ sock_release(dev->nls->sk_socket);
+ return -EINVAL;
+ }
+
+ err = cn_add_callback(&dev->id, "connector", &cn_callback);
+ if (err) {
+ cn_queue_free_dev(dev->cbdev);
+ if (dev->nls->sk_socket)
+ sock_release(dev->nls->sk_socket);
+ return -EINVAL;
+ }
+
+ cn_already_initialized = 1;
+
+ return 0;
+}
+
+static void __exit cn_fini(void)
+{
+ struct cn_dev *dev = &cdev;
+
+ cn_already_initialized = 0;
+
+ cn_del_callback(&dev->id);
+ cn_queue_free_dev(dev->cbdev);
+ if (dev->nls->sk_socket)
+ sock_release(dev->nls->sk_socket);
+}
+
+module_init(cn_init);
+module_exit(cn_fini);
+
+EXPORT_SYMBOL_GPL(cn_add_callback);
+EXPORT_SYMBOL_GPL(cn_del_callback);
+EXPORT_SYMBOL_GPL(cn_netlink_send);
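
The exported cn_netlink_send() is the kernel-to-userspace half of the interface. A rough usage sketch follows; it is not part of this patch, the id and payload are illustrative, and the GFP flags depend on the caller's context.

/* Hypothetical example, not part of the patch: broadcast a small
 * payload to whoever listens on the group associated with the id. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/connector.h>

static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

static int example_broadcast(const char *text)
{
	struct cn_msg *msg;
	int len = strlen(text) + 1;
	int err;

	msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	memcpy(&msg->id, &example_id, sizeof(msg->id));
	msg->seq = 0;			/* caller-managed sequence number */
	msg->ack = 0;
	msg->len = len;
	memcpy(msg->data, text, len);

	/* Passing group 0 makes cn_netlink_send() look up the group from
	 * msg->id, which requires a callback registered with that id. */
	err = cn_netlink_send(msg, 0, GFP_KERNEL);

	kfree(msg);
	return err;
}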