Diffstat (limited to 'drivers/iio/industrialio-event.c')
-rw-r--r--  drivers/iio/industrialio-event.c | 232
1 file changed, 198 insertions(+), 34 deletions(-)
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 6be65ef..dac15b9 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -76,6 +76,9 @@ static unsigned int iio_event_poll(struct file *filep,
struct iio_event_interface *ev_int = indio_dev->event_interface;
unsigned int events = 0;
+ if (!indio_dev->info)
+ return -ENODEV;
+
poll_wait(filep, &ev_int->wait, wait);
spin_lock_irq(&ev_int->wait.lock);
@@ -96,6 +99,9 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
unsigned int copied;
int ret;
+ if (!indio_dev->info)
+ return -ENODEV;
+
if (count < sizeof(struct iio_event_data))
return -EINVAL;
@@ -107,9 +113,14 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
}
/* Blocking on device; waiting for something to be there */
ret = wait_event_interruptible_locked_irq(ev_int->wait,
- !kfifo_is_empty(&ev_int->det_events));
+ !kfifo_is_empty(&ev_int->det_events) ||
+ indio_dev->info == NULL);
if (ret)
goto error_unlock;
+ if (indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto error_unlock;
+ }
/* Single access device so no one else can get the data */
}
@@ -166,7 +177,7 @@ int iio_event_getfd(struct iio_dev *indio_dev)
iio_device_get(indio_dev);
fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
- indio_dev, O_RDONLY);
+ indio_dev, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
spin_lock_irq(&ev_int->wait.lock);
__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
@@ -190,6 +201,27 @@ static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_FALLING] = "falling"
};
+static const char * const iio_ev_info_text[] = {
+ [IIO_EV_INFO_ENABLE] = "en",
+ [IIO_EV_INFO_VALUE] = "value",
+ [IIO_EV_INFO_HYSTERESIS] = "hysteresis",
+};
+
+static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
+{
+ return attr->c->event_spec[attr->address & 0xffff].dir;
+}
+
+static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
+{
+ return attr->c->event_spec[attr->address & 0xffff].type;
+}
+
+static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
+{
+ return (attr->address >> 16) & 0xffff;
+}
+
static ssize_t iio_ev_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -204,9 +236,14 @@ static ssize_t iio_ev_state_store(struct device *dev,
if (ret < 0)
return ret;
- ret = indio_dev->info->write_event_config(indio_dev,
- this_attr->address,
- val);
+ if (indio_dev->info->write_event_config)
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->address, val);
+ else
+ ret = indio_dev->info->write_event_config_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), val);
+
return (ret < 0) ? ret : len;
}
@@ -216,9 +253,15 @@ static ssize_t iio_ev_state_show(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val = indio_dev->info->read_event_config(indio_dev,
- this_attr->address);
+ int val;
+ if (indio_dev->info->read_event_config)
+ val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->address);
+ else
+ val = indio_dev->info->read_event_config_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr));
if (val < 0)
return val;
else
@@ -231,14 +274,24 @@ static ssize_t iio_ev_value_show(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val, ret;
-
- ret = indio_dev->info->read_event_value(indio_dev,
- this_attr->address, &val);
- if (ret < 0)
- return ret;
+ int val, val2;
+ int ret;
- return sprintf(buf, "%d\n", val);
+ if (indio_dev->info->read_event_value) {
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->address, &val);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", val);
+ } else {
+ ret = indio_dev->info->read_event_value_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ &val, &val2);
+ if (ret < 0)
+ return ret;
+ return iio_format_value(buf, ret, val, val2);
+ }
}
static ssize_t iio_ev_value_store(struct device *dev,
@@ -248,25 +301,120 @@ static ssize_t iio_ev_value_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val;
+ int val, val2;
int ret;
- if (!indio_dev->info->write_event_value)
+ if (!indio_dev->info->write_event_value &&
+ !indio_dev->info->write_event_value_new)
return -EINVAL;
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
- val);
+ if (indio_dev->info->write_event_value) {
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ return ret;
+ ret = indio_dev->info->write_event_value(indio_dev,
+ this_attr->address, val);
+ } else {
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
+ if (ret)
+ return ret;
+ ret = indio_dev->info->write_event_value_new(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ val, val2);
+ }
if (ret < 0)
return ret;
return len;
}
-static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+static int iio_device_add_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int spec_index,
+ enum iio_event_type type, enum iio_event_direction dir,
+ enum iio_shared_by shared_by, const unsigned long *mask)
+{
+ ssize_t (*show)(struct device *, struct device_attribute *, char *);
+ ssize_t (*store)(struct device *, struct device_attribute *,
+ const char *, size_t);
+ unsigned int attrcount = 0;
+ unsigned int i;
+ char *postfix;
+ int ret;
+
+ for_each_set_bit(i, mask, sizeof(*mask)*8) {
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_ev_type_text[type], iio_ev_dir_text[dir],
+ iio_ev_info_text[i]);
+ if (postfix == NULL)
+ return -ENOMEM;
+
+ if (i == IIO_EV_INFO_ENABLE) {
+ show = iio_ev_state_show;
+ store = iio_ev_state_store;
+ } else {
+ show = iio_ev_value_show;
+ store = iio_ev_value_store;
+ }
+
+ ret = __iio_add_chan_devattr(postfix, chan, show, store,
+ (i << 16) | spec_index, shared_by, &indio_dev->dev,
+ &indio_dev->event_interface->dev_attr_list);
+ kfree(postfix);
+
+ if (ret)
+ return ret;
+
+ attrcount++;
+ }
+
+ return attrcount;
+}
+
+static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret = 0, i, attrcount = 0;
+ enum iio_event_direction dir;
+ enum iio_event_type type;
+
+ for (i = 0; i < chan->num_event_specs; i++) {
+ type = chan->event_spec[i].type;
+ dir = chan->event_spec[i].dir;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SEPARATE, &chan->event_spec[i].mask_separate);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_TYPE,
+ &chan->event_spec[i].mask_shared_by_type);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_DIR,
+ &chan->event_spec[i].mask_shared_by_dir);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_ALL,
+ &chan->event_spec[i].mask_shared_by_all);
+ if (ret < 0)
+ goto error_ret;
+ attrcount += ret;
+ }
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret = 0, i, attrcount = 0;
@@ -339,15 +487,14 @@ error_ret:
return ret;
}
-static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
+
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
{
- struct iio_dev_attr *p, *n;
- list_for_each_entry_safe(p, n,
- &indio_dev->event_interface->
- dev_attr_list, l) {
- kfree(p->dev_attr.attr.name);
- kfree(p);
- }
+ if (chan->event_mask)
+ return iio_device_add_event_sysfs_old(indio_dev, chan);
+ else
+ return iio_device_add_event_sysfs_new(indio_dev, chan);
}
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
@@ -369,9 +516,12 @@ static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
int j;
- for (j = 0; j < indio_dev->num_channels; j++)
+ for (j = 0; j < indio_dev->num_channels; j++) {
if (indio_dev->channels[j].event_mask != 0)
return true;
+ if (indio_dev->channels[j].num_event_specs != 0)
+ return true;
+ }
return false;
}
@@ -441,18 +591,32 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
return 0;
error_free_setup_event_lines:
- __iio_remove_event_config_attrs(indio_dev);
+ iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface);
error_ret:
return ret;
}
+/**
+ * iio_device_wakeup_eventset - Wakes up the event waitqueue
+ * @indio_dev: The IIO device
+ *
+ * Wakes up the event waitqueue used for poll() and blocking read().
+ * Should usually be called when the device is unregistered.
+ */
+void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
+{
+ if (indio_dev->event_interface == NULL)
+ return;
+ wake_up(&indio_dev->event_interface->wait);
+}
+
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
if (indio_dev->event_interface == NULL)
return;
- __iio_remove_event_config_attrs(indio_dev);
+ iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface->group.attrs);
kfree(indio_dev->event_interface);
}
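
For context, the sketch below shows how a driver might wire up the transitional interface added by this patch. It is illustrative only: the my_* identifiers and the single voltage channel are invented, while the struct iio_event_spec fields and the *_new callback signatures are taken from the diff above.

/*
 * Hypothetical driver fragment (not part of the patch) illustrating the
 * iio_event_spec based interface introduced above.
 */
#include <linux/bitops.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

static const struct iio_event_spec my_thresh_event = {
	.type = IIO_EV_TYPE_THRESH,
	.dir = IIO_EV_DIR_RISING,
	/* One enable and one value attribute per channel. */
	.mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
};

static const struct iio_chan_spec my_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.event_spec = &my_thresh_event,
		.num_event_specs = 1,
	},
};

static int my_read_event_config_new(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, enum iio_event_type type,
	enum iio_event_direction dir)
{
	/* Return 1 if the event is currently enabled, 0 otherwise. */
	return 0;
}

static int my_write_event_config_new(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, enum iio_event_type type,
	enum iio_event_direction dir, int state)
{
	/* Enable or disable the threshold event in hardware. */
	return 0;
}

static int my_read_event_value_new(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, enum iio_event_type type,
	enum iio_event_direction dir, enum iio_event_info info,
	int *val, int *val2)
{
	/* Integer threshold; the core formats it via iio_format_value(). */
	*val = 0;
	return IIO_VAL_INT;
}

static int my_write_event_value_new(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, enum iio_event_type type,
	enum iio_event_direction dir, enum iio_event_info info,
	int val, int val2)
{
	/* val/val2 are parsed by the core with iio_str_to_fixpoint(). */
	return 0;
}

static const struct iio_info my_info = {
	.read_event_config_new = my_read_event_config_new,
	.write_event_config_new = my_write_event_config_new,
	.read_event_value_new = my_read_event_value_new,
	.write_event_value_new = my_write_event_value_new,
};

With mask_separate set as above, the core's iio_device_add_event() would generate per-channel sysfs attributes along the lines of in_voltage0_thresh_rising_en and in_voltage0_thresh_rising_value.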