Diffstat (limited to 'drivers/iio')
-rw-r--r--  drivers/iio/Kconfig                 |  51
-rw-r--r--  drivers/iio/Makefile                |  10
-rw-r--r--  drivers/iio/iio_core.h              |  62
-rw-r--r--  drivers/iio/iio_core_trigger.h      |  46
-rw-r--r--  drivers/iio/industrialio-buffer.c   | 755
-rw-r--r--  drivers/iio/industrialio-core.c     | 916
-rw-r--r--  drivers/iio/industrialio-event.c    | 453
-rw-r--r--  drivers/iio/industrialio-trigger.c  | 509
-rw-r--r--  drivers/iio/inkern.c                | 293
-rw-r--r--  drivers/iio/kfifo_buf.c             | 150
10 files changed, 3245 insertions(+), 0 deletions(-)
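The files below form the initial move of the IIO core out of staging. As a point of reference, a driver consumes the API they export roughly as follows. This is a hypothetical sketch: the foo_* names are invented; iio_device_alloc(), iio_device_register(), iio_device_free() and the read_raw() callback are the interfaces implemented by industrialio-core.c in this patch, while IIO_CHAN_INFO_RAW_SEPARATE_BIT and iio_priv() come from the iio.h of the same era and are not part of the hunks shown.

    #include <linux/module.h>
    #include <linux/iio/iio.h>

    /* hypothetical driver state, allocated alongside the iio_dev */
    struct foo_state {
            int last_reading;
    };

    static const struct iio_chan_spec foo_channels[] = {
            {
                    .type = IIO_VOLTAGE,
                    .indexed = 1,
                    .channel = 0,
                    /* exposed as in_voltage0_raw by the sysfs code below */
                    .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
            },
    };

    static int foo_read_raw(struct iio_dev *indio_dev,
                            struct iio_chan_spec const *chan,
                            int *val, int *val2, long mask)
    {
            struct foo_state *st = iio_priv(indio_dev);

            *val = st->last_reading; /* a real driver would read hardware */
            return IIO_VAL_INT;      /* formatted by iio_read_channel_info() */
    }

    static const struct iio_info foo_info = {
            .driver_module = THIS_MODULE,
            .read_raw = foo_read_raw,
    };

    static int foo_probe(void)       /* would be a bus probe in practice */
    {
            struct iio_dev *indio_dev;
            int ret;

            indio_dev = iio_device_alloc(sizeof(struct foo_state));
            if (!indio_dev)
                    return -ENOMEM;
            indio_dev->name = "foo";
            indio_dev->info = &foo_info;
            indio_dev->modes = INDIO_DIRECT_MODE;
            indio_dev->channels = foo_channels;
            indio_dev->num_channels = ARRAY_SIZE(foo_channels);
            ret = iio_device_register(indio_dev);
            if (ret)
                    iio_device_free(indio_dev);
            return ret;
    }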
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
new file mode 100644
index 00000000000..3ab7d48d0b0
--- /dev/null
+++ b/drivers/iio/Kconfig
@@ -0,0 +1,51 @@
+#
+# Industrial I/O subsystem configuration
+#
+
+menuconfig IIO
+	tristate "Industrial I/O support"
+	depends on GENERIC_HARDIRQS
+	help
+	  The industrial I/O subsystem provides a unified framework for
+	  drivers for many different types of embedded sensors using a
+	  number of different physical interfaces (i2c, spi, etc). See
+	  Documentation/iio for more information.
+
+if IIO
+
+config IIO_BUFFER
+	bool "Enable buffer support within IIO"
+	help
+	  Provide core support for various buffer based data
+	  acquisition methods.
+
+if IIO_BUFFER
+
+config IIO_KFIFO_BUF
+	select IIO_TRIGGER
+	tristate "Industrial I/O buffering based on kfifo"
+	help
+	  A simple fifo based on kfifo.  Use this if you want a fifo
+	  rather than a ring buffer.  Note that this currently provides
+	  no buffer events so it is up to userspace to work out how
+	  often to read from the buffer.
+
+endif # IIO_BUFFER
+
+config IIO_TRIGGER
+	bool "Enable triggered sampling support"
+	help
+	  Provides IIO core support for triggers.  Currently these
+	  are used to initialize capture of samples to push into
+	  ring buffers.  The triggers are effectively a 'capture
+	  data now' interrupt.
+
+config IIO_CONSUMERS_PER_TRIGGER
+	int "Maximum number of consumers per trigger"
+	depends on IIO_TRIGGER
+	default "2"
+	help
+	  This value controls the maximum number of consumers that a
+	  given trigger may handle.  Default is 2.
+
+endif # IIO
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
new file mode 100644
index 00000000000..d5fc57d12ea
--- /dev/null
+++ b/drivers/iio/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the industrial I/O core.
+#
+
+obj-$(CONFIG_IIO) += industrialio.o
+industrialio-y := industrialio-core.o industrialio-event.o inkern.o
+industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
+industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
+
+obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
new file mode 100644
index 00000000000..f652e6ae5a3
--- /dev/null
+++ b/drivers/iio/iio_core.h
@@ -0,0 +1,62 @@
+/* The industrial I/O core function defs.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * These definitions are meant for use only within the IIO core, not individual
+ * drivers.
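+ * They are shared between the files that are linked together into
+ * industrialio.o by the Makefile above.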
+ */
+
+#ifndef _IIO_CORE_H_
+#define _IIO_CORE_H_
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+struct iio_chan_spec;
+struct iio_dev;
+
+
+int __iio_add_chan_devattr(const char *postfix,
+			   struct iio_chan_spec const *chan,
+			   ssize_t (*func)(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf),
+			   ssize_t (*writefunc)(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf,
+						size_t len),
+			   u64 mask,
+			   bool generic,
+			   struct device *dev,
+			   struct list_head *attr_list);
+
+/* Event interface flags */
+#define IIO_BUSY_BIT_POS 1
+
+#ifdef CONFIG_IIO_BUFFER
+struct poll_table_struct;
+
+unsigned int iio_buffer_poll(struct file *filp,
+			     struct poll_table_struct *wait);
+ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+				      size_t n, loff_t *f_ps);
+
+
+#define iio_buffer_poll_addr (&iio_buffer_poll)
+#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
+
+#else
+
+#define iio_buffer_poll_addr NULL
+#define iio_buffer_read_first_n_outer_addr NULL
+
+#endif
+
+int iio_device_register_eventset(struct iio_dev *indio_dev);
+void iio_device_unregister_eventset(struct iio_dev *indio_dev);
+int iio_event_getfd(struct iio_dev *indio_dev);
+
+#endif
diff --git a/drivers/iio/iio_core_trigger.h b/drivers/iio/iio_core_trigger.h
new file mode 100644
index 00000000000..6f7c56fcbe7
--- /dev/null
+++ b/drivers/iio/iio_core_trigger.h
@@ -0,0 +1,46 @@
+
+/* The industrial I/O core, trigger consumer handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifdef CONFIG_IIO_TRIGGER
+/**
+ * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
+ * @indio_dev: iio_dev associated with the device that will consume the trigger
+ **/
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_unregister_trigger_consumer() - reverse the registration process
+ * @indio_dev: iio_dev associated with the device that consumed the trigger
+ **/
+void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev);
+
+#else
+
+/**
+ * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
+ * @indio_dev: iio_dev associated with the device that will consume the trigger
+ **/
+static inline void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+{
+}
+
+/**
+ * iio_device_unregister_trigger_consumer() - reverse the registration process
+ * @indio_dev: iio_dev associated with the device that consumed the trigger
+ **/
+static inline void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
+{
+}
+
+#endif /* CONFIG_IIO_TRIGGER */
+
+
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
new file mode 100644
index 00000000000..b5b2c38045c
--- /dev/null
+++ b/drivers/iio/industrialio-buffer.c
@@ -0,0 +1,755 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Handling of buffer allocation / resizing.
+ *
+ *
+ * Things to look at here.
+ * - Better memory allocation techniques?
+ * - Alternative access techniques?
+ */ +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/slab.h> +#include <linux/poll.h> + +#include <linux/iio/iio.h> +#include "iio_core.h" +#include <linux/iio/sysfs.h> +#include <linux/iio/buffer.h> + +static const char * const iio_endian_prefix[] = { + [IIO_BE] = "be", + [IIO_LE] = "le", +}; + +/** + * iio_buffer_read_first_n_outer() - chrdev read for buffer access + * + * This function relies on all buffer implementations having an + * iio_buffer as their first element. + **/ +ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, + size_t n, loff_t *f_ps) +{ + struct iio_dev *indio_dev = filp->private_data; + struct iio_buffer *rb = indio_dev->buffer; + + if (!rb || !rb->access->read_first_n) + return -EINVAL; + return rb->access->read_first_n(rb, n, buf); +} + +/** + * iio_buffer_poll() - poll the buffer to find out if it has data + */ +unsigned int iio_buffer_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct iio_dev *indio_dev = filp->private_data; + struct iio_buffer *rb = indio_dev->buffer; + + poll_wait(filp, &rb->pollq, wait); + if (rb->stufftoread) + return POLLIN | POLLRDNORM; + /* need a way of knowing if there may be enough data... */ + return 0; +} + +void iio_buffer_init(struct iio_buffer *buffer) +{ + INIT_LIST_HEAD(&buffer->demux_list); + init_waitqueue_head(&buffer->pollq); +} +EXPORT_SYMBOL(iio_buffer_init); + +static ssize_t iio_show_scan_index(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); +} + +static ssize_t iio_show_fixed_type(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + u8 type = this_attr->c->scan_type.endianness; + + if (type == IIO_CPU) { +#ifdef __LITTLE_ENDIAN + type = IIO_LE; +#else + type = IIO_BE; +#endif + } + return sprintf(buf, "%s:%c%d/%d>>%u\n", + iio_endian_prefix[type], + this_attr->c->scan_type.sign, + this_attr->c->scan_type.realbits, + this_attr->c->scan_type.storagebits, + this_attr->c->scan_type.shift); +} + +static ssize_t iio_scan_el_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + + ret = test_bit(to_iio_dev_attr(attr)->address, + indio_dev->buffer->scan_mask); + + return sprintf(buf, "%d\n", ret); +} + +static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) +{ + clear_bit(bit, buffer->scan_mask); + return 0; +} + +static ssize_t iio_scan_el_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret; + bool state; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + + ret = strtobool(buf, &state); + if (ret < 0) + return ret; + mutex_lock(&indio_dev->mlock); + if (iio_buffer_enabled(indio_dev)) { + ret = -EBUSY; + goto error_ret; + } + ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); + if (ret < 0) + goto error_ret; + if (!state && ret) { + ret = iio_scan_mask_clear(buffer, this_attr->address); + if (ret) + goto error_ret; + } else if (state && !ret) { + ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); + if (ret) + goto error_ret; + } + +error_ret: + mutex_unlock(&indio_dev->mlock); + + return ret < 0 ? 
ret : len; + +} + +static ssize_t iio_scan_el_ts_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp); +} + +static ssize_t iio_scan_el_ts_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + bool state; + + ret = strtobool(buf, &state); + if (ret < 0) + return ret; + + mutex_lock(&indio_dev->mlock); + if (iio_buffer_enabled(indio_dev)) { + ret = -EBUSY; + goto error_ret; + } + indio_dev->buffer->scan_timestamp = state; + indio_dev->scan_timestamp = state; +error_ret: + mutex_unlock(&indio_dev->mlock); + + return ret ? ret : len; +} + +static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + int ret, attrcount = 0; + struct iio_buffer *buffer = indio_dev->buffer; + + ret = __iio_add_chan_devattr("index", + chan, + &iio_show_scan_index, + NULL, + 0, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + if (ret) + goto error_ret; + attrcount++; + ret = __iio_add_chan_devattr("type", + chan, + &iio_show_fixed_type, + NULL, + 0, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + if (ret) + goto error_ret; + attrcount++; + if (chan->type != IIO_TIMESTAMP) + ret = __iio_add_chan_devattr("en", + chan, + &iio_scan_el_show, + &iio_scan_el_store, + chan->scan_index, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + else + ret = __iio_add_chan_devattr("en", + chan, + &iio_scan_el_ts_show, + &iio_scan_el_ts_store, + chan->scan_index, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + attrcount++; + ret = attrcount; +error_ret: + return ret; +} + +static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev, + struct iio_dev_attr *p) +{ + kfree(p->dev_attr.attr.name); + kfree(p); +} + +static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev) +{ + struct iio_dev_attr *p, *n; + struct iio_buffer *buffer = indio_dev->buffer; + + list_for_each_entry_safe(p, n, + &buffer->scan_el_dev_attr_list, l) + iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p); +} + +static const char * const iio_scan_elements_group_name = "scan_elements"; + +int iio_buffer_register(struct iio_dev *indio_dev, + const struct iio_chan_spec *channels, + int num_channels) +{ + struct iio_dev_attr *p; + struct attribute **attr; + struct iio_buffer *buffer = indio_dev->buffer; + int ret, i, attrn, attrcount, attrcount_orig = 0; + + if (buffer->attrs) + indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs; + + if (buffer->scan_el_attrs != NULL) { + attr = buffer->scan_el_attrs->attrs; + while (*attr++ != NULL) + attrcount_orig++; + } + attrcount = attrcount_orig; + INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list); + if (channels) { + /* new magic */ + for (i = 0; i < num_channels; i++) { + /* Establish necessary mask length */ + if (channels[i].scan_index > + (int)indio_dev->masklength - 1) + indio_dev->masklength + = indio_dev->channels[i].scan_index + 1; + + ret = iio_buffer_add_channel_sysfs(indio_dev, + &channels[i]); + if (ret < 0) + goto error_cleanup_dynamic; + attrcount += ret; + if (channels[i].type == IIO_TIMESTAMP) + indio_dev->scan_index_timestamp = + channels[i].scan_index; + } + if (indio_dev->masklength && buffer->scan_mask == NULL) { + buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), + sizeof(*buffer->scan_mask), + GFP_KERNEL); + if 
(buffer->scan_mask == NULL) { + ret = -ENOMEM; + goto error_cleanup_dynamic; + } + } + } + + buffer->scan_el_group.name = iio_scan_elements_group_name; + + buffer->scan_el_group.attrs = kcalloc(attrcount + 1, + sizeof(buffer->scan_el_group.attrs[0]), + GFP_KERNEL); + if (buffer->scan_el_group.attrs == NULL) { + ret = -ENOMEM; + goto error_free_scan_mask; + } + if (buffer->scan_el_attrs) + memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs, + sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig); + attrn = attrcount_orig; + + list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l) + buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr; + indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group; + + return 0; + +error_free_scan_mask: + kfree(buffer->scan_mask); +error_cleanup_dynamic: + __iio_buffer_attr_cleanup(indio_dev); + + return ret; +} +EXPORT_SYMBOL(iio_buffer_register); + +void iio_buffer_unregister(struct iio_dev *indio_dev) +{ + kfree(indio_dev->buffer->scan_mask); + kfree(indio_dev->buffer->scan_el_group.attrs); + __iio_buffer_attr_cleanup(indio_dev); +} +EXPORT_SYMBOL(iio_buffer_unregister); + +ssize_t iio_buffer_read_length(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + + if (buffer->access->get_length) + return sprintf(buf, "%d\n", + buffer->access->get_length(buffer)); + + return 0; +} +EXPORT_SYMBOL(iio_buffer_read_length); + +ssize_t iio_buffer_write_length(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret; + ulong val; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + return ret; + + if (buffer->access->get_length) + if (val == buffer->access->get_length(buffer)) + return len; + + mutex_lock(&indio_dev->mlock); + if (iio_buffer_enabled(indio_dev)) { + ret = -EBUSY; + } else { + if (buffer->access->set_length) + buffer->access->set_length(buffer, val); + ret = 0; + } + mutex_unlock(&indio_dev->mlock); + + return ret ? 
ret : len; +} +EXPORT_SYMBOL(iio_buffer_write_length); + +ssize_t iio_buffer_store_enable(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret; + bool requested_state, current_state; + int previous_mode; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + + mutex_lock(&indio_dev->mlock); + previous_mode = indio_dev->currentmode; + requested_state = !(buf[0] == '0'); + current_state = iio_buffer_enabled(indio_dev); + if (current_state == requested_state) { + printk(KERN_INFO "iio-buffer, current state requested again\n"); + goto done; + } + if (requested_state) { + if (indio_dev->setup_ops->preenable) { + ret = indio_dev->setup_ops->preenable(indio_dev); + if (ret) { + printk(KERN_ERR + "Buffer not started:" + "buffer preenable failed\n"); + goto error_ret; + } + } + if (buffer->access->request_update) { + ret = buffer->access->request_update(buffer); + if (ret) { + printk(KERN_INFO + "Buffer not started:" + "buffer parameter update failed\n"); + goto error_ret; + } + } + /* Definitely possible for devices to support both of these.*/ + if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) { + if (!indio_dev->trig) { + printk(KERN_INFO + "Buffer not started: no trigger\n"); + ret = -EINVAL; + goto error_ret; + } + indio_dev->currentmode = INDIO_BUFFER_TRIGGERED; + } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) + indio_dev->currentmode = INDIO_BUFFER_HARDWARE; + else { /* should never be reached */ + ret = -EINVAL; + goto error_ret; + } + + if (indio_dev->setup_ops->postenable) { + ret = indio_dev->setup_ops->postenable(indio_dev); + if (ret) { + printk(KERN_INFO + "Buffer not started:" + "postenable failed\n"); + indio_dev->currentmode = previous_mode; + if (indio_dev->setup_ops->postdisable) + indio_dev->setup_ops-> + postdisable(indio_dev); + goto error_ret; + } + } + } else { + if (indio_dev->setup_ops->predisable) { + ret = indio_dev->setup_ops->predisable(indio_dev); + if (ret) + goto error_ret; + } + indio_dev->currentmode = INDIO_DIRECT_MODE; + if (indio_dev->setup_ops->postdisable) { + ret = indio_dev->setup_ops->postdisable(indio_dev); + if (ret) + goto error_ret; + } + } +done: + mutex_unlock(&indio_dev->mlock); + return len; + +error_ret: + mutex_unlock(&indio_dev->mlock); + return ret; +} +EXPORT_SYMBOL(iio_buffer_store_enable); + +ssize_t iio_buffer_show_enable(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev)); +} +EXPORT_SYMBOL(iio_buffer_show_enable); + +/* note NULL used as error indicator as it doesn't make sense. */ +static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks, + unsigned int masklength, + const unsigned long *mask) +{ + if (bitmap_empty(mask, masklength)) + return NULL; + while (*av_masks) { + if (bitmap_subset(mask, av_masks, masklength)) + return av_masks; + av_masks += BITS_TO_LONGS(masklength); + } + return NULL; +} + +static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask, + bool timestamp) +{ + const struct iio_chan_spec *ch; + unsigned bytes = 0; + int length, i; + + /* How much space will the demuxed element take? 
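+	 * Each enabled channel contributes storagebits / 8 bytes, aligned
+	 * to its own length, and an enabled timestamp is aligned and added
+	 * last: e.g. two 16-bit channels plus an 8-byte timestamp come to
+	 * 2 + 2 + 4 bytes of padding + 8 = 16 bytes.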
*/ + for_each_set_bit(i, mask, + indio_dev->masklength) { + ch = iio_find_channel_from_si(indio_dev, i); + length = ch->scan_type.storagebits / 8; + bytes = ALIGN(bytes, length); + bytes += length; + } + if (timestamp) { + ch = iio_find_channel_from_si(indio_dev, + indio_dev->scan_index_timestamp); + length = ch->scan_type.storagebits / 8; + bytes = ALIGN(bytes, length); + bytes += length; + } + return bytes; +} + +int iio_sw_buffer_preenable(struct iio_dev *indio_dev) +{ + struct iio_buffer *buffer = indio_dev->buffer; + dev_dbg(&indio_dev->dev, "%s\n", __func__); + + /* How much space will the demuxed element take? */ + indio_dev->scan_bytes = + iio_compute_scan_bytes(indio_dev, buffer->scan_mask, + buffer->scan_timestamp); + buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes); + + /* What scan mask do we actually have ?*/ + if (indio_dev->available_scan_masks) + indio_dev->active_scan_mask = + iio_scan_mask_match(indio_dev->available_scan_masks, + indio_dev->masklength, + buffer->scan_mask); + else + indio_dev->active_scan_mask = buffer->scan_mask; + iio_update_demux(indio_dev); + + if (indio_dev->info->update_scan_mode) + return indio_dev->info + ->update_scan_mode(indio_dev, + indio_dev->active_scan_mask); + return 0; +} +EXPORT_SYMBOL(iio_sw_buffer_preenable); + +/** + * iio_scan_mask_set() - set particular bit in the scan mask + * @buffer: the buffer whose scan mask we are interested in + * @bit: the bit to be set. + **/ +int iio_scan_mask_set(struct iio_dev *indio_dev, + struct iio_buffer *buffer, int bit) +{ + const unsigned long *mask; + unsigned long *trialmask; + + trialmask = kmalloc(sizeof(*trialmask)* + BITS_TO_LONGS(indio_dev->masklength), + GFP_KERNEL); + + if (trialmask == NULL) + return -ENOMEM; + if (!indio_dev->masklength) { + WARN_ON("trying to set scanmask prior to registering buffer\n"); + kfree(trialmask); + return -EINVAL; + } + bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); + set_bit(bit, trialmask); + + if (indio_dev->available_scan_masks) { + mask = iio_scan_mask_match(indio_dev->available_scan_masks, + indio_dev->masklength, + trialmask); + if (!mask) { + kfree(trialmask); + return -EINVAL; + } + } + bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); + + kfree(trialmask); + + return 0; +}; +EXPORT_SYMBOL_GPL(iio_scan_mask_set); + +int iio_scan_mask_query(struct iio_dev *indio_dev, + struct iio_buffer *buffer, int bit) +{ + if (bit > indio_dev->masklength) + return -EINVAL; + + if (!buffer->scan_mask) + return 0; + + return test_bit(bit, buffer->scan_mask); +}; +EXPORT_SYMBOL_GPL(iio_scan_mask_query); + +/** + * struct iio_demux_table() - table describing demux memcpy ops + * @from: index to copy from + * @to: index to copy to + * @length: how many bytes to copy + * @l: list head used for management + */ +struct iio_demux_table { + unsigned from; + unsigned to; + unsigned length; + struct list_head l; +}; + +static unsigned char *iio_demux(struct iio_buffer *buffer, + unsigned char *datain) +{ + struct iio_demux_table *t; + + if (list_empty(&buffer->demux_list)) + return datain; + list_for_each_entry(t, &buffer->demux_list, l) + memcpy(buffer->demux_bounce + t->to, + datain + t->from, t->length); + + return buffer->demux_bounce; +} + +int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data, + s64 timestamp) +{ + unsigned char *dataout = iio_demux(buffer, data); + + return buffer->access->store_to(buffer, dataout, timestamp); +} +EXPORT_SYMBOL_GPL(iio_push_to_buffer); + +static void 
iio_buffer_demux_free(struct iio_buffer *buffer) +{ + struct iio_demux_table *p, *q; + list_for_each_entry_safe(p, q, &buffer->demux_list, l) { + list_del(&p->l); + kfree(p); + } +} + +int iio_update_demux(struct iio_dev *indio_dev) +{ + const struct iio_chan_spec *ch; + struct iio_buffer *buffer = indio_dev->buffer; + int ret, in_ind = -1, out_ind, length; + unsigned in_loc = 0, out_loc = 0; + struct iio_demux_table *p; + + /* Clear out any old demux */ + iio_buffer_demux_free(buffer); + kfree(buffer->demux_bounce); + buffer->demux_bounce = NULL; + + /* First work out which scan mode we will actually have */ + if (bitmap_equal(indio_dev->active_scan_mask, + buffer->scan_mask, + indio_dev->masklength)) + return 0; + + /* Now we have the two masks, work from least sig and build up sizes */ + for_each_set_bit(out_ind, + indio_dev->active_scan_mask, + indio_dev->masklength) { + in_ind = find_next_bit(indio_dev->active_scan_mask, + indio_dev->masklength, + in_ind + 1); + while (in_ind != out_ind) { + in_ind = find_next_bit(indio_dev->active_scan_mask, + indio_dev->masklength, + in_ind + 1); + ch = iio_find_channel_from_si(indio_dev, in_ind); + length = ch->scan_type.storagebits/8; + /* Make sure we are aligned */ + in_loc += length; + if (in_loc % length) + in_loc += length - in_loc % length; + } + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (p == NULL) { + ret = -ENOMEM; + goto error_clear_mux_table; + } + ch = iio_find_channel_from_si(indio_dev, in_ind); + length = ch->scan_type.storagebits/8; + if (out_loc % length) + out_loc += length - out_loc % length; + if (in_loc % length) + in_loc += length - in_loc % length; + p->from = in_loc; + p->to = out_loc; + p->length = length; + list_add_tail(&p->l, &buffer->demux_list); + out_loc += length; + in_loc += length; + } + /* Relies on scan_timestamp being last */ + if (buffer->scan_timestamp) { + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (p == NULL) { + ret = -ENOMEM; + goto error_clear_mux_table; + } + ch = iio_find_channel_from_si(indio_dev, + indio_dev->scan_index_timestamp); + length = ch->scan_type.storagebits/8; + if (out_loc % length) + out_loc += length - out_loc % length; + if (in_loc % length) + in_loc += length - in_loc % length; + p->from = in_loc; + p->to = out_loc; + p->length = length; + list_add_tail(&p->l, &buffer->demux_list); + out_loc += length; + in_loc += length; + } + buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); + if (buffer->demux_bounce == NULL) { + ret = -ENOMEM; + goto error_clear_mux_table; + } + return 0; + +error_clear_mux_table: + iio_buffer_demux_free(buffer); + + return ret; +} +EXPORT_SYMBOL_GPL(iio_update_demux); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c new file mode 100644 index 00000000000..72e33b819f9 --- /dev/null +++ b/drivers/iio/industrialio-core.c @@ -0,0 +1,916 @@ +/* The industrial I/O core + * + * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * Based on elements of hwmon and input subsystems. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/idr.h> +#include <linux/kdev_t.h> +#include <linux/err.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/cdev.h> +#include <linux/slab.h> +#include <linux/anon_inodes.h> +#include <linux/debugfs.h> +#include <linux/iio/iio.h> +#include "iio_core.h" +#include "iio_core_trigger.h" +#include <linux/iio/sysfs.h> +#include <linux/iio/events.h> + +/* IDA to assign each registered device a unique id*/ +static DEFINE_IDA(iio_ida); + +static dev_t iio_devt; + +#define IIO_DEV_MAX 256 +struct bus_type iio_bus_type = { + .name = "iio", +}; +EXPORT_SYMBOL(iio_bus_type); + +static struct dentry *iio_debugfs_dentry; + +static const char * const iio_direction[] = { + [0] = "in", + [1] = "out", +}; + +static const char * const iio_chan_type_name_spec[] = { + [IIO_VOLTAGE] = "voltage", + [IIO_CURRENT] = "current", + [IIO_POWER] = "power", + [IIO_ACCEL] = "accel", + [IIO_ANGL_VEL] = "anglvel", + [IIO_MAGN] = "magn", + [IIO_LIGHT] = "illuminance", + [IIO_INTENSITY] = "intensity", + [IIO_PROXIMITY] = "proximity", + [IIO_TEMP] = "temp", + [IIO_INCLI] = "incli", + [IIO_ROT] = "rot", + [IIO_ANGL] = "angl", + [IIO_TIMESTAMP] = "timestamp", + [IIO_CAPACITANCE] = "capacitance", + [IIO_ALTVOLTAGE] = "altvoltage", +}; + +static const char * const iio_modifier_names[] = { + [IIO_MOD_X] = "x", + [IIO_MOD_Y] = "y", + [IIO_MOD_Z] = "z", + [IIO_MOD_LIGHT_BOTH] = "both", + [IIO_MOD_LIGHT_IR] = "ir", +}; + +/* relies on pairs of these shared then separate */ +static const char * const iio_chan_info_postfix[] = { + [IIO_CHAN_INFO_RAW] = "raw", + [IIO_CHAN_INFO_PROCESSED] = "input", + [IIO_CHAN_INFO_SCALE] = "scale", + [IIO_CHAN_INFO_OFFSET] = "offset", + [IIO_CHAN_INFO_CALIBSCALE] = "calibscale", + [IIO_CHAN_INFO_CALIBBIAS] = "calibbias", + [IIO_CHAN_INFO_PEAK] = "peak_raw", + [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale", + [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw", + [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw", + [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY] + = "filter_low_pass_3db_frequency", + [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency", + [IIO_CHAN_INFO_FREQUENCY] = "frequency", + [IIO_CHAN_INFO_PHASE] = "phase", +}; + +const struct iio_chan_spec +*iio_find_channel_from_si(struct iio_dev *indio_dev, int si) +{ + int i; + + for (i = 0; i < indio_dev->num_channels; i++) + if (indio_dev->channels[i].scan_index == si) + return &indio_dev->channels[i]; + return NULL; +} + +/* This turns up an awful lot */ +ssize_t iio_read_const_attr(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string); +} +EXPORT_SYMBOL(iio_read_const_attr); + +static int __init iio_init(void) +{ + int ret; + + /* Register sysfs bus */ + ret = bus_register(&iio_bus_type); + if (ret < 0) { + printk(KERN_ERR + "%s could not register bus type\n", + __FILE__); + goto error_nothing; + } + + ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio"); + if (ret < 0) { + printk(KERN_ERR "%s: failed to allocate char dev region\n", + __FILE__); + goto error_unregister_bus_type; + } + + iio_debugfs_dentry = debugfs_create_dir("iio", NULL); + + return 0; + +error_unregister_bus_type: + bus_unregister(&iio_bus_type); +error_nothing: + return ret; +} + +static void __exit iio_exit(void) +{ + if (iio_devt) + unregister_chrdev_region(iio_devt, IIO_DEV_MAX); + bus_unregister(&iio_bus_type); + 
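+	/* remove the top level "iio" debugfs directory created in iio_init() */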
debugfs_remove(iio_debugfs_dentry); +} + +#if defined(CONFIG_DEBUG_FS) +static int iio_debugfs_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} + +static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct iio_dev *indio_dev = file->private_data; + char buf[20]; + unsigned val = 0; + ssize_t len; + int ret; + + ret = indio_dev->info->debugfs_reg_access(indio_dev, + indio_dev->cached_reg_addr, + 0, &val); + if (ret) + dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); + + len = snprintf(buf, sizeof(buf), "0x%X\n", val); + + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} + +static ssize_t iio_debugfs_write_reg(struct file *file, + const char __user *userbuf, size_t count, loff_t *ppos) +{ + struct iio_dev *indio_dev = file->private_data; + unsigned reg, val; + char buf[80]; + int ret; + + count = min_t(size_t, count, (sizeof(buf)-1)); + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + buf[count] = 0; + + ret = sscanf(buf, "%i %i", ®, &val); + + switch (ret) { + case 1: + indio_dev->cached_reg_addr = reg; + break; + case 2: + indio_dev->cached_reg_addr = reg; + ret = indio_dev->info->debugfs_reg_access(indio_dev, reg, + val, NULL); + if (ret) { + dev_err(indio_dev->dev.parent, "%s: write failed\n", + __func__); + return ret; + } + break; + default: + return -EINVAL; + } + + return count; +} + +static const struct file_operations iio_debugfs_reg_fops = { + .open = iio_debugfs_open, + .read = iio_debugfs_read_reg, + .write = iio_debugfs_write_reg, +}; + +static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) +{ + debugfs_remove_recursive(indio_dev->debugfs_dentry); +} + +static int iio_device_register_debugfs(struct iio_dev *indio_dev) +{ + struct dentry *d; + + if (indio_dev->info->debugfs_reg_access == NULL) + return 0; + + if (IS_ERR(iio_debugfs_dentry)) + return 0; + + indio_dev->debugfs_dentry = + debugfs_create_dir(dev_name(&indio_dev->dev), + iio_debugfs_dentry); + if (IS_ERR(indio_dev->debugfs_dentry)) + return PTR_ERR(indio_dev->debugfs_dentry); + + if (indio_dev->debugfs_dentry == NULL) { + dev_warn(indio_dev->dev.parent, + "Failed to create debugfs directory\n"); + return -EFAULT; + } + + d = debugfs_create_file("direct_reg_access", 0644, + indio_dev->debugfs_dentry, + indio_dev, &iio_debugfs_reg_fops); + if (!d) { + iio_device_unregister_debugfs(indio_dev); + return -ENOMEM; + } + + return 0; +} +#else +static int iio_device_register_debugfs(struct iio_dev *indio_dev) +{ + return 0; +} + +static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +static ssize_t iio_read_channel_ext_info(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + const struct iio_chan_spec_ext_info *ext_info; + + ext_info = &this_attr->c->ext_info[this_attr->address]; + + return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf); +} + +static ssize_t iio_write_channel_ext_info(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + const struct iio_chan_spec_ext_info *ext_info; + + ext_info = &this_attr->c->ext_info[this_attr->address]; + + return ext_info->write(indio_dev, 
ext_info->private, + this_attr->c, buf, len); +} + +static ssize_t iio_read_channel_info(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + int val, val2; + int ret = indio_dev->info->read_raw(indio_dev, this_attr->c, + &val, &val2, this_attr->address); + + if (ret < 0) + return ret; + + if (ret == IIO_VAL_INT) + return sprintf(buf, "%d\n", val); + else if (ret == IIO_VAL_INT_PLUS_MICRO) { + if (val2 < 0) + return sprintf(buf, "-%d.%06u\n", val, -val2); + else + return sprintf(buf, "%d.%06u\n", val, val2); + } else if (ret == IIO_VAL_INT_PLUS_NANO) { + if (val2 < 0) + return sprintf(buf, "-%d.%09u\n", val, -val2); + else + return sprintf(buf, "%d.%09u\n", val, val2); + } else + return 0; +} + +static ssize_t iio_write_channel_info(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + int ret, integer = 0, fract = 0, fract_mult = 100000; + bool integer_part = true, negative = false; + + /* Assumes decimal - precision based on number of digits */ + if (!indio_dev->info->write_raw) + return -EINVAL; + + if (indio_dev->info->write_raw_get_fmt) + switch (indio_dev->info->write_raw_get_fmt(indio_dev, + this_attr->c, this_attr->address)) { + case IIO_VAL_INT_PLUS_MICRO: + fract_mult = 100000; + break; + case IIO_VAL_INT_PLUS_NANO: + fract_mult = 100000000; + break; + default: + return -EINVAL; + } + + if (buf[0] == '-') { + negative = true; + buf++; + } + + while (*buf) { + if ('0' <= *buf && *buf <= '9') { + if (integer_part) + integer = integer*10 + *buf - '0'; + else { + fract += fract_mult*(*buf - '0'); + if (fract_mult == 1) + break; + fract_mult /= 10; + } + } else if (*buf == '\n') { + if (*(buf + 1) == '\0') + break; + else + return -EINVAL; + } else if (*buf == '.') { + integer_part = false; + } else { + return -EINVAL; + } + buf++; + } + if (negative) { + if (integer) + integer = -integer; + else + fract = -fract; + } + + ret = indio_dev->info->write_raw(indio_dev, this_attr->c, + integer, fract, this_attr->address); + if (ret) + return ret; + + return len; +} + +static +int __iio_device_attr_init(struct device_attribute *dev_attr, + const char *postfix, + struct iio_chan_spec const *chan, + ssize_t (*readfunc)(struct device *dev, + struct device_attribute *attr, + char *buf), + ssize_t (*writefunc)(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len), + bool generic) +{ + int ret; + char *name_format, *full_postfix; + sysfs_attr_init(&dev_attr->attr); + + /* Build up postfix of <extend_name>_<modifier>_postfix */ + if (chan->modified && !generic) { + if (chan->extend_name) + full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", + iio_modifier_names[chan + ->channel2], + chan->extend_name, + postfix); + else + full_postfix = kasprintf(GFP_KERNEL, "%s_%s", + iio_modifier_names[chan + ->channel2], + postfix); + } else { + if (chan->extend_name == NULL) + full_postfix = kstrdup(postfix, GFP_KERNEL); + else + full_postfix = kasprintf(GFP_KERNEL, + "%s_%s", + chan->extend_name, + postfix); + } + if (full_postfix == NULL) { + ret = -ENOMEM; + goto error_ret; + } + + if (chan->differential) { /* Differential can not have modifier */ + if (generic) + name_format + = kasprintf(GFP_KERNEL, "%s_%s-%s_%s", + iio_direction[chan->output], + iio_chan_type_name_spec[chan->type], + 
iio_chan_type_name_spec[chan->type], + full_postfix); + else if (chan->indexed) + name_format + = kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s", + iio_direction[chan->output], + iio_chan_type_name_spec[chan->type], + chan->channel, + iio_chan_type_name_spec[chan->type], + chan->channel2, + full_postfix); + else { + WARN_ON("Differential channels must be indexed\n"); + ret = -EINVAL; + goto error_free_full_postfix; + } + } else { /* Single ended */ + if (generic) + name_format + = kasprintf(GFP_KERNEL, "%s_%s_%s", + iio_direction[chan->output], + iio_chan_type_name_spec[chan->type], + full_postfix); + else if (chan->indexed) + name_format + = kasprintf(GFP_KERNEL, "%s_%s%d_%s", + iio_direction[chan->output], + iio_chan_type_name_spec[chan->type], + chan->channel, + full_postfix); + else + name_format + = kasprintf(GFP_KERNEL, "%s_%s_%s", + iio_direction[chan->output], + iio_chan_type_name_spec[chan->type], + full_postfix); + } + if (name_format == NULL) { + ret = -ENOMEM; + goto error_free_full_postfix; + } + dev_attr->attr.name = kasprintf(GFP_KERNEL, + name_format, + chan->channel, + chan->channel2); + if (dev_attr->attr.name == NULL) { + ret = -ENOMEM; + goto error_free_name_format; + } + + if (readfunc) { + dev_attr->attr.mode |= S_IRUGO; + dev_attr->show = readfunc; + } + + if (writefunc) { + dev_attr->attr.mode |= S_IWUSR; + dev_attr->store = writefunc; + } + kfree(name_format); + kfree(full_postfix); + + return 0; + +error_free_name_format: + kfree(name_format); +error_free_full_postfix: + kfree(full_postfix); +error_ret: + return ret; +} + +static void __iio_device_attr_deinit(struct device_attribute *dev_attr) +{ + kfree(dev_attr->attr.name); +} + +int __iio_add_chan_devattr(const char *postfix, + struct iio_chan_spec const *chan, + ssize_t (*readfunc)(struct device *dev, + struct device_attribute *attr, + char *buf), + ssize_t (*writefunc)(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len), + u64 mask, + bool generic, + struct device *dev, + struct list_head *attr_list) +{ + int ret; + struct iio_dev_attr *iio_attr, *t; + + iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL); + if (iio_attr == NULL) { + ret = -ENOMEM; + goto error_ret; + } + ret = __iio_device_attr_init(&iio_attr->dev_attr, + postfix, chan, + readfunc, writefunc, generic); + if (ret) + goto error_iio_dev_attr_free; + iio_attr->c = chan; + iio_attr->address = mask; + list_for_each_entry(t, attr_list, l) + if (strcmp(t->dev_attr.attr.name, + iio_attr->dev_attr.attr.name) == 0) { + if (!generic) + dev_err(dev, "tried to double register : %s\n", + t->dev_attr.attr.name); + ret = -EBUSY; + goto error_device_attr_deinit; + } + list_add(&iio_attr->l, attr_list); + + return 0; + +error_device_attr_deinit: + __iio_device_attr_deinit(&iio_attr->dev_attr); +error_iio_dev_attr_free: + kfree(iio_attr); +error_ret: + return ret; +} + +static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan) +{ + int ret, attrcount = 0; + int i; + const struct iio_chan_spec_ext_info *ext_info; + + if (chan->channel < 0) + return 0; + for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) { + ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2], + chan, + &iio_read_channel_info, + &iio_write_channel_info, + i/2, + !(i%2), + &indio_dev->dev, + &indio_dev->channel_attr_list); + if (ret == -EBUSY && (i%2 == 0)) { + ret = 0; + continue; + } + if (ret < 0) + goto error_ret; + attrcount++; + } + + if (chan->ext_info) { + unsigned int i = 0; + for (ext_info = chan->ext_info; 
ext_info->name; ext_info++) {
+			ret = __iio_add_chan_devattr(ext_info->name,
+					chan,
+					ext_info->read ?
+					    &iio_read_channel_ext_info : NULL,
+					ext_info->write ?
+					    &iio_write_channel_ext_info : NULL,
+					i,
+					ext_info->shared,
+					&indio_dev->dev,
+					&indio_dev->channel_attr_list);
+			i++;
+			if (ret == -EBUSY && ext_info->shared)
+				continue;
+
+			if (ret)
+				goto error_ret;
+
+			attrcount++;
+		}
+	}
+
+	ret = attrcount;
+error_ret:
+	return ret;
+}
+
+static void iio_device_remove_and_free_read_attr(struct iio_dev *indio_dev,
+						 struct iio_dev_attr *p)
+{
+	kfree(p->dev_attr.attr.name);
+	kfree(p);
+}
+
+static ssize_t iio_show_dev_name(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", indio_dev->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+
+static int iio_device_register_sysfs(struct iio_dev *indio_dev)
+{
+	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
+	struct iio_dev_attr *p, *n;
+	struct attribute **attr;
+
+	/* First count elements in any existing group */
+	if (indio_dev->info->attrs) {
+		attr = indio_dev->info->attrs->attrs;
+		while (*attr++ != NULL)
+			attrcount_orig++;
+	}
+	attrcount = attrcount_orig;
+	/*
+	 * New channel registration method - relies on the fact a group does
+	 * not need to be initialized if its name is NULL.
+	 */
+	INIT_LIST_HEAD(&indio_dev->channel_attr_list);
+	if (indio_dev->channels)
+		for (i = 0; i < indio_dev->num_channels; i++) {
+			ret = iio_device_add_channel_sysfs(indio_dev,
+						&indio_dev->channels[i]);
+			if (ret < 0)
+				goto error_clear_attrs;
+			attrcount += ret;
+		}
+
+	if (indio_dev->name)
+		attrcount++;
+
+	indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
+				sizeof(indio_dev->chan_attr_group.attrs[0]),
+				GFP_KERNEL);
+	if (indio_dev->chan_attr_group.attrs == NULL) {
+		ret = -ENOMEM;
+		goto error_clear_attrs;
+	}
+	/* Copy across original attributes */
+	if (indio_dev->info->attrs)
+		memcpy(indio_dev->chan_attr_group.attrs,
+		       indio_dev->info->attrs->attrs,
+		       sizeof(indio_dev->chan_attr_group.attrs[0])
+		       *attrcount_orig);
+	attrn = attrcount_orig;
+	/* Add all elements from the list.
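+	 * (These follow whatever attributes the driver itself supplied
+	 * through info->attrs, which were copied in just above.)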
*/ + list_for_each_entry(p, &indio_dev->channel_attr_list, l) + indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; + if (indio_dev->name) + indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; + + indio_dev->groups[indio_dev->groupcounter++] = + &indio_dev->chan_attr_group; + + return 0; + +error_clear_attrs: + list_for_each_entry_safe(p, n, + &indio_dev->channel_attr_list, l) { + list_del(&p->l); + iio_device_remove_and_free_read_attr(indio_dev, p); + } + + return ret; +} + +static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) +{ + + struct iio_dev_attr *p, *n; + + list_for_each_entry_safe(p, n, &indio_dev->channel_attr_list, l) { + list_del(&p->l); + iio_device_remove_and_free_read_attr(indio_dev, p); + } + kfree(indio_dev->chan_attr_group.attrs); +} + +static void iio_dev_release(struct device *device) +{ + struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev); + cdev_del(&indio_dev->chrdev); + if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) + iio_device_unregister_trigger_consumer(indio_dev); + iio_device_unregister_eventset(indio_dev); + iio_device_unregister_sysfs(indio_dev); + iio_device_unregister_debugfs(indio_dev); +} + +static struct device_type iio_dev_type = { + .name = "iio_device", + .release = iio_dev_release, +}; + +struct iio_dev *iio_device_alloc(int sizeof_priv) +{ + struct iio_dev *dev; + size_t alloc_size; + + alloc_size = sizeof(struct iio_dev); + if (sizeof_priv) { + alloc_size = ALIGN(alloc_size, IIO_ALIGN); + alloc_size += sizeof_priv; + } + /* ensure 32-byte alignment of whole construct ? */ + alloc_size += IIO_ALIGN - 1; + + dev = kzalloc(alloc_size, GFP_KERNEL); + + if (dev) { + dev->dev.groups = dev->groups; + dev->dev.type = &iio_dev_type; + dev->dev.bus = &iio_bus_type; + device_initialize(&dev->dev); + dev_set_drvdata(&dev->dev, (void *)dev); + mutex_init(&dev->mlock); + mutex_init(&dev->info_exist_lock); + + dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL); + if (dev->id < 0) { + /* cannot use a dev_err as the name isn't available */ + printk(KERN_ERR "Failed to get id\n"); + kfree(dev); + return NULL; + } + dev_set_name(&dev->dev, "iio:device%d", dev->id); + } + + return dev; +} +EXPORT_SYMBOL(iio_device_alloc); + +void iio_device_free(struct iio_dev *dev) +{ + if (dev) { + ida_simple_remove(&iio_ida, dev->id); + kfree(dev); + } +} +EXPORT_SYMBOL(iio_device_free); + +/** + * iio_chrdev_open() - chrdev file open for buffer access and ioctls + **/ +static int iio_chrdev_open(struct inode *inode, struct file *filp) +{ + struct iio_dev *indio_dev = container_of(inode->i_cdev, + struct iio_dev, chrdev); + + if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags)) + return -EBUSY; + + filp->private_data = indio_dev; + + return 0; +} + +/** + * iio_chrdev_release() - chrdev file close buffer access and ioctls + **/ +static int iio_chrdev_release(struct inode *inode, struct file *filp) +{ + struct iio_dev *indio_dev = container_of(inode->i_cdev, + struct iio_dev, chrdev); + clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags); + return 0; +} + +/* Somewhat of a cross file organization violation - ioctls here are actually + * event related */ +static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct iio_dev *indio_dev = filp->private_data; + int __user *ip = (int __user *)arg; + int fd; + + if (cmd == IIO_GET_EVENT_FD_IOCTL) { + fd = iio_event_getfd(indio_dev); + if (copy_to_user(ip, &fd, sizeof(fd))) + return -EFAULT; + return 0; + } + return -EINVAL; +} + +static const struct 
file_operations iio_buffer_fileops = { + .read = iio_buffer_read_first_n_outer_addr, + .release = iio_chrdev_release, + .open = iio_chrdev_open, + .poll = iio_buffer_poll_addr, + .owner = THIS_MODULE, + .llseek = noop_llseek, + .unlocked_ioctl = iio_ioctl, + .compat_ioctl = iio_ioctl, +}; + +static const struct iio_buffer_setup_ops noop_ring_setup_ops; + +int iio_device_register(struct iio_dev *indio_dev) +{ + int ret; + + /* configure elements for the chrdev */ + indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id); + + ret = iio_device_register_debugfs(indio_dev); + if (ret) { + dev_err(indio_dev->dev.parent, + "Failed to register debugfs interfaces\n"); + goto error_ret; + } + ret = iio_device_register_sysfs(indio_dev); + if (ret) { + dev_err(indio_dev->dev.parent, + "Failed to register sysfs interfaces\n"); + goto error_unreg_debugfs; + } + ret = iio_device_register_eventset(indio_dev); + if (ret) { + dev_err(indio_dev->dev.parent, + "Failed to register event set\n"); + goto error_free_sysfs; + } + if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) + iio_device_register_trigger_consumer(indio_dev); + + if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) && + indio_dev->setup_ops == NULL) + indio_dev->setup_ops = &noop_ring_setup_ops; + + ret = device_add(&indio_dev->dev); + if (ret < 0) + goto error_unreg_eventset; + cdev_init(&indio_dev->chrdev, &iio_buffer_fileops); + indio_dev->chrdev.owner = indio_dev->info->driver_module; + ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1); + if (ret < 0) + goto error_del_device; + return 0; + +error_del_device: + device_del(&indio_dev->dev); +error_unreg_eventset: + iio_device_unregister_eventset(indio_dev); +error_free_sysfs: + iio_device_unregister_sysfs(indio_dev); +error_unreg_debugfs: + iio_device_unregister_debugfs(indio_dev); +error_ret: + return ret; +} +EXPORT_SYMBOL(iio_device_register); + +void iio_device_unregister(struct iio_dev *indio_dev) +{ + mutex_lock(&indio_dev->info_exist_lock); + indio_dev->info = NULL; + mutex_unlock(&indio_dev->info_exist_lock); + device_unregister(&indio_dev->dev); +} +EXPORT_SYMBOL(iio_device_unregister); +subsys_initcall(iio_init); +module_exit(iio_exit); + +MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); +MODULE_DESCRIPTION("Industrial I/O core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c new file mode 100644 index 00000000000..5fcf50b1ae5 --- /dev/null +++ b/drivers/iio/industrialio-event.c @@ -0,0 +1,453 @@ +/* Industrial I/O event handling + * + * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * Based on elements of hwmon and input subsystems. + */ + +#include <linux/anon_inodes.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/kfifo.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/wait.h> +#include <linux/iio/iio.h> +#include "iio_core.h" +#include <linux/iio/sysfs.h> +#include <linux/iio/events.h> + +/** + * struct iio_event_interface - chrdev interface for an event line + * @wait: wait queue to allow blocking reads of events + * @det_events: list of detected events + * @dev_attr_list: list of event interface sysfs attribute + * @flags: file operations related flags including busy flag. 
+ * @group: event interface sysfs attribute group + */ +struct iio_event_interface { + wait_queue_head_t wait; + DECLARE_KFIFO(det_events, struct iio_event_data, 16); + + struct list_head dev_attr_list; + unsigned long flags; + struct attribute_group group; +}; + +int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) +{ + struct iio_event_interface *ev_int = indio_dev->event_interface; + struct iio_event_data ev; + int copied; + + /* Does anyone care? */ + spin_lock(&ev_int->wait.lock); + if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { + + ev.id = ev_code; + ev.timestamp = timestamp; + + copied = kfifo_put(&ev_int->det_events, &ev); + if (copied != 0) + wake_up_locked_poll(&ev_int->wait, POLLIN); + } + spin_unlock(&ev_int->wait.lock); + + return 0; +} +EXPORT_SYMBOL(iio_push_event); + +/** + * iio_event_poll() - poll the event queue to find out if it has data + */ +static unsigned int iio_event_poll(struct file *filep, + struct poll_table_struct *wait) +{ + struct iio_event_interface *ev_int = filep->private_data; + unsigned int events = 0; + + poll_wait(filep, &ev_int->wait, wait); + + spin_lock(&ev_int->wait.lock); + if (!kfifo_is_empty(&ev_int->det_events)) + events = POLLIN | POLLRDNORM; + spin_unlock(&ev_int->wait.lock); + + return events; +} + +static ssize_t iio_event_chrdev_read(struct file *filep, + char __user *buf, + size_t count, + loff_t *f_ps) +{ + struct iio_event_interface *ev_int = filep->private_data; + unsigned int copied; + int ret; + + if (count < sizeof(struct iio_event_data)) + return -EINVAL; + + spin_lock(&ev_int->wait.lock); + if (kfifo_is_empty(&ev_int->det_events)) { + if (filep->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + goto error_unlock; + } + /* Blocking on device; waiting for something to be there */ + ret = wait_event_interruptible_locked(ev_int->wait, + !kfifo_is_empty(&ev_int->det_events)); + if (ret) + goto error_unlock; + /* Single access device so no one else can get the data */ + } + + ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied); + +error_unlock: + spin_unlock(&ev_int->wait.lock); + + return ret ? ret : copied; +} + +static int iio_event_chrdev_release(struct inode *inode, struct file *filep) +{ + struct iio_event_interface *ev_int = filep->private_data; + + spin_lock(&ev_int->wait.lock); + __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); + /* + * In order to maintain a clean state for reopening, + * clear out any awaiting events. The mask will prevent + * any new __iio_push_event calls running. 
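+ * (The busy bit cleared below is the same one tested by
+ * iio_event_getfd(), so a new event fd can be handed out afterwards.)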
+ */ + kfifo_reset_out(&ev_int->det_events); + spin_unlock(&ev_int->wait.lock); + + return 0; +} + +static const struct file_operations iio_event_chrdev_fileops = { + .read = iio_event_chrdev_read, + .poll = iio_event_poll, + .release = iio_event_chrdev_release, + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +int iio_event_getfd(struct iio_dev *indio_dev) +{ + struct iio_event_interface *ev_int = indio_dev->event_interface; + int fd; + + if (ev_int == NULL) + return -ENODEV; + + spin_lock(&ev_int->wait.lock); + if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { + spin_unlock(&ev_int->wait.lock); + return -EBUSY; + } + spin_unlock(&ev_int->wait.lock); + fd = anon_inode_getfd("iio:event", + &iio_event_chrdev_fileops, ev_int, O_RDONLY); + if (fd < 0) { + spin_lock(&ev_int->wait.lock); + __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); + spin_unlock(&ev_int->wait.lock); + } + return fd; +} + +static const char * const iio_ev_type_text[] = { + [IIO_EV_TYPE_THRESH] = "thresh", + [IIO_EV_TYPE_MAG] = "mag", + [IIO_EV_TYPE_ROC] = "roc", + [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive", + [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive", +}; + +static const char * const iio_ev_dir_text[] = { + [IIO_EV_DIR_EITHER] = "either", + [IIO_EV_DIR_RISING] = "rising", + [IIO_EV_DIR_FALLING] = "falling" +}; + +static ssize_t iio_ev_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + int ret; + bool val; + + ret = strtobool(buf, &val); + if (ret < 0) + return ret; + + ret = indio_dev->info->write_event_config(indio_dev, + this_attr->address, + val); + return (ret < 0) ? ret : len; +} + +static ssize_t iio_ev_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + int val = indio_dev->info->read_event_config(indio_dev, + this_attr->address); + + if (val < 0) + return val; + else + return sprintf(buf, "%d\n", val); +} + +static ssize_t iio_ev_value_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + int val, ret; + + ret = indio_dev->info->read_event_value(indio_dev, + this_attr->address, &val); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", val); +} + +static ssize_t iio_ev_value_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + unsigned long val; + int ret; + + if (!indio_dev->info->write_event_value) + return -EINVAL; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + return ret; + + ret = indio_dev->info->write_event_value(indio_dev, this_attr->address, + val); + if (ret < 0) + return ret; + + return len; +} + +static int iio_device_add_event_sysfs(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan) +{ + int ret = 0, i, attrcount = 0; + u64 mask = 0; + char *postfix; + if (!chan->event_mask) + return 0; + + for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) { + postfix = kasprintf(GFP_KERNEL, "%s_%s_en", + iio_ev_type_text[i/IIO_EV_DIR_MAX], + iio_ev_dir_text[i%IIO_EV_DIR_MAX]); + if (postfix == NULL) { + ret = -ENOMEM; + goto error_ret; + } + if (chan->modified) + mask = 
IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
+						  i/IIO_EV_DIR_MAX,
+						  i%IIO_EV_DIR_MAX);
+		else if (chan->differential)
+			mask = IIO_EVENT_CODE(chan->type,
+					      0, 0,
+					      i%IIO_EV_DIR_MAX,
+					      i/IIO_EV_DIR_MAX,
+					      0,
+					      chan->channel,
+					      chan->channel2);
+		else
+			mask = IIO_UNMOD_EVENT_CODE(chan->type,
+						    chan->channel,
+						    i/IIO_EV_DIR_MAX,
+						    i%IIO_EV_DIR_MAX);
+
+		ret = __iio_add_chan_devattr(postfix,
+					     chan,
+					     &iio_ev_state_show,
+					     &iio_ev_state_store,
+					     mask,
+					     0,
+					     &indio_dev->dev,
+					     &indio_dev->event_interface->
+					     dev_attr_list);
+		kfree(postfix);
+		if (ret)
+			goto error_ret;
+		attrcount++;
+		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
+				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
+				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
+		if (postfix == NULL) {
+			ret = -ENOMEM;
+			goto error_ret;
+		}
+		ret = __iio_add_chan_devattr(postfix, chan,
+					     &iio_ev_value_show,
+					     &iio_ev_value_store,
+					     mask,
+					     0,
+					     &indio_dev->dev,
+					     &indio_dev->event_interface->
+					     dev_attr_list);
+		kfree(postfix);
+		if (ret)
+			goto error_ret;
+		attrcount++;
+	}
+	ret = attrcount;
+error_ret:
+	return ret;
+}
+
+static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
+{
+	struct iio_dev_attr *p, *n;
+	list_for_each_entry_safe(p, n,
+				 &indio_dev->event_interface->
+				 dev_attr_list, l) {
+		kfree(p->dev_attr.attr.name);
+		kfree(p);
+	}
+}
+
+static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
+{
+	int j, ret, attrcount = 0;
+
+	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
+	/* Dynamically created from the channels array */
+	for (j = 0; j < indio_dev->num_channels; j++) {
+		ret = iio_device_add_event_sysfs(indio_dev,
+						 &indio_dev->channels[j]);
+		if (ret < 0)
+			goto error_clear_attrs;
+		attrcount += ret;
+	}
+	return attrcount;
+
+error_clear_attrs:
+	__iio_remove_event_config_attrs(indio_dev);
+
+	return ret;
+}
+
+static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
+{
+	int j;
+
+	for (j = 0; j < indio_dev->num_channels; j++)
+		if (indio_dev->channels[j].event_mask != 0)
+			return true;
+	return false;
+}
+
+static void iio_setup_ev_int(struct iio_event_interface *ev_int)
+{
+	INIT_KFIFO(ev_int->det_events);
+	init_waitqueue_head(&ev_int->wait);
+}
+
+static const char *iio_event_group_name = "events";
+int iio_device_register_eventset(struct iio_dev *indio_dev)
+{
+	struct iio_dev_attr *p;
+	int ret = 0, attrcount_orig = 0, attrcount, attrn;
+	struct attribute **attr;
+
+	if (!(indio_dev->info->event_attrs ||
+	      iio_check_for_dynamic_events(indio_dev)))
+		return 0;
+
+	indio_dev->event_interface =
+		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+	if (indio_dev->event_interface == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	iio_setup_ev_int(indio_dev->event_interface);
+	if (indio_dev->info->event_attrs != NULL) {
+		attr = indio_dev->info->event_attrs->attrs;
+		while (*attr++ != NULL)
+			attrcount_orig++;
+	}
+	attrcount = attrcount_orig;
+	if (indio_dev->channels) {
+		ret = __iio_add_event_config_attrs(indio_dev);
+		if (ret < 0)
+			goto error_free_setup_event_lines;
+		attrcount += ret;
+	}
+
+	indio_dev->event_interface->group.name = iio_event_group_name;
+	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+		sizeof(indio_dev->event_interface->group.attrs[0]),
+		GFP_KERNEL);
+	if (indio_dev->event_interface->group.attrs == NULL) {
+		ret = -ENOMEM;
+		goto error_free_setup_event_lines;
+	}
+	if (indio_dev->info->event_attrs)
+		memcpy(indio_dev->event_interface->group.attrs,
+		       indio_dev->info->event_attrs->attrs,
+static const char *iio_event_group_name = "events";
+int iio_device_register_eventset(struct iio_dev *indio_dev)
+{
+	struct iio_dev_attr *p;
+	int ret = 0, attrcount_orig = 0, attrcount, attrn;
+	struct attribute **attr;
+
+	if (!(indio_dev->info->event_attrs ||
+	      iio_check_for_dynamic_events(indio_dev)))
+		return 0;
+
+	indio_dev->event_interface =
+		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+	if (indio_dev->event_interface == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	iio_setup_ev_int(indio_dev->event_interface);
+	if (indio_dev->info->event_attrs != NULL) {
+		attr = indio_dev->info->event_attrs->attrs;
+		while (*attr++ != NULL)
+			attrcount_orig++;
+	}
+	attrcount = attrcount_orig;
+	if (indio_dev->channels) {
+		ret = __iio_add_event_config_attrs(indio_dev);
+		if (ret < 0)
+			goto error_free_setup_event_lines;
+		attrcount += ret;
+	}
+
+	indio_dev->event_interface->group.name = iio_event_group_name;
+	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+		sizeof(indio_dev->event_interface->group.attrs[0]),
+		GFP_KERNEL);
+	if (indio_dev->event_interface->group.attrs == NULL) {
+		ret = -ENOMEM;
+		goto error_free_setup_event_lines;
+	}
+	if (indio_dev->info->event_attrs)
+		memcpy(indio_dev->event_interface->group.attrs,
+		       indio_dev->info->event_attrs->attrs,
+		       sizeof(indio_dev->event_interface->group.attrs[0])
+		       *attrcount_orig);
+	attrn = attrcount_orig;
+	/* Add all elements from the list. */
+	list_for_each_entry(p,
+			    &indio_dev->event_interface->dev_attr_list,
+			    l)
+		indio_dev->event_interface->group.attrs[attrn++] =
+			&p->dev_attr.attr;
+	indio_dev->groups[indio_dev->groupcounter++] =
+		&indio_dev->event_interface->group;
+
+	return 0;
+
+error_free_setup_event_lines:
+	__iio_remove_event_config_attrs(indio_dev);
+	kfree(indio_dev->event_interface);
+error_ret:
+
+	return ret;
+}
+
+void iio_device_unregister_eventset(struct iio_dev *indio_dev)
+{
+	if (indio_dev->event_interface == NULL)
+		return;
+	__iio_remove_event_config_attrs(indio_dev);
+	kfree(indio_dev->event_interface->group.attrs);
+	kfree(indio_dev->event_interface);
+}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
new file mode 100644
index 00000000000..1dbd7b86a69
--- /dev/null
+++ b/drivers/iio/industrialio-trigger.c
@@ -0,0 +1,509 @@
+/* The industrial I/O core, trigger handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include "iio_core.h"
+#include "iio_core_trigger.h"
+#include <linux/iio/trigger_consumer.h>
+
+/* RFC - Question of approach
+ * Make the common case (single sensor, single trigger)
+ * simple by starting trigger capture when the first sensor
+ * is added.
+ *
+ * Complex simultaneous start requires use of 'hold' functionality
+ * of the trigger. (not implemented)
+ *
+ * Any other suggestions?
+ */
+
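Before the implementation, a brief provider-side sketch of the API this file exports. This is illustrative only and not part of the patch: the example_* names, the platform_device context and the set_trigger_state callback body are all assumed.

	/* Hypothetical trigger provider; needs <linux/platform_device.h>. */
	static const struct iio_trigger_ops example_trigger_ops = {
		.owner = THIS_MODULE,
		.set_trigger_state = example_set_trigger_state, /* assumed */
	};

	static int example_trigger_probe(struct platform_device *pdev)
	{
		struct iio_trigger *trig;
		int ret;

		trig = iio_trigger_alloc("example-trig%d", pdev->id);
		if (trig == NULL)
			return -ENOMEM;
		trig->ops = &example_trigger_ops;
		ret = iio_trigger_register(trig);
		if (ret)
			iio_trigger_free(trig);
		return ret;
	}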
+static DEFINE_IDA(iio_trigger_ida);
+
+/* Single list of all available triggers */
+static LIST_HEAD(iio_trigger_list);
+static DEFINE_MUTEX(iio_trigger_list_lock);
+
+/**
+ * iio_trigger_read_name() - retrieve useful identifying name
+ **/
+static ssize_t iio_trigger_read_name(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct iio_trigger *trig = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", trig->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+/**
+ * iio_trigger_register_sysfs() - create a device for this trigger
+ * @trig_info:	the trigger
+ *
+ * Also adds any control attribute registered by the trigger driver
+ **/
+static int iio_trigger_register_sysfs(struct iio_trigger *trig_info)
+{
+	return sysfs_add_file_to_group(&trig_info->dev.kobj,
+				       &dev_attr_name.attr,
+				       NULL);
+}
+
+static void iio_trigger_unregister_sysfs(struct iio_trigger *trig_info)
+{
+	sysfs_remove_file_from_group(&trig_info->dev.kobj,
+				     &dev_attr_name.attr,
+				     NULL);
+}
+
+int iio_trigger_register(struct iio_trigger *trig_info)
+{
+	int ret;
+
+	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
+	if (trig_info->id < 0) {
+		ret = trig_info->id;
+		goto error_ret;
+	}
+	/* Set the name used for the sysfs directory etc */
+	dev_set_name(&trig_info->dev, "trigger%lu",
+		     (unsigned long) trig_info->id);
+
+	ret = device_add(&trig_info->dev);
+	if (ret)
+		goto error_unregister_id;
+
+	ret = iio_trigger_register_sysfs(trig_info);
+	if (ret)
+		goto error_device_del;
+
+	/* Add to list of available triggers held by the IIO core */
+	mutex_lock(&iio_trigger_list_lock);
+	list_add_tail(&trig_info->list, &iio_trigger_list);
+	mutex_unlock(&iio_trigger_list_lock);
+
+	return 0;
+
+error_device_del:
+	device_del(&trig_info->dev);
+error_unregister_id:
+	ida_simple_remove(&iio_trigger_ida, trig_info->id);
+error_ret:
+	return ret;
+}
+EXPORT_SYMBOL(iio_trigger_register);
+
+void iio_trigger_unregister(struct iio_trigger *trig_info)
+{
+	mutex_lock(&iio_trigger_list_lock);
+	list_del(&trig_info->list);
+	mutex_unlock(&iio_trigger_list_lock);
+
+	iio_trigger_unregister_sysfs(trig_info);
+	ida_simple_remove(&iio_trigger_ida, trig_info->id);
+	/* Possible issue in here */
+	device_unregister(&trig_info->dev);
+}
+EXPORT_SYMBOL(iio_trigger_unregister);
+
+static struct iio_trigger *iio_trigger_find_by_name(const char *name,
+						    size_t len)
+{
+	struct iio_trigger *trig = NULL, *iter;
+
+	mutex_lock(&iio_trigger_list_lock);
+	list_for_each_entry(iter, &iio_trigger_list, list)
+		if (sysfs_streq(iter->name, name)) {
+			trig = iter;
+			break;
+		}
+	mutex_unlock(&iio_trigger_list_lock);
+
+	return trig;
+}
+
+void iio_trigger_poll(struct iio_trigger *trig, s64 time)
+{
+	int i;
+	if (!trig->use_count)
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
+			if (trig->subirqs[i].enabled) {
+				trig->use_count++;
+				generic_handle_irq(trig->subirq_base + i);
+			}
+}
+EXPORT_SYMBOL(iio_trigger_poll);
+
+irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
+{
+	iio_trigger_poll(private, iio_get_time_ns());
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
+
+void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
+{
+	int i;
+	if (!trig->use_count)
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
+			if (trig->subirqs[i].enabled) {
+				trig->use_count++;
+				handle_nested_irq(trig->subirq_base + i);
+			}
+}
+EXPORT_SYMBOL(iio_trigger_poll_chained);
+
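For a hardware data-ready line, the generic handler above is typically all a provider needs; the chained variant with handle_nested_irq() is for interrupt lines that must run in a sleeping (threaded) context. A sketch, with the helper name and IRQ flag choice assumed rather than taken from this patch:

	/* Illustrative; needs <linux/interrupt.h>. Fans the device's
	 * data-ready interrupt out to all consumers of trig. */
	static int example_hook_drdy(struct iio_trigger *trig, int irq)
	{
		return request_irq(irq, iio_trigger_generic_data_rdy_poll,
				   IRQF_TRIGGER_RISING, trig->name, trig);
	}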
+void iio_trigger_notify_done(struct iio_trigger *trig)
+{
+	trig->use_count--;
+	if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+		if (trig->ops->try_reenable(trig))
+			/* Missed an interrupt so launch a new poll now */
+			iio_trigger_poll(trig, 0);
+}
+EXPORT_SYMBOL(iio_trigger_notify_done);
+
+/* Trigger Consumer related functions */
+static int iio_trigger_get_irq(struct iio_trigger *trig)
+{
+	int ret;
+	mutex_lock(&trig->pool_lock);
+	ret = bitmap_find_free_region(trig->pool,
+				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
+				      ilog2(1));
+	mutex_unlock(&trig->pool_lock);
+	if (ret >= 0)
+		ret += trig->subirq_base;
+
+	return ret;
+}
+
+static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
+{
+	mutex_lock(&trig->pool_lock);
+	clear_bit(irq - trig->subirq_base, trig->pool);
+	mutex_unlock(&trig->pool_lock);
+}
+
+/* Complexity in here.  With certain triggers (datardy) an acknowledgement
+ * may be needed if the pollfuncs do not include the data read for the
+ * triggering device.
+ * This is not currently handled.  Alternative of not enabling trigger unless
+ * the relevant function is in there may be the best option.
+ */
+/* Worth protecting against double additions? */
+static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+					struct iio_poll_func *pf)
+{
+	int ret = 0;
+	bool notinuse
+		= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+	/* Prevent the module being removed whilst attached to a trigger */
+	__module_get(pf->indio_dev->info->driver_module);
+	pf->irq = iio_trigger_get_irq(trig);
+	if (pf->irq < 0) {
+		/* The consumer pool for this trigger is exhausted */
+		module_put(pf->indio_dev->info->driver_module);
+		return pf->irq;
+	}
+	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
+				   pf->type, pf->name,
+				   pf);
+	if (ret < 0) {
+		module_put(pf->indio_dev->info->driver_module);
+		return ret;
+	}
+
+	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+		ret = trig->ops->set_trigger_state(trig, true);
+		if (ret < 0)
+			module_put(pf->indio_dev->info->driver_module);
+	}
+
+	return ret;
+}
+
+static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
+					struct iio_poll_func *pf)
+{
+	int ret = 0;
+	bool no_other_users
+		= (bitmap_weight(trig->pool,
+				 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
+		   == 1);
+	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
+		ret = trig->ops->set_trigger_state(trig, false);
+		if (ret)
+			goto error_ret;
+	}
+	iio_trigger_put_irq(trig, pf->irq);
+	free_irq(pf->irq, pf);
+	module_put(pf->indio_dev->info->driver_module);
+
+error_ret:
+	return ret;
+}
+
+irqreturn_t iio_pollfunc_store_time(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	pf->timestamp = iio_get_time_ns();
+	return IRQ_WAKE_THREAD;
+}
+EXPORT_SYMBOL(iio_pollfunc_store_time);
+
+struct iio_poll_func
+*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
+		    irqreturn_t (*thread)(int irq, void *p),
+		    int type,
+		    struct iio_dev *indio_dev,
+		    const char *fmt,
+		    ...)
+{
+	va_list vargs;
+	struct iio_poll_func *pf;
+
+	pf = kmalloc(sizeof *pf, GFP_KERNEL);
+	if (pf == NULL)
+		return NULL;
+	va_start(vargs, fmt);
+	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+	va_end(vargs);
+	if (pf->name == NULL) {
+		kfree(pf);
+		return NULL;
+	}
+	pf->h = h;
+	pf->thread = thread;
+	pf->type = type;
+	pf->indio_dev = indio_dev;
+
+	return pf;
+}
+EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
+
+void iio_dealloc_pollfunc(struct iio_poll_func *pf)
+{
+	kfree(pf->name);
+	kfree(pf);
+}
+EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
+
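A typical consumer pairs iio_pollfunc_store_time() as the hard-IRQ half with its own threaded handler that reads and pushes the data. A sketch of the usual call in a driver's probe(); example_trigger_handler is an assumed driver function, not part of this patch:

	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &example_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL)
		return -ENOMEM;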
+/**
+ * iio_trigger_read_current() - trigger consumer sysfs query which trigger
+ * is in use
+ *
+ * For trigger consumers the current_trigger interface allows the trigger
+ * used by the device to be queried.
+ **/
+static ssize_t iio_trigger_read_current(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+	if (indio_dev->trig)
+		return sprintf(buf, "%s\n", indio_dev->trig->name);
+	return 0;
+}
+
+/**
+ * iio_trigger_write_current() - trigger consumer sysfs set current trigger
+ *
+ * For trigger consumers the current_trigger interface allows the trigger
+ * used for this device to be specified at run time based on the trigger's
+ * name.
+ **/
+static ssize_t iio_trigger_write_current(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct iio_trigger *oldtrig = indio_dev->trig;
+	struct iio_trigger *trig;
+	int ret;
+
+	mutex_lock(&indio_dev->mlock);
+	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+		mutex_unlock(&indio_dev->mlock);
+		return -EBUSY;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	trig = iio_trigger_find_by_name(buf, len);
+	if (oldtrig == trig)
+		return len;
+
+	if (trig && indio_dev->info->validate_trigger) {
+		ret = indio_dev->info->validate_trigger(indio_dev, trig);
+		if (ret)
+			return ret;
+	}
+
+	if (trig && trig->ops && trig->ops->validate_device) {
+		ret = trig->ops->validate_device(trig, indio_dev);
+		if (ret)
+			return ret;
+	}
+
+	indio_dev->trig = trig;
+
+	if (oldtrig && indio_dev->trig != oldtrig)
+		iio_trigger_put(oldtrig);
+	if (indio_dev->trig)
+		iio_trigger_get(indio_dev->trig);
+
+	return len;
+}
+
+static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
+		   iio_trigger_read_current,
+		   iio_trigger_write_current);
+
+static struct attribute *iio_trigger_consumer_attrs[] = {
+	&dev_attr_current_trigger.attr,
+	NULL,
+};
+
+static const struct attribute_group iio_trigger_consumer_attr_group = {
+	.name = "trigger",
+	.attrs = iio_trigger_consumer_attrs,
+};
+
+static void iio_trig_release(struct device *device)
+{
+	struct iio_trigger *trig = to_iio_trigger(device);
+	int i;
+
+	if (trig->subirq_base) {
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+			irq_modify_status(trig->subirq_base + i,
+					  IRQ_NOAUTOEN,
+					  IRQ_NOREQUEST | IRQ_NOPROBE);
+			irq_set_chip(trig->subirq_base + i,
+				     NULL);
+			irq_set_handler(trig->subirq_base + i,
+					NULL);
+		}
+
+		irq_free_descs(trig->subirq_base,
+			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+	}
+	kfree(trig->name);
+	kfree(trig);
+}
+
+static struct device_type iio_trig_type = {
+	.release = iio_trig_release,
+};
+
+static void iio_trig_subirqmask(struct irq_data *d)
+{
+	struct irq_chip *chip = irq_data_get_irq_chip(d);
+	struct iio_trigger *trig
+		= container_of(chip,
+			       struct iio_trigger, subirq_chip);
+	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
+}
+
+static void iio_trig_subirqunmask(struct irq_data *d)
+{
+	struct irq_chip *chip = irq_data_get_irq_chip(d);
+	struct iio_trigger *trig
+		= container_of(chip,
+			       struct iio_trigger, subirq_chip);
+	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
+}
+
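For completeness, the current_trigger attribute defined above is driven from userspace by writing a trigger's name. A minimal userspace sketch; the device and trigger numbers are examples only:

	/* Userspace side; needs <fcntl.h> and <unistd.h>. */
	static int example_select_trigger(void)
	{
		int fd = open("/sys/bus/iio/devices/iio:device0/trigger/"
			      "current_trigger", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, "trigger0", 8);
		close(fd);
		return n == 8 ? 0 : -1;
	}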
+struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
+{
+	va_list vargs;
+	struct iio_trigger *trig;
+	trig = kzalloc(sizeof *trig, GFP_KERNEL);
+	if (trig) {
+		int i;
+		trig->dev.type = &iio_trig_type;
+		trig->dev.bus = &iio_bus_type;
+		device_initialize(&trig->dev);
+		dev_set_drvdata(&trig->dev, (void *)trig);
+
+		mutex_init(&trig->pool_lock);
+		trig->subirq_base
+			= irq_alloc_descs(-1, 0,
+					  CONFIG_IIO_CONSUMERS_PER_TRIGGER,
+					  0);
+		if (trig->subirq_base < 0) {
+			kfree(trig);
+			return NULL;
+		}
+		va_start(vargs, fmt);
+		trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+		va_end(vargs);
+		if (trig->name == NULL) {
+			irq_free_descs(trig->subirq_base,
+				       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+			kfree(trig);
+			return NULL;
+		}
+		trig->subirq_chip.name = trig->name;
+		trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
+		trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+			irq_set_chip(trig->subirq_base + i,
+				     &trig->subirq_chip);
+			irq_set_handler(trig->subirq_base + i,
+					&handle_simple_irq);
+			irq_modify_status(trig->subirq_base + i,
+					  IRQ_NOREQUEST | IRQ_NOAUTOEN,
+					  IRQ_NOPROBE);
+		}
+		get_device(&trig->dev);
+	}
+	return trig;
+}
+EXPORT_SYMBOL(iio_trigger_alloc);
+
+void iio_trigger_free(struct iio_trigger *trig)
+{
+	if (trig)
+		put_device(&trig->dev);
+}
+EXPORT_SYMBOL(iio_trigger_free);
+
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+{
+	indio_dev->groups[indio_dev->groupcounter++] =
+		&iio_trigger_consumer_attr_group;
+}
+
+void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
+{
+	/* Clean up any associated but not attached trigger references */
+	if (indio_dev->trig)
+		iio_trigger_put(indio_dev->trig);
+}
+
+int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
+{
+	return iio_trigger_attach_poll_func(indio_dev->trig,
+					    indio_dev->pollfunc);
+}
+EXPORT_SYMBOL(iio_triggered_buffer_postenable);
+
+int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
+{
+	return iio_trigger_detach_poll_func(indio_dev->trig,
+					    indio_dev->pollfunc);
+}
+EXPORT_SYMBOL(iio_triggered_buffer_predisable);
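The postenable/predisable helpers above are designed to slot straight into a driver's buffer setup_ops. A sketch of the common pairing; individual drivers may need additional preenable/postdisable callbacks of their own:

	static const struct iio_buffer_setup_ops example_setup_ops = {
		.postenable = &iio_triggered_buffer_postenable,
		.predisable = &iio_triggered_buffer_predisable,
	};

	/* in probe():  indio_dev->setup_ops = &example_setup_ops; */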
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
new file mode 100644
index 00000000000..922645893dc
--- /dev/null
+++ b/drivers/iio/inkern.c
@@ -0,0 +1,293 @@
+/* The industrial I/O core in kernel channel mapping
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include <linux/iio/iio.h>
+#include "iio_core.h"
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/consumer.h>
+
+struct iio_map_internal {
+	struct iio_dev *indio_dev;
+	struct iio_map *map;
+	struct list_head l;
+};
+
+static LIST_HEAD(iio_map_list);
+static DEFINE_MUTEX(iio_map_list_lock);
+
+int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
+{
+	int i = 0, ret = 0;
+	struct iio_map_internal *mapi;
+
+	if (maps == NULL)
+		return 0;
+
+	mutex_lock(&iio_map_list_lock);
+	while (maps[i].consumer_dev_name != NULL) {
+		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
+		if (mapi == NULL) {
+			ret = -ENOMEM;
+			goto error_ret;
+		}
+		mapi->map = &maps[i];
+		mapi->indio_dev = indio_dev;
+		list_add(&mapi->l, &iio_map_list);
+		i++;
+	}
+error_ret:
+	mutex_unlock(&iio_map_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_register);
+
+
+/* Assumes the exact same array (e.g. memory locations)
+ * used at unregistration as used at registration rather than
+ * more complex checking of contents.
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev,
+			     struct iio_map *maps)
+{
+	int i = 0, ret = 0;
+	bool found_it;
+	struct iio_map_internal *mapi;
+
+	if (maps == NULL)
+		return 0;
+
+	mutex_lock(&iio_map_list_lock);
+	while (maps[i].consumer_dev_name != NULL) {
+		found_it = false;
+		list_for_each_entry(mapi, &iio_map_list, l)
+			if (&maps[i] == mapi->map) {
+				list_del(&mapi->l);
+				kfree(mapi);
+				found_it = true;
+				break;
+			}
+		if (found_it == false) {
+			ret = -ENODEV;
+			goto error_ret;
+		}
+		i++;
+	}
+error_ret:
+	mutex_unlock(&iio_map_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_unregister);
+
+static const struct iio_chan_spec
+*iio_chan_spec_from_name(const struct iio_dev *indio_dev,
+			 const char *name)
+{
+	int i;
+	const struct iio_chan_spec *chan = NULL;
+
+	for (i = 0; i < indio_dev->num_channels; i++)
+		if (indio_dev->channels[i].datasheet_name &&
+		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
+			chan = &indio_dev->channels[i];
+			break;
+		}
+	return chan;
+}
+
+
+struct iio_channel *iio_st_channel_get(const char *name,
+				       const char *channel_name)
+{
+	struct iio_map_internal *c_i = NULL, *c = NULL;
+	struct iio_channel *channel;
+
+	if (name == NULL && channel_name == NULL)
+		return ERR_PTR(-ENODEV);
+
+	/* first find the matching entry in the channel map */
+	mutex_lock(&iio_map_list_lock);
+	list_for_each_entry(c_i, &iio_map_list, l) {
+		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
+		    (channel_name &&
+		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
+			continue;
+		c = c_i;
+		get_device(&c->indio_dev->dev);
+		break;
+	}
+	mutex_unlock(&iio_map_list_lock);
+	if (c == NULL)
+		return ERR_PTR(-ENODEV);
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (channel == NULL) {
+		/* drop the reference taken above */
+		put_device(&c->indio_dev->dev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	channel->indio_dev = c->indio_dev;
+
+	if (c->map->adc_channel_label)
+		channel->channel =
+			iio_chan_spec_from_name(channel->indio_dev,
+						c->map->adc_channel_label);
+
+	return channel;
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_get);
+
+void iio_st_channel_release(struct iio_channel *channel)
+{
+	put_device(&channel->indio_dev->dev);
+	kfree(channel);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_release);
+
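The maps consumed by iio_st_channel_get() are normally supplied by board code and registered by the ADC driver via iio_map_array_register(). An illustrative mapping; all of the strings here are invented for the example:

	static struct iio_map example_maps[] = {
		{
			.adc_channel_label = "vin0",	/* datasheet_name */
			.consumer_dev_name = "example-consumer",
			.consumer_channel  = "battery_voltage",
		},
		{ },	/* NULL consumer_dev_name terminates the array */
	};

	/* ADC driver:	iio_map_array_register(indio_dev, example_maps);
	 * Consumer:	chan = iio_st_channel_get("example-consumer",
	 *					  "battery_voltage");
	 */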
+struct iio_channel *iio_st_channel_get_all(const char *name)
+{
+	struct iio_channel *chans;
+	struct iio_map_internal *c = NULL;
+	int nummaps = 0;
+	int mapind = 0;
+	int i, ret;
+
+	if (name == NULL)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&iio_map_list_lock);
+	/* first count the matching maps */
+	list_for_each_entry(c, &iio_map_list, l)
+		if (strcmp(name, c->map->consumer_dev_name) == 0)
+			nummaps++;
+
+	if (nummaps == 0) {
+		ret = -ENODEV;
+		goto error_ret;
+	}
+
+	/* NULL terminated array to save passing size */
+	chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
+	if (chans == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* for each map fill in the chans element */
+	list_for_each_entry(c, &iio_map_list, l) {
+		if (strcmp(name, c->map->consumer_dev_name) != 0)
+			continue;
+		chans[mapind].indio_dev = c->indio_dev;
+		chans[mapind].channel =
+			iio_chan_spec_from_name(chans[mapind].indio_dev,
+						c->map->adc_channel_label);
+		if (chans[mapind].channel == NULL) {
+			ret = -EINVAL;
+			/* no reference has been taken for this entry yet */
+			chans[mapind].indio_dev = NULL;
+			goto error_free_chans;
+		}
+		get_device(&chans[mapind].indio_dev->dev);
+		mapind++;
+	}
+	if (mapind == 0) {
+		ret = -ENODEV;
+		goto error_free_chans;
+	}
+	mutex_unlock(&iio_map_list_lock);
+
+	return chans;
+
+error_free_chans:
+	for (i = 0; i < nummaps; i++)
+		if (chans[i].indio_dev)
+			put_device(&chans[i].indio_dev->dev);
+	kfree(chans);
+error_ret:
+	mutex_unlock(&iio_map_list_lock);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_get_all);
+
+void iio_st_channel_release_all(struct iio_channel *channels)
+{
+	struct iio_channel *chan = &channels[0];
+
+	while (chan->indio_dev) {
+		put_device(&chan->indio_dev->dev);
+		chan++;
+	}
+	kfree(channels);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
+
+int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
+{
+	int val2, ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
+					      val, &val2, 0);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);
+
+int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = chan->indio_dev->info->read_raw(chan->indio_dev,
+					      chan->channel,
+					      val, val2,
+					      IIO_CHAN_INFO_SCALE);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
+
+int iio_st_get_channel_type(struct iio_channel *chan,
+			    enum iio_chan_type *type)
+{
+	int ret = 0;
+	/* Need to verify underlying driver has not gone away */
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	*type = chan->channel->type;
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_get_channel_type);
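Combining the accessors above, a consumer converts a raw reading into a physical quantity. A sketch that assumes a plain integer scale for brevity; real callers must handle the fractional IIO_VAL_* encodings a driver's read_raw() may use:

	static int example_read_scaled(struct iio_channel *chan, int *result)
	{
		int raw, scale_int, scale_micro;
		int ret;

		ret = iio_st_read_channel_raw(chan, &raw);
		if (ret < 0)
			return ret;
		ret = iio_st_read_channel_scale(chan, &scale_int,
						&scale_micro);
		if (ret < 0)
			return ret;
		/* Assumes an integer scale; scale_micro ignored here. */
		*result = raw * scale_int;
		return 0;
	}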
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
new file mode 100644
index 00000000000..6bf9d05f484
--- /dev/null
+++ b/drivers/iio/kfifo_buf.c
@@ -0,0 +1,150 @@
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/iio/kfifo_buf.h>
+
+struct iio_kfifo {
+	struct iio_buffer buffer;
+	struct kfifo kf;
+	int update_needed;
+};
+
+#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
+
+static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
+				int bytes_per_datum, int length)
+{
+	if ((length == 0) || (bytes_per_datum == 0))
+		return -EINVAL;
+
+	__iio_update_buffer(&buf->buffer, bytes_per_datum, length);
+	return kfifo_alloc(&buf->kf, bytes_per_datum*length, GFP_KERNEL);
+}
+
+static int iio_request_update_kfifo(struct iio_buffer *r)
+{
+	int ret = 0;
+	struct iio_kfifo *buf = iio_to_kfifo(r);
+
+	if (!buf->update_needed)
+		goto error_ret;
+	kfifo_free(&buf->kf);
+	ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
+				   buf->buffer.length);
+error_ret:
+	return ret;
+}
+
+static int iio_get_length_kfifo(struct iio_buffer *r)
+{
+	return r->length;
+}
+
+static IIO_BUFFER_ENABLE_ATTR;
+static IIO_BUFFER_LENGTH_ATTR;
+
+static struct attribute *iio_kfifo_attributes[] = {
+	&dev_attr_length.attr,
+	&dev_attr_enable.attr,
+	NULL,
+};
+
+static struct attribute_group iio_kfifo_attribute_group = {
+	.attrs = iio_kfifo_attributes,
+	.name = "buffer",
+};
+
+static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
+{
+	return r->bytes_per_datum;
+}
+
+static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
+{
+	struct iio_kfifo *kf = iio_to_kfifo(r);
+	kf->update_needed = true;
+	return 0;
+}
+
+static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
+{
+	if (r->bytes_per_datum != bpd) {
+		r->bytes_per_datum = bpd;
+		iio_mark_update_needed_kfifo(r);
+	}
+	return 0;
+}
+
+static int iio_set_length_kfifo(struct iio_buffer *r, int length)
+{
+	if (r->length != length) {
+		r->length = length;
+		iio_mark_update_needed_kfifo(r);
+	}
+	return 0;
+}
+
+static int iio_store_to_kfifo(struct iio_buffer *r,
+			      u8 *data,
+			      s64 timestamp)
+{
+	int ret;
+	struct iio_kfifo *kf = iio_to_kfifo(r);
+	ret = kfifo_in(&kf->kf, data, r->bytes_per_datum);
+	if (ret != r->bytes_per_datum)
+		return -EBUSY;
+	return 0;
+}
+
+static int iio_read_first_n_kfifo(struct iio_buffer *r,
+				  size_t n, char __user *buf)
+{
+	int ret, copied;
+	struct iio_kfifo *kf = iio_to_kfifo(r);
+
+	if (n < r->bytes_per_datum)
+		return -EINVAL;
+
+	n = rounddown(n, r->bytes_per_datum);
+	ret = kfifo_to_user(&kf->kf, buf, n, &copied);
+	if (ret < 0)
+		return ret;
+
+	return copied;
+}
+
+static const struct iio_buffer_access_funcs kfifo_access_funcs = {
+	.store_to = &iio_store_to_kfifo,
+	.read_first_n = &iio_read_first_n_kfifo,
+	.request_update = &iio_request_update_kfifo,
+	.get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
+	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
+	.get_length = &iio_get_length_kfifo,
+	.set_length = &iio_set_length_kfifo,
+};
+
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
+{
+	struct iio_kfifo *kf;
+
+	kf = kzalloc(sizeof *kf, GFP_KERNEL);
+	if (!kf)
+		return NULL;
+	kf->update_needed = true;
+	iio_buffer_init(&kf->buffer);
+	kf->buffer.attrs = &iio_kfifo_attribute_group;
+	kf->buffer.access = &kfifo_access_funcs;
+
+	return &kf->buffer;
+}
+EXPORT_SYMBOL(iio_kfifo_allocate);
+
+void iio_kfifo_free(struct iio_buffer *r)
+{
+	kfree(iio_to_kfifo(r));
+}
+EXPORT_SYMBOL(iio_kfifo_free);
+
+MODULE_LICENSE("GPL");
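Finally, a device driver adopts this fifo by attaching it in probe(). A minimal sketch; the mode flag shown is the typical pairing with a trigger, and error handling is trimmed:

	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (indio_dev->buffer == NULL)
		return -ENOMEM;
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;	/* typical pairing */

	/* ...and on remove: iio_kfifo_free(indio_dev->buffer); */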