/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

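/*
 * A buffer is considered active if it is linked into the device's
 * buffer_list, i.e. it is part of a currently enabled capture session.
 */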
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

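/* Ask the buffer implementation whether any data is ready to be read. */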
static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

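/*
 * Give the driver a chance to veto a proposed scan mask; if it does not
 * supply a validate_scan_mask() callback, any mask is accepted.
 */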
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

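/*
 * Work out how many bytes one scan occupies: one sample for every enabled
 * channel in @mask, plus the optional timestamp. Each element is naturally
 * aligned to its own storage size, mirroring the layout the demux code
 * further down produces.
 */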
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

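/*
 * Activation takes a reference on the buffer and links it into the
 * device's list of in-use buffers; deactivation undoes both steps.
 */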
static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

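/* Recompute and program the buffer's per-scan size after a mask change. */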
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

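/*
 * Core of the buffer list update: wind down any currently running
 * configuration, apply the requested insertion/removal, recompute the
 * compound scan mask across all remaining buffers and bring the device
 * back up again. Callers must hold mlock.
 */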
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				return -EINVAL;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

static const char * const iio_scan_elements_group_name = "scan_elements";

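/*
 * Build the scan_elements sysfs group for a device's buffer: one set of
 * index/type/en attributes per scannable channel, plus any attributes the
 * buffer implementation supplies itself, and allocate the scan mask those
 * attributes operate on.
 */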
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	if (!buffer)
		return 0;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

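/*
 * Apply the demux table: copy only the channels this buffer requested out
 * of a full scan. If the table is empty the scan already matches and the
 * input is returned unmodified.
 */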
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

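/*
 * Record one memcpy operation in the demux table, merging with the
 * previous entry when source and destination are both contiguous so that
 * adjacent channels end up copied with a single memcpy.
 */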
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

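/*
 * Rebuild a buffer's demux table by walking the buffer's scan mask and the
 * device's active scan mask in parallel: every requested channel is copied
 * from its offset within the full scan to its packed output offset, with
 * the timestamp (always last) handled separately.
 */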
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);