// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/anon_inodes.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

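/*
 * iio_buffer_ready() - check whether enough data is available to satisfy a
 * read, flushing the hardware FIFO into the buffer if necessary.
 * @to_wait is the number of datums the caller waits for, @to_flush the number
 * of datums to request from a hardware FIFO. Returns true when @to_wait
 * datums are available (possibly after a flush) or when the device has been
 * unregistered; for a buffer that is no longer active, any remaining data
 * counts as ready so that readers can drain it.
 */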
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		Maximum number of bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or the number of
 *	   bytes read; a return value of 0 ends the reading activity
 **/
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_read(filp, buf, n, f_ps);
}

__poll_t iio_buffer_poll_wrapper(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return 0;

	return iio_buffer_poll(filp, wait);
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		wake_up(&buffer->pollq);
	}
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

void iio_device_detach_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		iio_buffer_put(buffer);
	}

	kfree(iio_dev_opaque->attached_buffers);
}

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sysfs_emit(buf, "%d\n", ret);
}

/* Note: NULL is used as the error indicator since a NULL mask makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
	if (!trialmask)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

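/*
 * Sysfs store for a channel's scan_elements "en" attribute: toggles the
 * channel's bit in the buffer's scan mask. Changing the mask is rejected
 * with -EBUSY while the buffer is active.
 */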
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_buffer *buffer = this_attr->buffer;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_storage_bytes_for_si(indio_dev,
					iio_dev_opaque->scan_index_timestamp);
}

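/*
 * Compute the size of one scan: each enabled channel is naturally aligned to
 * its own storage size and the whole scan is then padded to the largest
 * element so that consecutive scans stay aligned. For example, a 2-byte
 * channel followed by an 8-byte timestamp occupies 2 + 6 (padding) + 8 = 16
 * bytes.
 */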
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

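/*
 * Optional per-buffer enable/disable hooks; buffers that need no extra
 * setup simply leave them unimplemented.
 */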
static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

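/*
 * struct iio_device_config - candidate device configuration computed by
 * iio_verify_update() before it is applied:
 * @mode is the operating mode (INDIO_BUFFER_*), @watermark the minimum
 * watermark across all active buffers, @scan_mask the combined scan mask,
 * @scan_bytes the size of one complete scan and @scan_timestamp whether any
 * buffer has the timestamp enabled.
 */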
struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

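/*
 * Work out the configuration that would result from inserting and/or removing
 * the given buffers, without applying anything. On success config->scan_mask
 * is either one of the device's available_scan_masks or a freshly allocated
 * bitmap that the caller must eventually release via iio_free_scan_mask().
 */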
static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

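/*
 * Build the demux table for one buffer: walk the buffer's scan mask alongside
 * the device's active scan mask and record one memcpy op per run of channels
 * that must be repacked (adjacent runs are merged by iio_buffer_add_demux()).
 * If the two masks are identical no demux is needed at all.
 */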
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

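/*
 * Apply a verified configuration. The enable sequence is: update the demux
 * tables, call preenable, program the scan mode and hardware FIFO watermark,
 * enable each attached buffer, attach the trigger poll function (triggered
 * mode only) and finally call postenable. Each step unwinds the previous
 * ones on failure.
 */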
static int iio_enable_buffers(struct iio_dev *indio_dev,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

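/*
 * Swap buffers in and out of the active list. Callers must hold mlock;
 * iio_update_buffers() below is the locked wrapper for external users.
 */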
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
		&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

1148int iio_update_buffers(struct iio_dev *indio_dev,
1149 struct iio_buffer *insert_buffer,
1150 struct iio_buffer *remove_buffer)
1151{
Jonathan Cameronb804e2b2021-04-26 18:49:08 +01001152 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
Lars-Peter Clausena9519452013-10-04 12:07:00 +01001153 int ret;
1154
Lars-Peter Clausen3909fab2013-10-04 12:07:00 +01001155 if (insert_buffer == remove_buffer)
1156 return 0;
1157
Jonathan Cameronb804e2b2021-04-26 18:49:08 +01001158 mutex_lock(&iio_dev_opaque->info_exist_lock);
Lars-Peter Clausena9519452013-10-04 12:07:00 +01001159 mutex_lock(&indio_dev->mlock);
1160
Lars-Peter Clausen3909fab2013-10-04 12:07:00 +01001161 if (insert_buffer && iio_buffer_is_active(insert_buffer))
1162 insert_buffer = NULL;
1163
1164 if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1165 remove_buffer = NULL;
1166
1167 if (!insert_buffer && !remove_buffer) {
1168 ret = 0;
1169 goto out_unlock;
1170 }
1171
Lars-Peter Clausena9519452013-10-04 12:07:00 +01001172 if (indio_dev->info == NULL) {
1173 ret = -ENODEV;
1174 goto out_unlock;
1175 }
1176
1177 ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1178
1179out_unlock:
1180 mutex_unlock(&indio_dev->mlock);
Jonathan Cameronb804e2b2021-04-26 18:49:08 +01001181 mutex_unlock(&iio_dev_opaque->info_exist_lock);
Lars-Peter Clausena9519452013-10-04 12:07:00 +01001182
1183 return ret;
1184}
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001185EXPORT_SYMBOL_GPL(iio_update_buffers);
1186
Lars-Peter Clausen623d74e2015-05-18 13:34:48 +02001187void iio_disable_all_buffers(struct iio_dev *indio_dev)
1188{
Lars-Peter Clausen623d74e2015-05-18 13:34:48 +02001189 iio_disable_buffers(indio_dev);
Lars-Peter Clausen12501862015-05-18 13:34:49 +02001190 iio_buffer_deactivate_all(indio_dev);
Lars-Peter Clausen623d74e2015-05-18 13:34:48 +02001191}
1192
Lars-Peter Clausen08e7e0a2014-11-26 18:55:15 +01001193static ssize_t iio_buffer_store_enable(struct device *dev,
1194 struct device_attribute *attr,
1195 const char *buf,
1196 size_t len)
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001197{
1198 int ret;
1199 bool requested_state;
1200 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001201 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001202 bool inlist;
1203
1204 ret = strtobool(buf, &requested_state);
1205 if (ret < 0)
1206 return ret;
1207
1208 mutex_lock(&indio_dev->mlock);
1209
1210 /* Find out if it is in the list */
Alexandru Ardeleanff3f7e042020-04-24 18:22:43 +03001211 inlist = iio_buffer_is_active(buffer);
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001212 /* Already in desired state */
1213 if (inlist == requested_state)
1214 goto done;
1215
1216 if (requested_state)
Alexandru Ardeleanff3f7e042020-04-24 18:22:43 +03001217 ret = __iio_update_buffers(indio_dev, buffer, NULL);
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001218 else
Alexandru Ardeleanff3f7e042020-04-24 18:22:43 +03001219 ret = __iio_update_buffers(indio_dev, NULL, buffer);
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001220
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001221done:
1222 mutex_unlock(&indio_dev->mlock);
1223 return (ret < 0) ? ret : len;
1224}
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001225
Josselin Costanzi37d34552015-03-22 20:33:38 +02001226static ssize_t iio_buffer_show_watermark(struct device *dev,
1227 struct device_attribute *attr,
1228 char *buf)
1229{
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001230 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
Josselin Costanzi37d34552015-03-22 20:33:38 +02001231
Lars-Peter Clausen83ca56b2021-03-20 08:14:02 +01001232 return sysfs_emit(buf, "%u\n", buffer->watermark);
Josselin Costanzi37d34552015-03-22 20:33:38 +02001233}
1234
1235static ssize_t iio_buffer_store_watermark(struct device *dev,
1236 struct device_attribute *attr,
1237 const char *buf,
1238 size_t len)
1239{
1240 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001241 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
Josselin Costanzi37d34552015-03-22 20:33:38 +02001242 unsigned int val;
1243 int ret;
1244
1245 ret = kstrtouint(buf, 10, &val);
1246 if (ret)
1247 return ret;
1248 if (!val)
1249 return -EINVAL;
1250
1251 mutex_lock(&indio_dev->mlock);
1252
1253 if (val > buffer->length) {
1254 ret = -EINVAL;
1255 goto out;
1256 }
1257
Alexandru Ardeleanff3f7e042020-04-24 18:22:43 +03001258 if (iio_buffer_is_active(buffer)) {
Josselin Costanzi37d34552015-03-22 20:33:38 +02001259 ret = -EBUSY;
1260 goto out;
1261 }
1262
1263 buffer->watermark = val;
1264out:
1265 mutex_unlock(&indio_dev->mlock);
1266
1267 return ret ? ret : len;
1268}
1269
Matt Fornero350f6c72017-12-06 14:43:30 -05001270static ssize_t iio_dma_show_data_available(struct device *dev,
1271 struct device_attribute *attr,
1272 char *buf)
1273{
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001274 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
Matt Fornero350f6c72017-12-06 14:43:30 -05001275
Lars-Peter Clausen83ca56b2021-03-20 08:14:02 +01001276 return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
Matt Fornero350f6c72017-12-06 14:43:30 -05001277}
1278
Lars-Peter Clausen08e7e0a2014-11-26 18:55:15 +01001279static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
1280 iio_buffer_write_length);
Lars-Peter Clausen8d92db22014-11-26 18:55:16 +01001281static struct device_attribute dev_attr_length_ro = __ATTR(length,
1282 S_IRUGO, iio_buffer_read_length, NULL);
Lars-Peter Clausen08e7e0a2014-11-26 18:55:15 +01001283static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
1284 iio_buffer_show_enable, iio_buffer_store_enable);
Josselin Costanzi37d34552015-03-22 20:33:38 +02001285static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
1286 iio_buffer_show_watermark, iio_buffer_store_watermark);
Lars-Peter Clausenb4406552015-10-13 18:10:26 +02001287static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
1288 S_IRUGO, iio_buffer_show_watermark, NULL);
Matt Fornero350f6c72017-12-06 14:43:30 -05001289static DEVICE_ATTR(data_available, S_IRUGO,
1290 iio_dma_show_data_available, NULL);
Lars-Peter Clausen08e7e0a2014-11-26 18:55:15 +01001291
Octavian Purdila6da9b382015-01-31 02:00:00 +02001292static struct attribute *iio_buffer_attrs[] = {
1293 &dev_attr_length.attr,
1294 &dev_attr_enable.attr,
Josselin Costanzi37d34552015-03-22 20:33:38 +02001295 &dev_attr_watermark.attr,
Matt Fornero350f6c72017-12-06 14:43:30 -05001296 &dev_attr_data_available.attr,
Octavian Purdila6da9b382015-01-31 02:00:00 +02001297};
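
/*
 * A minimal user-space sketch of driving the attributes above through
 * sysfs. The device and buffer indices (iio:device0, buffer0) and the
 * watermark value are hypothetical:
 *
 *	#include <stdio.h>
 *
 *	static int write_attr(const char *path, const char *val)
 *	{
 *		FILE *f = fopen(path, "w");
 *
 *		if (!f)
 *			return -1;
 *		fputs(val, f);
 *		return fclose(f);
 *	}
 *
 *	int main(void)
 *	{
 *		const char *dir = "/sys/bus/iio/devices/iio:device0/buffer0";
 *		char path[128];
 *
 *		// Wake pollers every 32 samples rather than every sample.
 *		snprintf(path, sizeof(path), "%s/watermark", dir);
 *		write_attr(path, "32");
 *		// Start capture; fails if no scan elements are enabled.
 *		snprintf(path, sizeof(path), "%s/enable", dir);
 *		return write_attr(path, "1") ? 1 : 0;
 *	}
 */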
1298
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001299#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1300
1301static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1302 struct attribute *attr)
1303{
1304 struct device_attribute *dattr = to_dev_attr(attr);
1305 struct iio_dev_attr *iio_attr;
1306
1307 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1308 if (!iio_attr)
1309 return NULL;
1310
1311 iio_attr->buffer = buffer;
1312 memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1313	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
	if (!iio_attr->dev_attr.attr.name) {
		kfree(iio_attr);
		return NULL;
	}
Alexandru Ardeleanca3e7d52021-04-02 20:42:26 +03001314	sysfs_attr_init(&iio_attr->dev_attr.attr);
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001315
1316 list_add(&iio_attr->l, &buffer->buffer_attr_list);
1317
1318 return &iio_attr->dev_attr.attr;
1319}
1320
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001321static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
1322 struct attribute **buffer_attrs,
1323 int buffer_attrcount,
1324 int scan_el_attrcount)
1325{
1326 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1327 struct attribute_group *group;
1328 struct attribute **attrs;
1329 int ret;
1330
1331 attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1332 if (!attrs)
1333 return -ENOMEM;
1334
1335 memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
1336
1337 group = &iio_dev_opaque->legacy_buffer_group;
1338 group->attrs = attrs;
1339 group->name = "buffer";
1340
1341 ret = iio_device_register_sysfs_group(indio_dev, group);
1342 if (ret)
1343 goto error_free_buffer_attrs;
1344
1345 attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1346 if (!attrs) {
1347 ret = -ENOMEM;
1348 goto error_free_buffer_attrs;
1349 }
1350
1351 memcpy(attrs, &buffer_attrs[buffer_attrcount],
1352 scan_el_attrcount * sizeof(*attrs));
1353
1354 group = &iio_dev_opaque->legacy_scan_el_group;
1355 group->attrs = attrs;
1356 group->name = "scan_elements";
1357
1358 ret = iio_device_register_sysfs_group(indio_dev, group);
1359 if (ret)
1360 goto error_free_scan_el_attrs;
1361
1362 return 0;
1363
1364error_free_scan_el_attrs:
1365	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1366error_free_buffer_attrs:
1367	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1368
1369 return ret;
1370}
1371
1372static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
1373{
1374 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1375
1376 kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1377 kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1378}
1379
Alexandru Ardeleanf73f7f42021-02-15 12:40:39 +02001380static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
1381{
1382 struct iio_dev_buffer_pair *ib = filep->private_data;
1383 struct iio_dev *indio_dev = ib->indio_dev;
1384 struct iio_buffer *buffer = ib->buffer;
1385
1386 wake_up(&buffer->pollq);
1387
1388 kfree(ib);
1389 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1390 iio_device_put(indio_dev);
1391
1392 return 0;
1393}
1394
1395static const struct file_operations iio_buffer_chrdev_fileops = {
1396 .owner = THIS_MODULE,
1397 .llseek = noop_llseek,
1398 .read = iio_buffer_read,
1399 .poll = iio_buffer_poll,
1400 .release = iio_buffer_chrdev_release,
1401};
1402
1403static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
1404{
1405 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1406 int __user *ival = (int __user *)arg;
1407 struct iio_dev_buffer_pair *ib;
1408 struct iio_buffer *buffer;
1409 int fd, idx, ret;
1410
1411 if (copy_from_user(&idx, ival, sizeof(idx)))
1412 return -EFAULT;
1413
1414 if (idx >= iio_dev_opaque->attached_buffers_cnt)
1415 return -ENODEV;
1416
1417 iio_device_get(indio_dev);
1418
1419 buffer = iio_dev_opaque->attached_buffers[idx];
1420
1421 if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
1422 ret = -EBUSY;
1423 goto error_iio_dev_put;
1424 }
1425
1426 ib = kzalloc(sizeof(*ib), GFP_KERNEL);
1427 if (!ib) {
1428 ret = -ENOMEM;
1429 goto error_clear_busy_bit;
1430 }
1431
1432 ib->indio_dev = indio_dev;
1433 ib->buffer = buffer;
1434
1435 fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
1436 ib, O_RDWR | O_CLOEXEC);
1437 if (fd < 0) {
1438 ret = fd;
1439 goto error_free_ib;
1440 }
1441
1442	if (copy_to_user(ival, &fd, sizeof(fd))) {
		/*
		 * "Leak" the fd: anon_inode_getfd() has already installed
		 * the fd in our table, so userspace may have closed it by
		 * now; calling put_unused_fd() and freeing 'ib' here would
		 * be unsound. Rely on process exit to release the file.
		 */
		return -EFAULT;
1446	}
1447
Alexandru Ardelean4c822242021-03-22 10:41:35 +02001448 return 0;
Alexandru Ardeleanf73f7f42021-02-15 12:40:39 +02001449
1450error_free_ib:
1451 kfree(ib);
1452error_clear_busy_bit:
1453 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1454error_iio_dev_put:
1455 iio_device_put(indio_dev);
1456 return ret;
1457}
1458
1459static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
1460 unsigned int cmd, unsigned long arg)
1461{
1462 switch (cmd) {
1463 case IIO_BUFFER_GET_FD_IOCTL:
1464 return iio_device_buffer_getfd(indio_dev, arg);
1465 default:
1466 return IIO_IOCTL_UNHANDLED;
1467 }
1468}
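
/*
 * A user-space sketch of claiming an attached buffer by index through
 * IIO_BUFFER_GET_FD_IOCTL. The device node and buffer index are
 * hypothetical and error handling is trimmed:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/iio/buffer.h>
 *
 *	int main(void)
 *	{
 *		int chardev = open("/dev/iio:device0", O_RDONLY);
 *		int fd = 1;	// in: buffer index, out: anon inode fd
 *
 *		if (chardev < 0 ||
 *		    ioctl(chardev, IIO_BUFFER_GET_FD_IOCTL, &fd) < 0)
 *			return 1;
 *		// fd now reads scan data from attached_buffers[1]; closing
 *		// it clears IIO_BUSY_BIT_POS so the buffer can be reopened.
 *		close(fd);
 *		close(chardev);
 *		return 0;
 *	}
 */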
1469
Alexandru Ardeleane16e0a772020-09-17 15:59:51 +03001470static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001471 struct iio_dev *indio_dev,
1472 int index)
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001473{
Jonathan Cameron62f4f362021-04-26 18:49:07 +01001474 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001475 struct iio_dev_attr *p;
1476 struct attribute **attr;
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001477 int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001478 const struct iio_chan_spec *channels;
1479
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001480 buffer_attrcount = 0;
Lars-Peter Clausen08e7e0a2014-11-26 18:55:15 +01001481 if (buffer->attrs) {
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001482 while (buffer->attrs[buffer_attrcount] != NULL)
1483 buffer_attrcount++;
Lars-Peter Clausen08e7e0a2014-11-26 18:55:15 +01001484 }
1485
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001486 scan_el_attrcount = 0;
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001487 INIT_LIST_HEAD(&buffer->buffer_attr_list);
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001488 channels = indio_dev->channels;
1489 if (channels) {
1490		/* create scan element attributes for each channel */
1491 for (i = 0; i < indio_dev->num_channels; i++) {
1492 if (channels[i].scan_index < 0)
1493 continue;
1494
Alexandru Ardeleanff3f7e042020-04-24 18:22:43 +03001495 ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001496 &channels[i]);
1497 if (ret < 0)
1498 goto error_cleanup_dynamic;
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001499 scan_el_attrcount += ret;
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001500 if (channels[i].type == IIO_TIMESTAMP)
Jonathan Cameron62f4f362021-04-26 18:49:07 +01001501 iio_dev_opaque->scan_index_timestamp =
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001502 channels[i].scan_index;
1503 }
1504 if (indio_dev->masklength && buffer->scan_mask == NULL) {
Andy Shevchenko38628282019-03-04 10:55:40 +02001505 buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1506 GFP_KERNEL);
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001507 if (buffer->scan_mask == NULL) {
1508 ret = -ENOMEM;
1509 goto error_cleanup_dynamic;
1510 }
1511 }
1512 }
1513
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001514 attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
1515	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001516 if (!attr) {
1517 ret = -ENOMEM;
1518 goto error_free_scan_mask;
1519 }
1520
1521 memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1522 if (!buffer->access->set_length)
1523 attr[0] = &dev_attr_length_ro.attr;
1524
1525 if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1526 attr[2] = &dev_attr_watermark_ro.attr;
1527
1528 if (buffer->attrs)
1529 memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
1530 sizeof(struct attribute *) * buffer_attrcount);
1531
1532 buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001533
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001534 for (i = 0; i < buffer_attrcount; i++) {
1535 struct attribute *wrapped;
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001536
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001537 wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
1538 if (!wrapped) {
1539 ret = -ENOMEM;
1540 goto error_free_scan_mask;
1541 }
1542 attr[i] = wrapped;
1543 }
1544
1545 attrn = 0;
1546 list_for_each_entry(p, &buffer->buffer_attr_list, l)
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001547 attr[attrn++] = &p->dev_attr.attr;
1548
1549 buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
1550 if (!buffer->buffer_group.name) {
1551 ret = -ENOMEM;
1552 goto error_free_buffer_attrs;
1553 }
1554
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001555 buffer->buffer_group.attrs = attr;
1556
1557 ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
1558 if (ret)
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001559 goto error_free_buffer_attr_group_name;
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001560
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001561 /* we only need to register the legacy groups for the first buffer */
1562 if (index > 0)
1563 return 0;
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001564
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001565 ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
1566 buffer_attrcount,
1567 scan_el_attrcount);
Alexandru Ardelean32f17172021-02-15 12:40:29 +02001568 if (ret)
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001569 goto error_free_buffer_attr_group_name;
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001570
1571 return 0;
1572
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001573error_free_buffer_attr_group_name:
1574 kfree(buffer->buffer_group.name);
Alexandru Ardeleane2b4d7ac2021-02-15 12:40:30 +02001575error_free_buffer_attrs:
1576 kfree(buffer->buffer_group.attrs);
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001577error_free_scan_mask:
Andy Shevchenko38628282019-03-04 10:55:40 +02001578 bitmap_free(buffer->scan_mask);
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001579error_cleanup_dynamic:
Alexandru Ardelean15097c72021-02-15 12:40:33 +02001580 iio_free_chan_devattr_list(&buffer->buffer_attr_list);
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001581
1582 return ret;
1583}
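
/*
 * For a hypothetical device with one attached buffer and a single voltage
 * channel, the groups built above produce roughly this layout (the legacy
 * buffer/ and scan_elements/ directories are only created for index 0 and
 * alias the merged buffer0/ files):
 *
 *	iio:device0/
 *		buffer/		length, enable, watermark, data_available
 *		scan_elements/	in_voltage0_en, in_voltage0_index,
 *				in_voltage0_type
 *		buffer0/	all of the above in one group
 */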
1584
Alexandru Ardelean0224af82021-02-15 12:40:36 +02001585static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
1586{
1587 bitmap_free(buffer->scan_mask);
1588 kfree(buffer->buffer_group.name);
1589 kfree(buffer->buffer_group.attrs);
1590 iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1591}
1592
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001593int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
Alexandru Ardeleane16e0a772020-09-17 15:59:51 +03001594{
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001595 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
Alexandru Ardeleane16e0a772020-09-17 15:59:51 +03001596 const struct iio_chan_spec *channels;
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001597 struct iio_buffer *buffer;
1598 int unwind_idx;
1599 int ret, i;
Alexandru Ardeleanf73f7f42021-02-15 12:40:39 +02001600 size_t sz;
Alexandru Ardeleane16e0a772020-09-17 15:59:51 +03001601
1602 channels = indio_dev->channels;
1603 if (channels) {
1604 int ml = indio_dev->masklength;
1605
1606 for (i = 0; i < indio_dev->num_channels; i++)
1607 ml = max(ml, channels[i].scan_index + 1);
1608 indio_dev->masklength = ml;
1609 }
1610
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001611 if (!iio_dev_opaque->attached_buffers_cnt)
Alexandru Ardeleane16e0a772020-09-17 15:59:51 +03001612 return 0;
1613
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001614 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
1615 buffer = iio_dev_opaque->attached_buffers[i];
1616 ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
1617 if (ret) {
1618			unwind_idx = i - 1; /* buffer 'i' cleaned up after itself */
1619 goto error_unwind_sysfs_and_mask;
1620 }
1621 }
Alexandru Ardeleanf73f7f42021-02-15 12:40:39 +02001622 unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;
1623
1624 sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
1625 iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
1626 if (!iio_dev_opaque->buffer_ioctl_handler) {
1627 ret = -ENOMEM;
1628 goto error_unwind_sysfs_and_mask;
1629 }
1630
1631 iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
1632 iio_device_ioctl_handler_register(indio_dev,
1633 iio_dev_opaque->buffer_ioctl_handler);
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001634
1635 return 0;
1636
1637error_unwind_sysfs_and_mask:
1638 for (; unwind_idx >= 0; unwind_idx--) {
1639 buffer = iio_dev_opaque->attached_buffers[unwind_idx];
1640 __iio_buffer_free_sysfs_and_mask(buffer);
1641 }
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001642 return ret;
Alexandru Ardeleane16e0a772020-09-17 15:59:51 +03001643}
1644
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001645void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001646{
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001647 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1648 struct iio_buffer *buffer;
1649 int i;
Alexandru Ardeleanff3f7e042020-04-24 18:22:43 +03001650
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001651 if (!iio_dev_opaque->attached_buffers_cnt)
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001652 return;
1653
Alexandru Ardeleanf73f7f42021-02-15 12:40:39 +02001654 iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
1655 kfree(iio_dev_opaque->buffer_ioctl_handler);
1656
Alexandru Ardeleand9a62572021-02-15 12:40:31 +02001657 iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
1658
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001659 for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
1660 buffer = iio_dev_opaque->attached_buffers[i];
1661 __iio_buffer_free_sysfs_and_mask(buffer);
1662 }
Lars-Peter Clausend967cb62014-11-26 18:55:14 +01001663}
1664
Jonathan Cameron14555b12011-09-21 11:15:57 +01001665/**
Lars-Peter Clausen81636632012-07-09 10:00:00 +01001666 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1667 * @indio_dev: the iio device
1668 * @mask: scan mask to be checked
1669 *
1670 * Return true if exactly one bit is set in the scan mask, false otherwise. It
1671 * can be used for devices where only one channel can be active for sampling at
1672 * a time.
1673 */
1674bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1675 const unsigned long *mask)
1676{
1677 return bitmap_weight(mask, indio_dev->masklength) == 1;
1678}
1679EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
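
/*
 * A sketch of the usual hookup: a driver that can only sample one channel
 * at a time points the validate_scan_mask callback of its (hypothetical)
 * buffer setup ops at this helper, e.g. when calling
 * iio_triggered_buffer_setup():
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 * The core then rejects any scan mask with more than one channel enabled
 * before the buffer can be activated.
 */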
1680
Lars-Peter Clausen5d65d922013-09-15 17:50:00 +01001681static const void *iio_demux(struct iio_buffer *buffer,
1682 const void *datain)
Jonathan Cameron5ada4ea2011-12-05 21:37:14 +00001683{
1684 struct iio_demux_table *t;
1685
1686 if (list_empty(&buffer->demux_list))
1687 return datain;
1688 list_for_each_entry(t, &buffer->demux_list, l)
1689 memcpy(buffer->demux_bounce + t->to,
1690 datain + t->from, t->length);
1691
1692 return buffer->demux_bounce;
1693}
1694
Lars-Peter Clausen5d65d922013-09-15 17:50:00 +01001695static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
Jonathan Cameron5ada4ea2011-12-05 21:37:14 +00001696{
Lars-Peter Clausen5d65d922013-09-15 17:50:00 +01001697 const void *dataout = iio_demux(buffer, data);
Josselin Costanzi37d34552015-03-22 20:33:38 +02001698 int ret;
Jonathan Cameron5ada4ea2011-12-05 21:37:14 +00001699
Josselin Costanzi37d34552015-03-22 20:33:38 +02001700 ret = buffer->access->store_to(buffer, dataout);
1701 if (ret)
1702 return ret;
1703
1704 /*
1705 * We can't just test for watermark to decide if we wake the poll queue
1706	 * because read may request fewer samples than the watermark.
1707 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08001708 wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
Josselin Costanzi37d34552015-03-22 20:33:38 +02001709 return 0;
Jonathan Cameron5ada4ea2011-12-05 21:37:14 +00001710}
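
/*
 * Consumer-side sketch matching the wakeup above: a reader may ask for
 * fewer samples than the watermark and is still woken per push. The fd
 * and scan size come from the (hypothetical) caller:
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	static ssize_t read_some(int fd, void *scan, size_t bytes)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, -1) <= 0)
 *			return -1;
 *		// May return fewer bytes than watermark * scan size.
 *		return read(fd, scan, bytes);
 *	}
 */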
Jonathan Cameron5ada4ea2011-12-05 21:37:14 +00001711
Jonathan Cameron315a19e2017-01-02 19:28:28 +00001712/**
1713 * iio_push_to_buffers() - push a full scan to all buffers enabled on the device.
1714 * @indio_dev: iio_dev structure for device.
1715 * @data: Full scan.
1716 */
Lars-Peter Clausen5d65d922013-09-15 17:50:00 +01001717int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001718{
Alexandru Ardelean6a8c6b22020-06-30 07:57:07 +03001719 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001720 int ret;
1721 struct iio_buffer *buf;
1722
Alexandru Ardelean6a8c6b22020-06-30 07:57:07 +03001723 list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
Jonathan Cameron84b36ce2012-06-30 20:06:00 +01001724 ret = iio_push_to_buffer(buf, data);
1725 if (ret < 0)
1726 return ret;
1727 }
1728
1729 return 0;
1730}
1731EXPORT_SYMBOL_GPL(iio_push_to_buffers);
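
/*
 * Producer-side sketch: a triggered-buffer pollfunc for a hypothetical
 * driver. st->scan must be sized for the full scan plus an 8-byte aligned
 * timestamp, which iio_push_to_buffers_with_timestamp() writes into the
 * scan before calling iio_push_to_buffers():
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		foo_read_scan(st, st->scan);	// hypothetical hw read
 *		iio_push_to_buffers_with_timestamp(indio_dev, st->scan,
 *						   iio_get_time_ns(indio_dev));
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */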
1732
Lars-Peter Clausen9e69c932013-10-04 12:06:00 +01001733/**
1734 * iio_buffer_release() - Free a buffer's resources
1735 * @ref: Pointer to the kref embedded in the iio_buffer struct
1736 *
1737 * This function is called when the last reference to the buffer has been
1738 * dropped. It will typically free all resources allocated by the buffer. Do not
1739 * call this function manually, always use iio_buffer_put() when done using a
1740 * buffer.
1741 */
1742static void iio_buffer_release(struct kref *ref)
1743{
1744 struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1745
1746 buffer->access->release(buffer);
1747}
1748
1749/**
1750 * iio_buffer_get() - Grab a reference to the buffer
1751 * @buffer: The buffer to grab a reference for, may be NULL
1752 *
1753 * Returns the pointer to the buffer that was passed into the function.
1754 */
1755struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1756{
1757 if (buffer)
1758 kref_get(&buffer->ref);
1759
1760 return buffer;
1761}
1762EXPORT_SYMBOL_GPL(iio_buffer_get);
1763
1764/**
1765 * iio_buffer_put() - Release the reference to the buffer
1766 * @buffer: The buffer to release the reference for, may be NULL
1767 */
1768void iio_buffer_put(struct iio_buffer *buffer)
1769{
1770 if (buffer)
1771 kref_put(&buffer->ref, iio_buffer_release);
1772}
1773EXPORT_SYMBOL_GPL(iio_buffer_put);
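
/*
 * Typical get/put pairing, a sketch with hypothetical names: code that
 * stashes a buffer pointer beyond the scope it obtained it in takes its
 * own reference and drops it when done:
 *
 *	st->buffer = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(st->buffer);	// last put ends in iio_buffer_release()
 */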
Jonathan Cameron2b827ad2017-01-02 19:28:32 +00001774
1775/**
1776 * iio_device_attach_buffer - Attach a buffer to an IIO device
1777 * @indio_dev: The device the buffer should be attached to
1778 * @buffer: The buffer to attach to the device
1779 *
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001780 * Return: 0 on success or a negative error code on failure.
1781 *
Jonathan Cameron2b827ad2017-01-02 19:28:32 +00001782 * This function attaches a buffer to an IIO device. The buffer stays attached to
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001783 * the device until the device is freed. For legacy reasons, the first attached
1784 * buffer will also be assigned to 'indio_dev->buffer'.
Alexandru Ardelean218bc532021-03-07 20:54:44 +02001785 * The array allocated here is freed via iio_device_detach_buffers(), which
1786 * is called from iio_device_free().
Jonathan Cameron2b827ad2017-01-02 19:28:32 +00001787 */
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001788int iio_device_attach_buffer(struct iio_dev *indio_dev,
1789 struct iio_buffer *buffer)
Jonathan Cameron2b827ad2017-01-02 19:28:32 +00001790{
Alexandru Ardeleanee708e62021-02-15 12:40:38 +02001791 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1792 struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
1793 unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
1794
1795 cnt++;
1796
1797 new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
1798 if (!new)
1799 return -ENOMEM;
1800 iio_dev_opaque->attached_buffers = new;
1801
1802 buffer = iio_buffer_get(buffer);
1803
1804 /* first buffer is legacy; attach it to the IIO device directly */
1805 if (!indio_dev->buffer)
1806 indio_dev->buffer = buffer;
1807
1808 iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
1809 iio_dev_opaque->attached_buffers_cnt = cnt;
1810
1811 return 0;
Jonathan Cameron2b827ad2017-01-02 19:28:32 +00001812}
1813EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
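
/*
 * Driver-side sketch for a hypothetical kfifo-backed device; buffers must
 * be attached before iio_device_register(). Most in-tree users reach this
 * through helpers such as devm_iio_kfifo_buffer_setup(), which wrap the
 * same attach call:
 *
 *	#include <linux/iio/kfifo_buf.h>
 *
 *	static int foo_setup_buffer(struct iio_dev *indio_dev)
 *	{
 *		struct iio_buffer *buffer = iio_kfifo_allocate();
 *		int ret;
 *
 *		if (!buffer)
 *			return -ENOMEM;
 *		// attach takes its own reference; the driver's reference
 *		// is dropped by iio_kfifo_free() on teardown or error.
 *		ret = iio_device_attach_buffer(indio_dev, buffer);
 *		if (ret)
 *			iio_kfifo_free(buffer);
 *		return ret;
 *	}
 */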