/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_dev *indio_dev,
                                 struct iio_buffer *buf)
{
        struct list_head *p;

        list_for_each(p, &indio_dev->buffer_list)
                if (p == &buf->buffer_list)
                        return true;

        return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	file structure for the character device node
 * @buf:	destination buffer in userspace
 * @n:		maximum number of bytes to read
 * @f_ps:	file position (unused)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	file structure for the character device node
 * @wait:	poll table supplied by the caller
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}

void iio_buffer_init(struct iio_buffer *buffer)
{
        INIT_LIST_HEAD(&buffer->demux_list);
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

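/*
 * Reports the scan element layout as
 * <endianness>:<sign><realbits>/<storagebits>>><shift>,
 * e.g. "le:s12/16>>4" for a little-endian, signed, 12-bit value stored in
 * 16 bits and shifted right by 4.
 */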
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        ret = test_bit(to_iio_dev_attr(attr)->address,
                       indio_dev->buffer->scan_mask);

        return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret;
        bool state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool state;

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

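/*
 * Builds the scan_elements sysfs group for the device's buffer: an index,
 * type and en(able) attribute per channel, plus any attributes supplied by
 * the buffer implementation, and sizes the scan mask to cover the highest
 * scan index.
 */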
int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                /* Add index/type/en sysfs attributes for each scan element */
                for (i = 0; i < num_channels; i++) {
                        if (channels[i].scan_index < 0)
                                continue;

                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                indio_dev->scan_index_timestamp =
                                        channels[i].scan_index;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                                    sizeof(*buffer->scan_mask),
                                                    GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
                                              sizeof(buffer->scan_el_group.attrs[0]),
                                              GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs,
                       buffer->scan_el_attrs->attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                ret = -EBUSY;
        } else {
                if (buffer->access->set_length)
                        buffer->access->set_length(buffer, val);
                ret = 0;
        }
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        return sprintf(buf, "%d\n",
                       iio_buffer_is_active(indio_dev,
                                            indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator, since a NULL mask can never be a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
                                                unsigned int masklength,
                                                const unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}

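/*
 * Work out how many bytes one demuxed scan occupies: each enabled channel
 * is padded to a multiple of its own storage size, and an optional
 * timestamp is appended last.  For example, two 16-bit channels plus a
 * 64-bit timestamp take 2 + 2 bytes, padded to 8, plus 8 = 16 bytes.
 */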
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                  const unsigned long *mask, bool timestamp)
{
        const struct iio_chan_spec *ch;
        unsigned bytes = 0;
        int length, i;

        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask,
                         indio_dev->masklength) {
                ch = iio_find_channel_from_si(indio_dev, i);
                length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        if (timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                                              indio_dev->scan_index_timestamp);
                length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        return bytes;
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer, *_buffer;

        if (list_empty(&indio_dev->buffer_list))
                return;

        if (indio_dev->setup_ops->predisable)
                indio_dev->setup_ops->predisable(indio_dev);

        list_for_each_entry_safe(buffer, _buffer,
                                 &indio_dev->buffer_list, buffer_list)
                list_del_init(&buffer->buffer_list);

        indio_dev->currentmode = INDIO_DIRECT_MODE;
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);

        if (indio_dev->available_scan_masks == NULL)
                kfree(indio_dev->active_scan_mask);
}

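/*
 * Attach and/or detach a buffer from the device's active list.  The
 * sequence is: run the predisable/postdisable callbacks for anything
 * already active, update the buffer list, recompute the compound scan mask
 * across all remaining buffers, then re-enable via preenable,
 * request_update, update_scan_mode and postenable.  On failure the previous
 * scan mask is restored where possible.
 */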
int iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
{
        int ret;
        int success = 0;
        struct iio_buffer *buffer;
        unsigned long *compound_mask;
        const unsigned long *old_mask;

        /* Wind down existing buffers - iff there are any */
        if (!list_empty(&indio_dev->buffer_list)) {
                if (indio_dev->setup_ops->predisable) {
                        ret = indio_dev->setup_ops->predisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->setup_ops->postdisable) {
                        ret = indio_dev->setup_ops->postdisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
        }
        /* Keep a copy of current setup to allow roll back */
        old_mask = indio_dev->active_scan_mask;
        if (!indio_dev->available_scan_masks)
                indio_dev->active_scan_mask = NULL;

        if (remove_buffer)
                list_del(&remove_buffer->buffer_list);
        if (insert_buffer)
                list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

        /* If no buffers in list, we are done */
        if (list_empty(&indio_dev->buffer_list)) {
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->available_scan_masks == NULL)
                        kfree(old_mask);
                return 0;
        }

        /* What scan mask do we actually have? */
        compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                sizeof(long), GFP_KERNEL);
        if (compound_mask == NULL) {
                if (indio_dev->available_scan_masks == NULL)
                        kfree(old_mask);
                return -ENOMEM;
        }
        indio_dev->scan_timestamp = 0;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
                          indio_dev->masklength);
                indio_dev->scan_timestamp |= buffer->scan_timestamp;
        }
        if (indio_dev->available_scan_masks) {
                indio_dev->active_scan_mask =
                        iio_scan_mask_match(indio_dev->available_scan_masks,
                                            indio_dev->masklength,
                                            compound_mask);
                if (indio_dev->active_scan_mask == NULL) {
                        /*
                         * Roll back.
                         * Note can only occur when adding a buffer.
                         */
                        list_del(&insert_buffer->buffer_list);
                        if (old_mask) {
                                indio_dev->active_scan_mask = old_mask;
                                success = -EINVAL;
                        } else {
                                kfree(compound_mask);
                                ret = -EINVAL;
                                goto error_ret;
                        }
                }
        } else {
                indio_dev->active_scan_mask = compound_mask;
        }

        iio_update_demux(indio_dev);

        /* Wind up again */
        if (indio_dev->setup_ops->preenable) {
                ret = indio_dev->setup_ops->preenable(indio_dev);
                if (ret) {
                        printk(KERN_ERR
                               "Buffer not started: buffer preenable failed (%d)\n", ret);
                        goto error_remove_inserted;
                }
        }
        indio_dev->scan_bytes =
                iio_compute_scan_bytes(indio_dev,
                                       indio_dev->active_scan_mask,
                                       indio_dev->scan_timestamp);
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed (%d)\n", ret);
                                goto error_run_postdisable;
                        }
                }
        if (indio_dev->info->update_scan_mode) {
                ret = indio_dev->info
                        ->update_scan_mode(indio_dev,
                                           indio_dev->active_scan_mask);
                if (ret < 0) {
                        printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
                        goto error_run_postdisable;
                }
        }
        /* Definitely possible for devices to support both of these. */
        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
                if (!indio_dev->trig) {
                        printk(KERN_INFO "Buffer not started: no trigger\n");
                        ret = -EINVAL;
                        /* Can only occur on first buffer */
                        goto error_run_postdisable;
                }
                indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
        } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
        } else { /* should never be reached */
                ret = -EINVAL;
                goto error_run_postdisable;
        }

        if (indio_dev->setup_ops->postenable) {
                ret = indio_dev->setup_ops->postenable(indio_dev);
                if (ret) {
                        printk(KERN_INFO
                               "Buffer not started: postenable failed (%d)\n", ret);
                        indio_dev->currentmode = INDIO_DIRECT_MODE;
                        if (indio_dev->setup_ops->postdisable)
                                indio_dev->setup_ops->postdisable(indio_dev);
                        goto error_disable_all_buffers;
                }
        }

        if (indio_dev->available_scan_masks)
                kfree(compound_mask);
        else
                kfree(old_mask);

        return success;

error_disable_all_buffers:
        indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:

        if (insert_buffer)
                list_del(&insert_buffer->buffer_list);
        indio_dev->active_scan_mask = old_mask;
        kfree(compound_mask);
error_ret:

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        bool requested_state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *pbuf = indio_dev->buffer;
        bool inlist;

        ret = strtobool(buf, &requested_state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);

        /* Find out if it is in the list */
        inlist = iio_buffer_is_active(indio_dev, pbuf);
        /* Already in desired state */
        if (inlist == requested_state)
                goto done;

        if (requested_state)
                ret = iio_update_buffers(indio_dev,
                                         indio_dev->buffer, NULL);
        else
                ret = iio_update_buffers(indio_dev,
                                         NULL, indio_dev->buffer);

        if (ret < 0)
                goto done;
done:
        mutex_unlock(&indio_dev->mlock);
        return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

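/*
 * Default preenable helper for software buffers: recomputes the number of
 * bytes per scan for each attached buffer and pushes it to the buffer
 * implementation via set_bytes_per_datum().
 */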
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer;
        unsigned bytes;
        dev_dbg(&indio_dev->dev, "%s\n", __func__);

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
                if (buffer->access->set_bytes_per_datum) {
                        bytes = iio_compute_scan_bytes(indio_dev,
                                                       buffer->scan_mask,
                                                       buffer->scan_timestamp);

                        buffer->access->set_bytes_per_datum(buffer, bytes);
                }
        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
                                   const unsigned long *mask)
{
        return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
                                   const unsigned long *mask)
{
        if (!indio_dev->setup_ops->validate_scan_mask)
                return true;

        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
                      struct iio_buffer *buffer, int bit)
{
        const unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);

        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN(1, "Trying to set scanmask prior to registering buffer\n");
                goto err_invalid_mask;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (!iio_validate_scan_mask(indio_dev, trialmask))
                goto err_invalid_mask;

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask)
                        goto err_invalid_mask;
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

        kfree(trialmask);

        return 0;

err_invalid_mask:
        kfree(trialmask);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
                        struct iio_buffer *buffer, int bit)
{
        if (bit >= indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;

        return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
        unsigned from;
        unsigned to;
        unsigned length;
        struct list_head l;
};

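/*
 * Apply the buffer's demux table to one scan: copy only the bytes this
 * buffer asked for out of the full device scan, using the bounce buffer.
 * If the table is empty the scan already matches and is passed through
 * untouched.
 */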
static unsigned char *iio_demux(struct iio_buffer *buffer,
                                unsigned char *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                       datain + t->from, t->length);

        return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
        unsigned char *dataout = iio_demux(buffer, data);

        return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
        struct iio_demux_table *p, *q;
        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }
}

int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
        int ret;
        struct iio_buffer *buf;

        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
                ret = iio_push_to_buffer(buf, data);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

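/*
 * Rebuild the demux table for one buffer: walk the device's active scan
 * mask and the buffer's scan mask in parallel and record a copy operation
 * (from, to, length) for every channel the buffer wants, keeping each
 * element naturally aligned.  A bounce buffer large enough for the demuxed
 * scan is allocated at the end.
 */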
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
                                   struct iio_buffer *buffer)
{
        const struct iio_chan_spec *ch;
        int ret, in_ind = -1, out_ind, length;
        unsigned in_loc = 0, out_loc = 0;
        struct iio_demux_table *p;

        /* Clear out any old demux */
        iio_buffer_demux_free(buffer);
        kfree(buffer->demux_bounce);
        buffer->demux_bounce = NULL;

        /* First work out which scan mode we will actually have */
        if (bitmap_equal(indio_dev->active_scan_mask,
                         buffer->scan_mask,
                         indio_dev->masklength))
                return 0;

        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
                         indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
                                       in_ind + 1);
                while (in_ind != out_ind) {
                        in_ind = find_next_bit(indio_dev->active_scan_mask,
                                               indio_dev->masklength,
                                               in_ind + 1);
                        ch = iio_find_channel_from_si(indio_dev, in_ind);
                        length = ch->scan_type.storagebits/8;
                        /* Make sure we are aligned */
                        in_loc += length;
                        if (in_loc % length)
                                in_loc += length - in_loc % length;
                }
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto error_clear_mux_table;
                }
                ch = iio_find_channel_from_si(indio_dev, in_ind);
                length = ch->scan_type.storagebits/8;
                if (out_loc % length)
                        out_loc += length - out_loc % length;
                if (in_loc % length)
                        in_loc += length - in_loc % length;
                p->from = in_loc;
                p->to = out_loc;
                p->length = length;
                list_add_tail(&p->l, &buffer->demux_list);
                out_loc += length;
                in_loc += length;
        }
        /* Relies on scan_timestamp being last */
        if (buffer->scan_timestamp) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto error_clear_mux_table;
                }
                ch = iio_find_channel_from_si(indio_dev,
                                              indio_dev->scan_index_timestamp);
                length = ch->scan_type.storagebits/8;
                if (out_loc % length)
                        out_loc += length - out_loc % length;
                if (in_loc % length)
                        in_loc += length - in_loc % length;
                p->from = in_loc;
                p->to = out_loc;
                p->length = length;
                list_add_tail(&p->l, &buffer->demux_list);
                out_loc += length;
                in_loc += length;
        }
        buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
        if (buffer->demux_bounce == NULL) {
                ret = -ENOMEM;
                goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        iio_buffer_demux_free(buffer);

        return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer;
        int ret;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                ret = iio_buffer_update_demux(indio_dev, buffer);
                if (ret < 0)
                        goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
                iio_buffer_demux_free(buffer);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);