// SPDX-License-Identifier: GPL-2.0
/*
 * Generic Counter character device interface
 * Copyright (C) 2020 William Breathitt Gray
 */
#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/counter.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include "counter-chrdev.h"

struct counter_comp_node {
	struct list_head l;
	struct counter_component component;
	struct counter_comp comp;
	void *parent;
};

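/*
 * The read callbacks of struct counter_comp live in a union, so the helpers
 * below compare and test every member: whichever callback happens to be set
 * identifies the component's read function.
 */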
#define counter_comp_read_is_equal(a, b) \
	(a.action_read == b.action_read || \
	 a.device_u8_read == b.device_u8_read || \
	 a.count_u8_read == b.count_u8_read || \
	 a.signal_u8_read == b.signal_u8_read || \
	 a.device_u32_read == b.device_u32_read || \
	 a.count_u32_read == b.count_u32_read || \
	 a.signal_u32_read == b.signal_u32_read || \
	 a.device_u64_read == b.device_u64_read || \
	 a.count_u64_read == b.count_u64_read || \
	 a.signal_u64_read == b.signal_u64_read)

#define counter_comp_read_is_set(comp) \
	(comp.action_read || \
	 comp.device_u8_read || \
	 comp.count_u8_read || \
	 comp.signal_u8_read || \
	 comp.device_u32_read || \
	 comp.count_u32_read || \
	 comp.signal_u32_read || \
	 comp.device_u64_read || \
	 comp.count_u64_read || \
	 comp.signal_u64_read)

static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
				   size_t len, loff_t *f_ps)
{
	struct counter_device *const counter = filp->private_data;
	int err;
	unsigned int copied;

	if (!counter->ops)
		return -ENODEV;

	if (len < sizeof(struct counter_event))
		return -EINVAL;

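	/* Block (unless O_NONBLOCK) until at least one event is copied out */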
	do {
		if (kfifo_is_empty(&counter->events)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			err = wait_event_interruptible(counter->events_wait,
					!kfifo_is_empty(&counter->events) ||
					!counter->ops);
			if (err < 0)
				return err;
			if (!counter->ops)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&counter->events_lock))
			return -ERESTARTSYS;
		err = kfifo_to_user(&counter->events, buf, len, &copied);
		mutex_unlock(&counter->events_lock);
		if (err < 0)
			return err;
	} while (!copied);

	return copied;
}

static __poll_t counter_chrdev_poll(struct file *filp,
				    struct poll_table_struct *pollt)
{
	struct counter_device *const counter = filp->private_data;
	__poll_t events = 0;

	if (!counter->ops)
		return events;

	poll_wait(filp, &counter->events_wait, pollt);

	if (!kfifo_is_empty(&counter->events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static void counter_events_list_free(struct list_head *const events_list)
{
	struct counter_event_node *p, *n;
	struct counter_comp_node *q, *o;

	list_for_each_entry_safe(p, n, events_list, l) {
		/* Free associated component nodes */
		list_for_each_entry_safe(q, o, &p->comp_list, l) {
			list_del(&q->l);
			kfree(q);
		}

		/* Free event node */
		list_del(&p->l);
		kfree(p);
	}
}

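/*
 * Watches are staged on next_events_list and only become live on events_list
 * when COUNTER_ENABLE_EVENTS_IOCTL swaps the lists in counter_enable_events().
 */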
static int counter_set_event_node(struct counter_device *const counter,
				  struct counter_watch *const watch,
				  const struct counter_comp_node *const cfg)
{
	struct counter_event_node *event_node;
	int err = 0;
	struct counter_comp_node *comp_node;

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->next_events_list, l)
		if (event_node->event == watch->event &&
		    event_node->channel == watch->channel)
			break;

	/*
	 * If the event is not already in the list, the iteration above ran
	 * off the end and event_node now aliases the list head.
	 */
	if (&event_node->l == &counter->next_events_list) {
		/* Allocate new event node */
		event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
		if (!event_node)
			return -ENOMEM;

		/* Configure event node and add to the list */
		event_node->event = watch->event;
		event_node->channel = watch->channel;
		INIT_LIST_HEAD(&event_node->comp_list);
		list_add(&event_node->l, &counter->next_events_list);
	}

	/* Check if component watch has already been set before */
	list_for_each_entry(comp_node, &event_node->comp_list, l)
		if (comp_node->parent == cfg->parent &&
		    counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
			err = -EINVAL;
			goto exit_free_event_node;
		}

	/* Allocate component node */
	comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
	if (!comp_node) {
		err = -ENOMEM;
		goto exit_free_event_node;
	}
	*comp_node = *cfg;

	/* Add component node to event node */
	list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
	/* Free the event node if no component nodes are watching it */
	if (list_empty(&event_node->comp_list)) {
		list_del(&event_node->l);
		kfree(event_node);
	}

	return err;
}

static int counter_enable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

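	/*
	 * Lock order: the next_events_list mutex is taken first, then the
	 * events_list spinlock; the live list must be protected with the
	 * irqsave spinlock because counter_push_event() may run in IRQ
	 * context.
	 */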
	mutex_lock(&counter->n_events_list_lock);
	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);
	list_replace_init(&counter->next_events_list,
			  &counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}

static int counter_disable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	mutex_lock(&counter->n_events_list_lock);

	counter_events_list_free(&counter->next_events_list);

	mutex_unlock(&counter->n_events_list_lock);

	return err;
}

static int counter_add_watch(struct counter_device *const counter,
			     const unsigned long arg)
{
	void __user *const uwatch = (void __user *)arg;
	struct counter_watch watch;
	struct counter_comp_node comp_node = {};
	size_t parent, id;
	struct counter_comp *ext;
	size_t num_ext;
	int err = 0;

	if (copy_from_user(&watch, uwatch, sizeof(watch)))
		return -EFAULT;

	if (watch.component.type == COUNTER_COMPONENT_NONE)
		goto no_component;

	parent = watch.component.parent;

	/* Configure parent component info for comp node */
	switch (watch.component.scope) {
	case COUNTER_SCOPE_DEVICE:
		ext = counter->ext;
		num_ext = counter->num_ext;
		break;
	case COUNTER_SCOPE_SIGNAL:
		if (parent >= counter->num_signals)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_signals);

		comp_node.parent = counter->signals + parent;

		ext = counter->signals[parent].ext;
		num_ext = counter->signals[parent].num_ext;
		break;
	case COUNTER_SCOPE_COUNT:
		if (parent >= counter->num_counts)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_counts);

		comp_node.parent = counter->counts + parent;

		ext = counter->counts[parent].ext;
		num_ext = counter->counts[parent].num_ext;
		break;
	default:
		return -EINVAL;
	}

	id = watch.component.id;

	/* Configure component info for comp node */
	switch (watch.component.type) {
	case COUNTER_COMPONENT_SIGNAL:
		if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
		comp_node.comp.signal_u32_read = counter->ops->signal_read;
		break;
	case COUNTER_COMPONENT_COUNT:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_U64;
		comp_node.comp.count_u64_read = counter->ops->count_read;
		break;
	case COUNTER_COMPONENT_FUNCTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_FUNCTION;
		comp_node.comp.count_u32_read = counter->ops->function_read;
		break;
	case COUNTER_COMPONENT_SYNAPSE_ACTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;
		if (id >= counter->counts[parent].num_synapses)
			return -EINVAL;
		id = array_index_nospec(id, counter->counts[parent].num_synapses);

		comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
		comp_node.comp.action_read = counter->ops->action_read;
		comp_node.comp.priv = counter->counts[parent].synapses + id;
		break;
	case COUNTER_COMPONENT_EXTENSION:
		if (id >= num_ext)
			return -EINVAL;
		id = array_index_nospec(id, num_ext);

		comp_node.comp = ext[id];
		break;
	default:
		return -EINVAL;
	}
	if (!counter_comp_read_is_set(comp_node.comp))
		return -EOPNOTSUPP;

no_component:
	mutex_lock(&counter->n_events_list_lock);

	if (counter->ops->watch_validate) {
		err = counter->ops->watch_validate(counter, &watch);
		if (err < 0)
			goto err_exit;
	}

	comp_node.component = watch.component;

	err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}

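/*
 * For reference, a minimal sketch of how userspace drives these ioctls
 * (the "/dev/counter0" node name is illustrative; error handling omitted):
 *
 *	int fd = open("/dev/counter0", O_RDWR);
 *	struct counter_watch watch = {
 *		.component.type = COUNTER_COMPONENT_COUNT,
 *		.component.scope = COUNTER_SCOPE_COUNT,
 *		.component.parent = 0,
 *		.event = COUNTER_EVENT_OVERFLOW,
 *		.channel = 0,
 *	};
 *	struct counter_event event;
 *
 *	ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch);
 *	ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
 *	read(fd, &event, sizeof(event));
 */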
static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct counter_device *const counter = filp->private_data;
	int ret = -ENODEV;

	mutex_lock(&counter->ops_exist_lock);

	if (!counter->ops)
		goto out_unlock;

	switch (cmd) {
	case COUNTER_ADD_WATCH_IOCTL:
		ret = counter_add_watch(counter, arg);
		break;
	case COUNTER_ENABLE_EVENTS_IOCTL:
		ret = counter_enable_events(counter);
		break;
	case COUNTER_DISABLE_EVENTS_IOCTL:
		ret = counter_disable_events(counter);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

out_unlock:
	mutex_unlock(&counter->ops_exist_lock);

	return ret;
}

static int counter_chrdev_open(struct inode *inode, struct file *filp)
{
	struct counter_device *const counter = container_of(inode->i_cdev,
							    typeof(*counter),
							    chrdev);

	/* Ensure chrdev is not opened by more than one user at a time */
	if (!atomic_add_unless(&counter->chrdev_lock, 1, 1))
		return -EBUSY;

	get_device(&counter->dev);
	filp->private_data = counter;

	return nonseekable_open(inode, filp);
}

static int counter_chrdev_release(struct inode *inode, struct file *filp)
{
	struct counter_device *const counter = filp->private_data;
	int ret = 0;

	mutex_lock(&counter->ops_exist_lock);

	if (!counter->ops) {
		/* Free any lingering held memory */
		counter_events_list_free(&counter->events_list);
		counter_events_list_free(&counter->next_events_list);
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = counter_disable_events(counter);

out_unlock:
	mutex_unlock(&counter->ops_exist_lock);

	/*
	 * The file is going away regardless of the return value, so always
	 * drop the device reference and release the single-open slot;
	 * returning early on a counter_disable_events() failure would leak
	 * both and leave the chrdev permanently busy.
	 */
	put_device(&counter->dev);
	atomic_dec(&counter->chrdev_lock);

	return ret;
}

static const struct file_operations counter_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = counter_chrdev_read,
	.poll = counter_chrdev_poll,
	.unlocked_ioctl = counter_chrdev_ioctl,
	.open = counter_chrdev_open,
	.release = counter_chrdev_release,
};

int counter_chrdev_add(struct counter_device *const counter)
{
	/* Initialize Counter events lists */
	INIT_LIST_HEAD(&counter->events_list);
	INIT_LIST_HEAD(&counter->next_events_list);
	spin_lock_init(&counter->events_list_lock);
	mutex_init(&counter->n_events_list_lock);
	init_waitqueue_head(&counter->events_wait);
	mutex_init(&counter->events_lock);

	/* Initialize character device */
	atomic_set(&counter->chrdev_lock, 0);
	cdev_init(&counter->chrdev, &counter_fops);

	/* Allocate Counter events queue */
	return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
}

void counter_chrdev_remove(struct counter_device *const counter)
{
	kfifo_free(&counter->events);
}

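/*
 * counter_get_data() dispatches on the component type and scope to the
 * matching read callback and widens the result to u64 for the event record.
 */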
static int counter_get_data(struct counter_device *const counter,
			    const struct counter_comp_node *const comp_node,
			    u64 *const value)
{
	const struct counter_comp *const comp = &comp_node->comp;
	void *const parent = comp_node->parent;
	u8 value_u8 = 0;
	u32 value_u32 = 0;
	int ret;

	if (comp_node->component.type == COUNTER_COMPONENT_NONE)
		return 0;

	switch (comp->type) {
	case COUNTER_COMP_U8:
	case COUNTER_COMP_BOOL:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u8_read(counter, &value_u8);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u8_read(counter, parent, &value_u8);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u8_read(counter, parent, &value_u8);
			break;
		default:
			/* Guard against an unexpected scope leaving ret unset */
			return -EINVAL;
		}
		*value = value_u8;
		return ret;
	case COUNTER_COMP_SIGNAL_LEVEL:
	case COUNTER_COMP_FUNCTION:
	case COUNTER_COMP_ENUM:
	case COUNTER_COMP_COUNT_DIRECTION:
	case COUNTER_COMP_COUNT_MODE:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u32_read(counter, &value_u32);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u32_read(counter, parent,
						    &value_u32);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u32_read(counter, parent, &value_u32);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u32;
		return ret;
	case COUNTER_COMP_U64:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			return comp->device_u64_read(counter, value);
		case COUNTER_SCOPE_SIGNAL:
			return comp->signal_u64_read(counter, parent, value);
		case COUNTER_SCOPE_COUNT:
			return comp->count_u64_read(counter, parent, value);
		default:
			return -EINVAL;
		}
	case COUNTER_COMP_SYNAPSE_ACTION:
		ret = comp->action_read(counter, parent, comp->priv,
					&value_u32);
		*value = value_u32;
		return ret;
	default:
		return -EINVAL;
	}
}

/**
 * counter_push_event - queue event for userspace reading
 * @counter: pointer to Counter structure
 * @event: triggered event
 * @channel: event channel
 *
 * Note: If no one is watching for the respective event, it is silently
 * discarded.
 */
void counter_push_event(struct counter_device *const counter, const u8 event,
			const u8 channel)
{
	struct counter_event ev;
	unsigned int copied = 0;
	unsigned long flags;
	struct counter_event_node *event_node;
	struct counter_comp_node *comp_node;

	ev.timestamp = ktime_get_ns();
	ev.watch.event = event;
	ev.watch.channel = channel;

	/* Could be in an interrupt context, so use a spin lock */
	spin_lock_irqsave(&counter->events_list_lock, flags);

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->events_list, l)
		if (event_node->event == event &&
		    event_node->channel == channel)
			break;

	/*
	 * If the event is not in the list, the iteration above ran off the
	 * end and event_node now aliases the list head.
	 */
	if (&event_node->l == &counter->events_list)
		goto exit_early;

	/* Read and queue relevant comp for userspace */
	list_for_each_entry(comp_node, &event_node->comp_list, l) {
		ev.watch.component = comp_node->component;
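		/* Negate so userspace sees errors as positive errno values */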
		ev.status = -counter_get_data(counter, comp_node, &ev.value);

		copied += kfifo_in(&counter->events, &ev, 1);
	}

exit_early:
	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	if (copied)
		wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_GPL(counter_push_event);