// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

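/* Per-skb private data, stored in the skb's control buffer (skb->cb).
 * It carries the RX timestamp used for sorted queueing; the
 * BUILD_BUG_ON() in can_rx_offload_get_cb() ensures it fits into the
 * 48 byte cb area.
 */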
struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
					int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 values and return the result as int, to
	 * keep the difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

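/* Worked example of the wrap-around behaviour (illustrative values
 * only): with cb_a->timestamp = 0xfffffffe and
 * cb_b->timestamp = 0x00000001, the u32 subtraction yields 0x00000003,
 * i.e. +3 as an int, so the newer frame "b" still sorts after "a" even
 * though the timestamp counter wrapped in between.
 */
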
/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into
 * an overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL, *skb_error = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	if (likely(skb_queue_len(&offload->skb_queue) <
		   offload->skb_queue_len_max)) {
		skb = alloc_can_skb(offload->dev, &cf);
		if (unlikely(!skb))
			skb_error = ERR_PTR(-ENOMEM);	/* skb alloc failed */
	} else {
		skb_error = ERR_PTR(-ENOBUFS);		/* skb_queue is full */
	}

	/* If queue is full or skb not available, drop by reading into
	 * overflow buffer.
	 */
	if (unlikely(skb_error)) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);

		/* Mailbox was empty. */
		if (unlikely(!ret))
			return NULL;

		/* Mailbox has been read and we're dropping it or
		 * there was a problem reading the mailbox.
		 *
		 * Increment error counters in any case.
		 */
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		/* There was a problem reading the mailbox, propagate
		 * error value.
		 */
		if (unlikely(ret < 0))
			return ERR_PTR(ret);

		return skb_error;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);

	/* Mailbox was empty. */
	if (unlikely(!ret)) {
		kfree_skb(skb);
		return NULL;
	}

	/* There was a problem reading the mailbox, propagate error value. */
	if (unlikely(ret < 0)) {
		kfree_skb(skb);

		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return ERR_PTR(ret);
	}

	/* Mailbox was read. */
	return skb;
}

int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			break;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		if ((queue_len = skb_queue_len(&offload->skb_queue)) >
		    (offload->skb_queue_len_max / 8))
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

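/* A minimal usage sketch (illustrative only, not from a real driver):
 * a driver with timestamping mailboxes would typically call the helper
 * above from its interrupt handler. struct my_priv and
 * my_read_pending() are hypothetical names, assumed here for
 * illustration:
 *
 *	static irqreturn_t my_can_irq(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *		u64 pending;
 *
 *		// Bitmask of mailboxes holding a received frame.
 *		pending = my_read_pending(priv);
 *		if (pending)
 *			can_rx_offload_irq_offload_timestamp(&priv->offload,
 *							     pending);
 *
 *		return IRQ_HANDLED;
 *	}
 */
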
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR_OR_NULL(skb))
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

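/* A minimal sketch (illustrative only) of the matching TX-complete path
 * in a driver's interrupt handler. priv, stats and
 * my_read_tx_timestamp() are hypothetical, assumed for illustration:
 *
 *	// On a TX-done interrupt for echo slot "idx": free the echo
 *	// skb, loop it back to the stack sorted by hardware timestamp
 *	// and account the transmitted bytes.
 *	u32 timestamp = my_read_tx_timestamp(priv, idx);
 *
 *	stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
 *						       idx, timestamp);
 *	stats->tx_packets++;
 */
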
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
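	/* e.g. weight = 16: fls(16) = 5, 2 << 5 = 64, 64 * 4 = 256 */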
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	can_rx_offload_reset(offload);
	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

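/* A minimal registration sketch (illustrative only), e.g. from a
 * hypothetical driver's probe function. struct my_priv, the
 * my_mailbox_read() callback and MY_NAPI_WEIGHT are assumed names; the
 * callback follows the convention used by this file: return a positive
 * value if a frame was read, 0 if the mailbox was empty, or a negative
 * error code on failure.
 *
 *	static int my_mailbox_read(struct can_rx_offload *offload,
 *				   struct can_frame *cf, u32 *timestamp,
 *				   unsigned int n)
 *	{
 *		struct my_priv *priv =
 *			container_of(offload, struct my_priv, offload);
 *
 *		// Read mailbox n from the hardware into *cf and fill in
 *		// *timestamp from the frame's RX timestamp register.
 *		return my_hw_read_frame(priv, n, cf, timestamp);
 *	}
 *
 *	// In probe, after the net_device has been set up:
 *	priv->offload.mailbox_read = my_mailbox_read;
 *	err = can_rx_offload_add_fifo(dev, &priv->offload, MY_NAPI_WEIGHT);
 *	if (err)
 *		return err;
 *
 *	// Once the interface is brought up:
 *	can_rx_offload_enable(&priv->offload);
 */
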
void can_rx_offload_enable(struct can_rx_offload *offload)
{
	can_rx_offload_reset(offload);
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);