// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
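
/* rx-offload moves CAN frame reception out of the hard IRQ handler: the
 * IRQ handler only drains the hardware mailboxes/FIFO into an skb queue
 * (optionally sorted by hardware timestamp), and a NAPI poll feeds the
 * queued skbs to the network stack.
 *
 * A rough usage sketch; the foo_* names are made up for illustration
 * and are not part of this file:
 *
 *	static unsigned int foo_mailbox_read(struct can_rx_offload *offload,
 *					     struct can_frame *cf,
 *					     u32 *timestamp, unsigned int n)
 *	{
 *		// Read mailbox n into cf and *timestamp; return non-zero
 *		// if a frame was read.
 *	}
 *
 *	// probe():	offload->mailbox_read = foo_mailbox_read;
 *	//		offload->mb_first = ...; offload->mb_last = ...;
 *	//		can_rx_offload_add_timestamp(dev, offload);
 *	// open():	can_rx_offload_enable(offload);
 *	// IRQ handler:	can_rx_offload_irq_offload_timestamp(offload, pending);
 */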

struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}
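
/* Mailboxes may be scanned in increasing (mb_first < mb_last) or
 * decreasing order. These helpers hide the scan direction from the
 * iteration loop in can_rx_offload_irq_offload_timestamp().
 */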
static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}
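
/* NAPI poll function: hand the queued skbs to the network stack, at
 * most @quota frames per call, and reschedule if frames are left over.
 */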
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}
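
/* Insert @new into @head, ordered by @compare. The queue is walked from
 * the tail, as frames usually arrive in (nearly) ascending timestamp
 * order.
 */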
static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
					int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return the result as int, to keep the
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}
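
/* Read mailbox @n into a freshly allocated skb. If the queue is already
 * over its limit or no skb can be allocated, the mailbox is still read
 * (so the hardware can reuse it) and the frame is counted as dropped.
 */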
static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	/* If the queue is full or no skb can be allocated, read the
	 * mailbox only to discard its contents.
	 */
	if (likely(skb_queue_len(&offload->skb_queue) <=
		   offload->skb_queue_len_max))
		skb = alloc_can_skb(offload->dev, &cf);

	if (!skb) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);
		if (ret)
			offload->dev->stats.rx_dropped++;

		return NULL;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
	if (!ret) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
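
/* Read all mailboxes flagged in @pending, sort the resulting skbs by
 * timestamp and splice them into the offload queue. Meant to be called
 * from the driver's interrupt handler. Returns the number of frames
 * queued.
 */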
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (!skb)
			break;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		if ((queue_len = skb_queue_len(&offload->skb_queue)) >
		    (offload->skb_queue_len_max / 8))
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
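
/* Drain a hardware RX FIFO (a single mailbox, index 0) into the offload
 * queue in arrival order. Returns the number of frames queued.
 */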
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while ((skb = can_rx_offload_offload_one(offload, 0))) {
		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
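
/* Queue a single skb, e.g. a TX-done echo or an error frame, sorted by
 * @timestamp so that it lines up with the offloaded RX frames.
 */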
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
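
/* Complete a transmitted frame: fetch the echo skb for @idx and queue
 * it sorted by @timestamp. Returns the data length of the echoed frame.
 */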
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
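
/* Queue a single skb at the tail, for drivers that do not sort by
 * timestamp.
 */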
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOMEM;

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	can_rx_offload_reset(offload);
	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}
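
/* Set up rx-offload for a driver that reads a range of timestamped
 * mailboxes, [mb_first, mb_last] in either direction; the NAPI weight
 * is derived from the mailbox range.
 */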
int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first >= BITS_PER_LONG_LONG ||
	    offload->mb_last >= BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
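
/* Set up rx-offload for a driver with a single RX FIFO; @weight is used
 * directly as the NAPI weight.
 */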
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	can_rx_offload_reset(offload);
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);