/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

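/* Serialized vendor request: grab usb_ctrl_mtx for the duration of the
 * control transfer and trace the access. Exported for use by chip specific
 * code outside this file.
 */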
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

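/* Register access over USB: the MT_VEND_TYPE_MASK bits of the address select
 * the vendor command (EEPROM, CFG or plain multi read/write), the remaining
 * bits are passed as the control transfer offset.
 */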
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

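/* Read-modify-write helper: the read and the write are done back to back
 * with usb_ctrl_mtx held, so the update cannot be interleaved with other
 * register accesses issued through this driver.
 */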
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

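/* Burst register write: copy the buffer to consecutive offsets, one 32-bit
 * word per vendor request, stopping at the first failed transfer. len is
 * expected to be a multiple of four bytes.
 */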
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

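/* Write a 32-bit value as two 16-bit vendor requests (low half at offset,
 * high half at offset + 2); the data is carried in the wValue field, so no
 * data stage is needed.
 */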
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

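/* Record the bulk IN/OUT endpoint numbers and max packet sizes from the
 * active altsetting. Fails unless the expected number of bulk endpoints
 * (__MT_EP_IN_MAX / __MT_EP_OUT_MAX) is found.
 */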
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

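/* Fill up to nsgs scatterlist entries of an rx URB with page fragments from
 * netdev_alloc_frag(), each exposing sglen usable bytes. Returns the number
 * of entries that could be filled, or -ENOMEM if none.
 */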
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = netdev_alloc_frag(len);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

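/* Allocate an rx URB together with a devm-managed scatterlist of nsgs
 * entries and populate it with page fragments.
 */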
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);

void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));
	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

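/* (Re)submit a buffer on the selected bulk endpoint. The transfer buffer is
 * left NULL because the payload is described by the scatterlist already
 * attached to the URB.
 */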
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
			  complete_fn, context);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}

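/* Validate a received buffer: the first 16-bit LE word is the DMA length,
 * which must be non-zero, 4-byte aligned and fit (together with the DMA
 * header) in the data that was actually received. Returns the DMA length
 * or -EINVAL.
 */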
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

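/* Turn a completed rx URB into an skb: build the skb around the first
 * fragment, attach any remaining fragments as paged rx frags and hand the
 * frame to the chip specific rx handler. Returns the number of scatterlist
 * entries consumed (to be refilled by the caller), or 0 if the buffer was
 * left untouched.
 */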
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

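/* Rx bottom half: drain completed buffers from the rx queue, process each
 * one, refill its scatterlist and resubmit the URB, then let mac80211 run
 * its rx processing via mt76_rx_poll_complete().
 */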
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int err, nsgs, buf_len = q->buf_size;
	struct mt76u_buf *buf;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		nsgs = mt76u_process_rx_entry(dev, buf->urb);
		if (nsgs > 0) {
			err = mt76u_fill_rx_sg(dev, buf, nsgs,
					       buf_len,
					       SKB_WITH_OVERHEAD(buf_len));
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

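/* Submit every rx buffer of the main rx queue and reset the queue
 * bookkeeping; exported so that chip specific code can (re)start the rx
 * path.
 */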
int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->lock);
	q->entry = devm_kzalloc(dev->dev,
				MT_NUM_RX_ENTRIES * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}
	q->ndesc = MT_NUM_RX_ENTRIES;

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
	struct sk_buff *iter, *last = skb;
	u32 info, pad;

	/* Buffer layout:
	 * |   4B   | xfer len |      pad       |  4B  |
	 * | TXINFO | pkt/cmd  | zero pad to 4B | zero |
	 *
	 * length field of TXINFO should be set to 'xfer len'.
	 */
	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
	put_unaligned_le32(info, skb_push(skb, sizeof(info)));

	pad = round_up(skb->len, 4) + 4 - skb->len;
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (unlikely(pad)) {
		if (__skb_pad(last, pad, true))
			return -ENOMEM;
		__skb_put(last, pad);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);

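/* Tx bottom half: for each AC queue, reap URBs whose completion handler has
 * marked them done, report the completed skbs to the driver, reschedule
 * pending traffic, kick the delayed tx-status work and wake the mac80211
 * queue once enough entries are free again.
 */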
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

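/* Delayed work that keeps polling tx status through the driver's
 * tx_status_data hook until it reports no more pending data, rescheduling
 * itself while the device is running.
 */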
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}

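/* Map an skb, including its fragment list, onto the tx URB scatterlist; the
 * number of entries is clamped to MT_SG_MAX_SIZE.
 */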
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}

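/* Queue an skb on the bulk OUT endpoint that matches the hardware queue:
 * let the driver write the tx descriptor, map the skb onto the URB
 * scatterlist and store it in the ring. The URB itself is submitted later
 * from the queue kick callback. Returns the ring index or a negative error.
 */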
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	err = mt76u_tx_build_sg(skb, buf->urb);
	if (err < 0)
		return err;

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}

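/* Submit every URB queued between q->first and q->tail; on -ENODEV the
 * device is marked as removed and submission stops.
 */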
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	size_t size;
	int i, j;

	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = q2hwq(i);

		q->entry = devm_kzalloc(dev->dev,
					MT_NUM_TX_ENTRIES * sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		goto err;

	err = mt76u_alloc_tx(dev);
	if (err < 0)
		goto err;

	return 0;
err:
	mt76u_queues_deinit(dev);
	return err;
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

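/* Initialize the USB specific parts of struct mt76_dev: rx/tx tasklets,
 * tx-status work, mcu and register-access locks, the bus and queue ops and
 * the endpoint map parsed from the interface descriptor.
 */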
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");