/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2
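
/* Layout of one TX frame on a ring, as built by dma_tx_fragment() below:
 *   slot N:     the b43 TX header      (FRAMESTART set)
 *   slot N + 1: the ieee80211 payload  (FRAMEEND and IRQ set)
 */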


/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
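
/* Note on the address handling in the fill_descriptor ops below: the bits
 * selected by SSB_DMA_TRANSLATION_MASK are stripped from the bus address
 * and stored separately as "address extension" bits in the descriptor
 * control word, while the bus translation base is ORed into the address
 * word instead. The 64bit variant applies the same scheme to the high
 * 32 bits of the address. */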
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ring->dev->dma.translation;
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
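
/* next_slot() accepts -1 because ring->current_slot is initialized to -1
 * for an empty TX ring (see b43_setup_dmaring below); the first
 * request_slot() call then hands out slot 0. */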

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
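
/* The reset loops above reuse the loop counter as a success flag:
 * i == -1 after a loop means the DISABLED state was observed before the
 * roughly 10ms polling budget was exhausted. */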

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
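
/* The poison pattern covers sizeof(struct b43_plcp_hdr6) + 2 bytes of
 * padding right after the frameoffset. b43_rx_buffer_is_poisoned() ANDs
 * the first eight bytes at that offset together, so the buffer only
 * counts as untouched if all of them still read 0xFF, i.e. the device
 * never wrote a frame into it. */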

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	bool parity = ring->dev->dma.parity;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}
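
/* Side note: the RXINDEX writes above point just past the last
 * descriptor, which appears to hand all RX slots to the hardware at
 * once and matches the ring->used_slots = ring->nr_slots bookkeeping
 * done in alloc_initial_descbuffers(). */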

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
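
/* If the bus does not advertise 64bit DMA, 32bit support is probed by
 * writing the ADDREXT mask to the first controller's TXCTL register and
 * reading it back: engines without the address extension bits read them
 * back as zero, which leaves only 30bit DMA. */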

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
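
/* do_div(n, base) divides the 64bit n in place (n becomes the quotient)
 * and returns the remainder. Hence divide() evaluates to the quotient
 * (__a after the division) and modulo() to the remainder (the value of
 * the trailing do_div() expression). */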

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
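
/* Example: the second TX controller (ring->index == 1, the AC_BE ring as
 * set up in b43_dma_init) with slot 5 yields cookie 0x2005, which
 * parse_cookie() below resolves via its 0x2000 case. */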

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
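
/* The wmb() in dma_tx_fragment() makes sure both descriptors are visible
 * in the coherent ring memory before poke_tx() advances the hardware TX
 * index register, which is what actually hands the frame to the device. */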

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}
1427
1428void b43_dma_handle_txstatus(struct b43_wldev *dev,
1429 const struct b43_txstatus *status)
1430{
1431 const struct b43_dma_ops *ops;
1432 struct b43_dmaring *ring;
Michael Buesche4d6b792007-09-18 15:39:42 -04001433 struct b43_dmadesc_meta *meta;
Michael Buesch07681e22009-11-19 22:24:29 +01001434 int slot, firstused;
Michael Buesch5100d5a2008-03-29 21:01:16 +01001435 bool frame_succeed;
Michael Buesche4d6b792007-09-18 15:39:42 -04001436
1437 ring = parse_cookie(dev, status->cookie, &slot);
1438 if (unlikely(!ring))
1439 return;
Michael Buesche4d6b792007-09-18 15:39:42 -04001440 B43_WARN_ON(!ring->tx);
Michael Buesch07681e22009-11-19 22:24:29 +01001441
1442 /* Sanity check: TX packets are processed in-order on one ring.
1443 * Check if the slot deduced from the cookie really is the first
1444 * used slot. */
1445 firstused = ring->current_slot - ring->used_slots + 1;
1446 if (firstused < 0)
1447 firstused = ring->nr_slots + firstused;
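	/* Worked example of the wraparound math above, assuming a ring
	 * with nr_slots == 256: with current_slot == 3 and
	 * used_slots == 10, firstused computes to -6 and wraps to 250,
	 * i.e. the used slots are 250..255 and 0..3. */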
	if (unlikely(slot != firstused)) {
		/* This is possibly a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* Get the meta pointer - the returned descriptor is ignored. */
		ops->idx2desc(ring, slot, &meta);

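		/* A poisoned skb pointer means this slot was already
		 * completed and reported earlier, so this status report
		 * is presumably stale or duplicated. Stop processing. */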
		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is the last scatter-gather fragment
				 * of the frame, so the skb pointer must not
				 * be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* CONFIG_B43_DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
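			/* Poisoning (rather than NULLing) the pointer keeps a
			 * completed data slot distinguishable from a header-only
			 * slot, whose skb is legitimately NULL (see the sanity
			 * checks above and below). */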
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything is unmapped and freed, so the slot is not
		 * used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
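	/* The poll loop above presumably covers a small race where the
	 * RX interrupt fires before the DMA write of the RX header has
	 * landed in memory, so frame_len briefly reads as zero. */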
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not
		 * overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* Recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);
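	/* The skb_put()/skb_pull() pair above first accounts for the
	 * hardware RX header area plus the frame, then strips the header
	 * area, so skb->data points at the frame itself while the raw
	 * rxhdr is handed to b43_rx() separately. */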

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
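	/* Make sure the buffer recycling writes above have reached memory
	 * before the hardware is told it may reuse the slots (assumed to
	 * be the reason for this barrier). */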
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
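	/* The rings are resumed in the reverse order of their suspension
	 * in b43_dma_tx_suspend(), so the multicast ring (suspended last)
	 * is woken first. */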
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}