blob: 96b994bde48051db4dc619b93e590c14be946914 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
31 *
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
35 *
36 * Things not implemented:
37 * . DMA error recovery
38 *
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42 */
43
44/*
45 * Acknowledgments:
46 *
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
49 *
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
52 *
53 * Pascal Drolet <pascal.drolet@informission.ca>
 * . Various tips for optimization and functionality
55 *
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
58 *
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
61 *
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
64 *
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
67 *
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 * . Bug fixes
70 *
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
75 *
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
79 *
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
82 *
83 */
84
85#include <linux/config.h>
86#include <linux/kernel.h>
87#include <linux/list.h>
88#include <linux/slab.h>
89#include <linux/interrupt.h>
90#include <linux/wait.h>
91#include <linux/errno.h>
92#include <linux/module.h>
93#include <linux/moduleparam.h>
94#include <linux/pci.h>
95#include <linux/fs.h>
96#include <linux/poll.h>
97#include <asm/byteorder.h>
98#include <asm/atomic.h>
99#include <asm/uaccess.h>
100#include <linux/delay.h>
101#include <linux/spinlock.h>
102
103#include <asm/pgtable.h>
104#include <asm/page.h>
105#include <asm/irq.h>
106#include <linux/sched.h>
107#include <linux/types.h>
108#include <linux/vmalloc.h>
109#include <linux/init.h>
110
111#ifdef CONFIG_PPC_PMAC
112#include <asm/machdep.h>
113#include <asm/pmac_feature.h>
114#include <asm/prom.h>
115#include <asm/pci-bridge.h>
116#endif
117
118#include "csr1212.h"
119#include "ieee1394.h"
120#include "ieee1394_types.h"
121#include "hosts.h"
122#include "dma.h"
123#include "iso.h"
124#include "ieee1394_core.h"
125#include "highlevel.h"
126#include "ohci1394.h"
127
/* Map the subsystem-wide verbose-debug option onto this driver's own
 * debug switch. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

/* Make sure we own the definition of DBGMSG below, whatever a shared
 * header may have defined before us. */
#ifdef DBGMSG
#undef DBGMSG
#endif

/* Per-card debug message; expects a local 'ohci' (struct ti_ohci *) in
 * scope at every call site. Compiles to nothing when debugging is off. */
#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

/* DMA-allocation tracing: global_outstanding_dmas counts live DMA
 * mappings/allocations so an alloc/free imbalance (a leak) is visible
 * in the log output. Only compiled in with OHCI DMA debugging enabled. */
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information; like DBGMSG, needs 'ohci' in scope */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
/* phys_dma: whether the physical DMA filters may be opened for remote
 * nodes (read-only via sysfs, 0444). */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
168
169static void dma_trm_tasklet(unsigned long data);
170static void dma_trm_reset(struct dma_trm_ctx *d);
171
172static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
173 enum context_type type, int ctx, int num_desc,
174 int buf_size, int split_buf_size, int context_base);
175static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
176static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
177
178static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
179 enum context_type type, int ctx, int num_desc,
180 int context_base);
181
182static void ohci1394_pci_remove(struct pci_dev *pdev);
183
184#ifndef __LITTLE_ENDIAN
/* Number of header quadlets that need byte-swapping on big-endian
 * hosts, indexed by transaction code (tcode). A zero entry marks a
 * tcode whose header must not be touched (see packet_swab below). */
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* ??? */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};
200
201/* Swap headers */
202static inline void packet_swab(quadlet_t *data, int tcode)
203{
204 size_t size = hdr_sizes[tcode];
205
206 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
207 return;
208
209 while (size--)
210 data[size] = swab32(data[size]);
211}
212#else
/* Little-endian hosts already match bus byte order: no swap needed */
214#define packet_swab(w,x)
215#endif /* !LITTLE_ENDIAN */
216
217/***********************************
218 * IEEE-1394 functionality section *
219 ***********************************/
220
/*
 * Read a PHY register via the OHCI PhyControl register.
 * Starts a read request (bit 15, rdReg) for register 'addr' and polls
 * up to OHCI_LOOP_COUNT ms for completion (bit 31, rdDone, per the
 * OHCI 1.1 spec). Returns the rdData field (bits 16-23); on timeout
 * an error is logged and whatever PhyControl last held is returned.
 * Serialized against set_phy_reg() by ohci->phy_reg_lock.
 */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00008000 = rdReg: trigger the PHY register read */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* 0x80000000 = rdDone: set by hardware when data is valid */
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	/* rdData lives in bits 16-23 */
	return (r & 0x00ff0000) >> 16;
}
248
249static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
250{
251 int i;
252 unsigned long flags;
253 u32 r = 0;
254
255 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
256
257 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
258
259 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
260 r = reg_read(ohci, OHCI1394_PhyControl);
261 if (!(r & 0x00004000))
262 break;
263
264 mdelay(1);
265 }
266
267 if (i == OHCI_LOOP_COUNT)
268 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
269 r, r & 0x00004000, i);
270
271 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
272
273 return;
274}
275
276/* Or's our value into the current value */
277static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
278{
279 u8 old;
280
281 old = get_phy_reg (ohci, addr);
282 old |= data;
283 set_phy_reg (ohci, addr, old);
284
285 return;
286}
287
/*
 * Parse the self-ID packets collected in the self-ID buffer after a bus
 * reset and hand each valid one to the ieee1394 core.  On a corrupt
 * buffer, force another bus reset (via the PHY IBR bit) up to
 * OHCI1394_MAX_SELF_ID_ERRORS times before giving up.
 * 'phyid' is our own physical node id; 'isroot' is currently unused here.
 */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount flags a reception error; bits 16-23
	 * (the self-ID generation) must also match the generation stored
	 * in the buffer's first quadlet, else the buffer is stale. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			/* PHY register 1, bit 0x40: initiate bus reset */
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Buffer length in quadlets is in bits 2-12; skip the leading
	 * status/generation quadlet. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	/* Each self-ID packet occupies two quadlets: the packet itself
	 * followed by its bit-inverse for integrity checking. */
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* bits 24-29 hold the sender's phy id */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
353
354static void ohci_soft_reset(struct ti_ohci *ohci) {
355 int i;
356
357 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
358
359 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
360 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
361 break;
362 mdelay(1);
363 }
364 DBGMSG ("Soft reset finished");
365}
366
367
/* Generate the dma receive prgs and start the context.
 * Builds one INPUT_MORE descriptor per receive buffer, linked into a
 * ring, then programs and starts the hardware context.  For iso (IR)
 * contexts it additionally sets up buffer-fill mode, tag matching and
 * the interrupt mask. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* The context must be stopped before its program is rewritten. */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		/* control = command bits | buffer byte count */
		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* branch to the next descriptor, Z=1 */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* Last descriptor points back to the first, but
			 * with Z=0 — presumably so the context stalls
			 * there until restarted (OHCI branch semantics;
			 * confirm against spec). */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	/* Software read position: start at buffer 0, offset 0. */
	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
427
428/* Initialize the dma transmit context */
429static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
430{
431 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
432
433 /* Stop the context */
434 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
435
436 d->prg_ind = 0;
437 d->sent_ind = 0;
438 d->free_prgs = d->num_desc;
439 d->branchAddrPtr = NULL;
440 INIT_LIST_HEAD(&d->fifo_list);
441 INIT_LIST_HEAD(&d->pending_list);
442
443 if (d->type == DMA_CTX_ISO) {
444 /* enable interrupts */
445 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
446 }
447
448 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
449}
450
451/* Count the number of available iso contexts */
452static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
453{
454 int i,ctx=0;
455 u32 tmp;
456
457 reg_write(ohci, reg, 0xffffffff);
458 tmp = reg_read(ohci, reg);
459
460 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
461
462 /* Count the number of contexts */
463 for (i=0; i<32; i++) {
464 if (tmp & 1) ctx++;
465 tmp >>= 1;
466 }
467 return ctx;
468}
469
/* Global initialization: bring the controller from soft-reset to a
 * fully running state — program bus options, PHY, DMA contexts,
 * filters, retries and interrupt mask, then enable the link.  Finally
 * enable any connected PHY ports and sanity-check the EEPROM-derived
 * max packet size. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: 2^(max_rec+1) from BusOptions
	 * bits 12-15 */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY paged register access: select port i via reg 7,
		 * read its status via reg 8 */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
                      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
651
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 *
 * Builds the OUTPUT_MORE/OUTPUT_LAST descriptor pair (or a single
 * OUTPUT_LAST-immediate for quadlet packets) for 'packet' in slot
 * d->prg_ind, links it after the previous program via branchAddrPtr,
 * and appends the packet to d->fifo_list.  Caller must hold d->lock
 * (see dma_trm_flush) and ensure d->free_prgs > 0.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else 
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* raw (PHY) packet: two quadlets after the PHY tcode */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* OHCI AT header quadlet 0: speed in bits 16+,
			 * low 16 bits of the 1394 header */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				/* stream packets carry 8 bytes of
				 * immediate header */
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				/* normal async headers are 16 bytes */
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* Link the new program after the previous one
			 * (Z=3: begin + end descriptors). */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Z=2: single immediate descriptor pair. */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
830
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* slot of the first program queued here */
	int z = 0;		/* Z (descriptor count) for cmdPtr; also
				 * doubles as "did we queue anything" flag */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* Program cmdPtr with the first descriptor block and its Z */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000); /* run */
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000); /* wake */
	}

	return;
}
885
/* Transmission of an async or iso packet.
 * Picks the transmit DMA context that matches the packet type (AT
 * request, AT response, or the lazily-allocated legacy IT context),
 * queues the packet on its pending list, and flushes the context.
 * Returns 0 on success, -EOVERFLOW if the payload exceeds the
 * controller's max packet size, -EINVAL/-ENOMEM on legacy IT context
 * setup failure. */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		/* tcode bit 1 set (and not iso data) marks a response */
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
944
945static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
946{
947 struct ti_ohci *ohci = host->hostdata;
948 int retval = 0;
949 unsigned long flags;
950 int phy_reg;
951
952 switch (cmd) {
953 case RESET_BUS:
954 switch (arg) {
955 case SHORT_RESET:
956 phy_reg = get_phy_reg(ohci, 5);
957 phy_reg |= 0x40;
958 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
959 break;
960 case LONG_RESET:
961 phy_reg = get_phy_reg(ohci, 1);
962 phy_reg |= 0x40;
963 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
964 break;
965 case SHORT_RESET_NO_FORCE_ROOT:
966 phy_reg = get_phy_reg(ohci, 1);
967 if (phy_reg & 0x80) {
968 phy_reg &= ~0x80;
969 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
970 }
971
972 phy_reg = get_phy_reg(ohci, 5);
973 phy_reg |= 0x40;
974 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
975 break;
976 case LONG_RESET_NO_FORCE_ROOT:
977 phy_reg = get_phy_reg(ohci, 1);
978 phy_reg &= ~0x80;
979 phy_reg |= 0x40;
980 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
981 break;
982 case SHORT_RESET_FORCE_ROOT:
983 phy_reg = get_phy_reg(ohci, 1);
984 if (!(phy_reg & 0x80)) {
985 phy_reg |= 0x80;
986 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
987 }
988
989 phy_reg = get_phy_reg(ohci, 5);
990 phy_reg |= 0x40;
991 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
992 break;
993 case LONG_RESET_FORCE_ROOT:
994 phy_reg = get_phy_reg(ohci, 1);
995 phy_reg |= 0xc0;
996 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
997 break;
998 default:
999 retval = -1;
1000 }
1001 break;
1002
1003 case GET_CYCLE_COUNTER:
1004 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1005 break;
1006
1007 case SET_CYCLE_COUNTER:
1008 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
1009 break;
1010
1011 case SET_BUS_ID:
1012 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1013 break;
1014
1015 case ACT_CYCLE_MASTER:
1016 if (arg) {
1017 /* check if we are root and other nodes are present */
1018 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1019 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1020 /*
1021 * enable cycleTimer, cycleMaster
1022 */
1023 DBGMSG("Cycle master enabled");
1024 reg_write(ohci, OHCI1394_LinkControlSet,
1025 OHCI1394_LinkControl_CycleTimerEnable |
1026 OHCI1394_LinkControl_CycleMaster);
1027 }
1028 } else {
1029 /* disable cycleTimer, cycleMaster, cycleSource */
1030 reg_write(ohci, OHCI1394_LinkControlClear,
1031 OHCI1394_LinkControl_CycleTimerEnable |
1032 OHCI1394_LinkControl_CycleMaster |
1033 OHCI1394_LinkControl_CycleSource);
1034 }
1035 break;
1036
1037 case CANCEL_REQUESTS:
1038 DBGMSG("Cancel request received");
1039 dma_trm_reset(&ohci->at_req_context);
1040 dma_trm_reset(&ohci->at_resp_context);
1041 break;
1042
1043 case ISO_LISTEN_CHANNEL:
1044 {
1045 u64 mask;
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001046 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1047 int ir_legacy_active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048
1049 if (arg<0 || arg>63) {
1050 PRINT(KERN_ERR,
1051 "%s: IS0 listen channel %d is out of range",
1052 __FUNCTION__, arg);
1053 return -EFAULT;
1054 }
1055
1056 mask = (u64)0x1<<arg;
1057
1058 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1059
1060 if (ohci->ISO_channel_usage & mask) {
1061 PRINT(KERN_ERR,
1062 "%s: IS0 listen channel %d is already used",
1063 __FUNCTION__, arg);
1064 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1065 return -EFAULT;
1066 }
1067
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001068 ir_legacy_active = ohci->ir_legacy_channels;
1069
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 ohci->ISO_channel_usage |= mask;
1071 ohci->ir_legacy_channels |= mask;
1072
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001073 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1074
1075 if (!ir_legacy_active) {
1076 if (ohci1394_register_iso_tasklet(ohci,
1077 &ohci->ir_legacy_tasklet) < 0) {
1078 PRINT(KERN_ERR, "No IR DMA context available");
1079 return -EBUSY;
1080 }
1081
1082 /* the IR context can be assigned to any DMA context
1083 * by ohci1394_register_iso_tasklet */
1084 d->ctx = ohci->ir_legacy_tasklet.context;
1085 d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1086 32*d->ctx;
1087 d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1088 32*d->ctx;
1089 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1090 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1091
1092 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1093
Olaf Hering98848fa2005-07-14 00:33:45 -07001094 if (printk_ratelimit())
Jody McIntyre32e7a042005-09-30 11:59:19 -07001095 DBGMSG("IR legacy activated");
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001096 }
1097
1098 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1099
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100 if (arg>31)
1101 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1102 1<<(arg-32));
1103 else
1104 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1105 1<<arg);
1106
1107 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1108 DBGMSG("Listening enabled on channel %d", arg);
1109 break;
1110 }
1111 case ISO_UNLISTEN_CHANNEL:
1112 {
1113 u64 mask;
1114
1115 if (arg<0 || arg>63) {
1116 PRINT(KERN_ERR,
1117 "%s: IS0 unlisten channel %d is out of range",
1118 __FUNCTION__, arg);
1119 return -EFAULT;
1120 }
1121
1122 mask = (u64)0x1<<arg;
1123
1124 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1125
1126 if (!(ohci->ISO_channel_usage & mask)) {
1127 PRINT(KERN_ERR,
1128 "%s: IS0 unlisten channel %d is not used",
1129 __FUNCTION__, arg);
1130 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1131 return -EFAULT;
1132 }
1133
1134 ohci->ISO_channel_usage &= ~mask;
1135 ohci->ir_legacy_channels &= ~mask;
1136
1137 if (arg>31)
1138 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1139 1<<(arg-32));
1140 else
1141 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1142 1<<arg);
1143
1144 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1145 DBGMSG("Listening disabled on channel %d", arg);
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001146
1147 if (ohci->ir_legacy_channels == 0) {
1148 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1149 DBGMSG("ISO legacy receive context stopped");
1150 }
1151
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152 break;
1153 }
1154 default:
1155 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1156 cmd);
1157 break;
1158 }
1159 return retval;
1160}
1161
1162/***********************************
1163 * rawiso ISO reception *
1164 ***********************************/
1165
1166/*
1167 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1168 buffer is split into "blocks" (regions described by one DMA
1169 descriptor). Each block must be one page or less in size, and
1170 must not cross a page boundary.
1171
1172 There is one little wrinkle with buffer-fill mode: a packet that
1173 starts in the final block may wrap around into the first block. But
1174 the user API expects all packets to be contiguous. Our solution is
1175 to keep the very last page of the DMA buffer in reserve - if a
1176 packet spans the gap, we copy its tail into this page.
1177*/
1178
/* Per-context state for rawiso reception; hangs off hpsb_iso->hostdata.
 * Allocated in ohci_iso_recv_init(), freed in ohci_iso_recv_shutdown(). */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered with the host */

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks (bytes per block; a power of 2, <= PAGE_SIZE) */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (offsets for this context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1220
1221static void ohci_iso_recv_task(unsigned long data);
1222static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1223static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1224static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1225static void ohci_iso_recv_program(struct hpsb_iso *iso);
1226
/* Allocate per-context receive state, pick buffer-fill vs packet-per-buffer
 * DMA mode, size the descriptor ring, claim an IR DMA context (via the iso
 * tasklet machinery) and build the DMA program.
 *
 * Returns 0 on success, -ENOMEM / -EBUSY on failure; on any failure path
 * everything allocated so far is torn down via ohci_iso_recv_shutdown(). */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	/* initialize hostdata early so the err path can call shutdown safely */
	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		/* NOTE(review): nblocks is unsigned, so if buf_size < 2*PAGE_SIZE the
		   subtraction wraps and this check passes - presumably the iso core
		   validates buf_size upstream; confirm */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		/* clamp: never batch more than a quarter of the ring, never less than 1 */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two >= max_packet_size (starting at 8) */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	/* channel == -1 means "listen to everything" -> multichannel context */
	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1357
1358static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1359{
1360 struct ohci_iso_recv *recv = iso->hostdata;
1361
1362 /* disable interrupts */
1363 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1364
1365 /* halt DMA */
1366 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1367}
1368
1369static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1370{
1371 struct ohci_iso_recv *recv = iso->hostdata;
1372
1373 if (recv->task_active) {
1374 ohci_iso_recv_stop(iso);
1375 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1376 recv->task_active = 0;
1377 }
1378
1379 dma_prog_region_free(&recv->prog);
1380 kfree(recv);
1381 iso->hostdata = NULL;
1382}
1383
1384/* set up a "gapped" ring buffer DMA program */
1385static void ohci_iso_recv_program(struct hpsb_iso *iso)
1386{
1387 struct ohci_iso_recv *recv = iso->hostdata;
1388 int blk;
1389
1390 /* address of 'branch' field in previous DMA descriptor */
1391 u32 *prev_branch = NULL;
1392
1393 for (blk = 0; blk < recv->nblocks; blk++) {
1394 u32 control;
1395
1396 /* the DMA descriptor */
1397 struct dma_cmd *cmd = &recv->block[blk];
1398
1399 /* offset of the DMA descriptor relative to the DMA prog buffer */
1400 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1401
1402 /* offset of this packet's data within the DMA buffer */
1403 unsigned long buf_offset = blk * recv->buf_stride;
1404
1405 if (recv->dma_mode == BUFFER_FILL_MODE) {
1406 control = 2 << 28; /* INPUT_MORE */
1407 } else {
1408 control = 3 << 28; /* INPUT_LAST */
1409 }
1410
1411 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1412
1413 /* interrupt on last block, and at intervals */
1414 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1415 control |= 3 << 20; /* want interrupt */
1416 }
1417
1418 control |= 3 << 18; /* enable branch to address */
1419 control |= recv->buf_stride;
1420
1421 cmd->control = cpu_to_le32(control);
1422 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1423 cmd->branchAddress = 0; /* filled in on next loop */
1424 cmd->status = cpu_to_le32(recv->buf_stride);
1425
1426 /* link the previous descriptor to this one */
1427 if (prev_branch) {
1428 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1429 }
1430
1431 prev_branch = &cmd->branchAddress;
1432 }
1433
1434 /* the final descriptor's branch address and Z should be left at 0 */
1435}
1436
1437/* listen or unlisten to a specific channel (multi-channel mode only) */
1438static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1439{
1440 struct ohci_iso_recv *recv = iso->hostdata;
1441 int reg, i;
1442
1443 if (channel < 32) {
1444 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1445 i = channel;
1446 } else {
1447 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1448 i = channel - 32;
1449 }
1450
1451 reg_write(recv->ohci, reg, (1 << i));
1452
1453 /* issue a dummy read to force all PCI writes to be posted immediately */
1454 mb();
1455 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1456}
1457
1458static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1459{
1460 struct ohci_iso_recv *recv = iso->hostdata;
1461 int i;
1462
1463 for (i = 0; i < 64; i++) {
1464 if (mask & (1ULL << i)) {
1465 if (i < 32)
1466 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1467 else
1468 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1469 } else {
1470 if (i < 32)
1471 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1472 else
1473 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1474 }
1475 }
1476
1477 /* issue a dummy read to force all PCI writes to be posted immediately */
1478 mb();
1479 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1480}
1481
/* Arm and start the IR context: program ContextControl (header keep /
 * buffer-fill bits), ContextMatch (tag, channel, optional cycle and sync
 * filters), point CommandPtr at the first descriptor, unmask the context
 * interrupt and set RUN.
 *
 * cycle == -1 means "start immediately"; sync == -1 means "don't wait for
 * a sync field".  Returns 0 on success, -1 if the context refuses to run. */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* start from a clean ContextControl */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run (set the RUN bit) */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN - if the controller dropped it, startup failed */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1572
1573static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1574{
1575 /* re-use the DMA descriptor for the block */
1576 /* by linking the previous descriptor to it */
1577
1578 int next_i = block;
1579 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1580
1581 struct dma_cmd *next = &recv->block[next_i];
1582 struct dma_cmd *prev = &recv->block[prev_i];
Ben Collins1934b8b2005-07-09 20:01:23 -04001583
1584 /* ignore out-of-range requests */
1585 if ((block < 0) || (block > recv->nblocks))
1586 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
1588 /* 'next' becomes the new end of the DMA chain,
1589 so disable branch and enable interrupt */
1590 next->branchAddress = 0;
1591 next->control |= cpu_to_le32(3 << 20);
1592 next->status = cpu_to_le32(recv->buf_stride);
1593
1594 /* link prev to next */
1595 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1596 sizeof(struct dma_cmd) * next_i)
1597 | 1); /* Z=1 */
1598
1599 /* disable interrupt on previous DMA descriptor, except at intervals */
1600 if ((prev_i % recv->block_irq_interval) == 0) {
1601 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1602 } else {
1603 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1604 }
1605 wmb();
1606
1607 /* wake up DMA in case it fell asleep */
1608 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1609}
1610
1611static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1612 struct hpsb_iso_packet_info *info)
1613{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 /* release the memory where the packet was */
Ben Collins1934b8b2005-07-09 20:01:23 -04001615 recv->released_bytes += info->total_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
1617 /* have we released enough memory for one block? */
1618 while (recv->released_bytes > recv->buf_stride) {
1619 ohci_iso_recv_release_block(recv, recv->block_reader);
1620 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1621 recv->released_bytes -= recv->buf_stride;
1622 }
1623}
1624
1625static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1626{
1627 struct ohci_iso_recv *recv = iso->hostdata;
1628 if (recv->dma_mode == BUFFER_FILL_MODE) {
1629 ohci_iso_recv_bufferfill_release(recv, info);
1630 } else {
1631 ohci_iso_recv_release_block(recv, info - iso->infos);
1632 }
1633}
1634
1635/* parse all packets from blocks that have been fully received */
/* Parse all packets from blocks that have been fully received.
 *
 * Walks the data buffer from recv->dma_offset up to (but not into) the
 * block DMA is currently filling, extracting one iso packet per iteration:
 * 4-byte header, padded payload, 4-byte trailer with the timestamp.  A
 * packet whose payload wraps past the end of the ring is made contiguous
 * by copying its tail into the reserved guard page.  Each packet is handed
 * to hpsb_iso_packet_received(); any reader is woken at the end. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely (corrupt lengths could otherwise
		   keep us chasing our own tail) */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		/* dataLength is little-endian in bytes 2-3 of the header */
		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp (low 13 bits of the trailer = cycle count) */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1742
1743static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1744{
1745 int loop;
1746 struct ti_ohci *ohci = recv->ohci;
1747
1748 /* loop over all blocks */
1749 for (loop = 0; loop < recv->nblocks; loop++) {
1750
1751 /* check block_dma to see if it's done */
1752 struct dma_cmd *im = &recv->block[recv->block_dma];
1753
1754 /* check the DMA descriptor for new writes to xferStatus */
1755 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1756
1757 /* rescount is the number of bytes *remaining to be written* in the block */
1758 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1759
1760 unsigned char event = xferstatus & 0x1F;
1761
1762 if (!event) {
1763 /* nothing has happened to this block yet */
1764 break;
1765 }
1766
1767 if (event != 0x11) {
1768 atomic_inc(&iso->overflows);
1769 PRINT(KERN_ERR,
1770 "IR DMA error - OHCI error code 0x%02x\n", event);
1771 }
1772
1773 if (rescount != 0) {
1774 /* the card is still writing to this block;
1775 we can't touch it until it's done */
1776 break;
1777 }
1778
1779 /* OK, the block is finished... */
1780
1781 /* sync our view of the block */
1782 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1783
1784 /* reset the DMA descriptor */
1785 im->status = recv->buf_stride;
1786
1787 /* advance block_dma */
1788 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1789
1790 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1791 atomic_inc(&iso->overflows);
1792 DBGMSG("ISO reception overflow - "
1793 "ran out of DMA blocks");
1794 }
1795 }
1796
1797 /* parse any packets that have arrived */
1798 ohci_iso_recv_bufferfill_parse(iso, recv);
1799}
1800
1801static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1802{
1803 int count;
1804 int wake = 0;
1805 struct ti_ohci *ohci = recv->ohci;
1806
1807 /* loop over the entire buffer */
1808 for (count = 0; count < recv->nblocks; count++) {
1809 u32 packet_len = 0;
1810
1811 /* pointer to the DMA descriptor */
1812 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1813
1814 /* check the DMA descriptor for new writes to xferStatus */
1815 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1816 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1817
1818 unsigned char event = xferstatus & 0x1F;
1819
1820 if (!event) {
1821 /* this packet hasn't come in yet; we are done for now */
1822 goto out;
1823 }
1824
1825 if (event == 0x11) {
1826 /* packet received successfully! */
1827
1828 /* rescount is the number of bytes *remaining* in the packet buffer,
1829 after the packet was written */
1830 packet_len = recv->buf_stride - rescount;
1831
1832 } else if (event == 0x02) {
1833 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1834 } else if (event) {
1835 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1836 }
1837
1838 /* sync our view of the buffer */
1839 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1840
1841 /* record the per-packet info */
1842 {
1843 /* iso header is 8 bytes ahead of the data payload */
1844 unsigned char *hdr;
1845
1846 unsigned int offset;
1847 unsigned short cycle;
1848 unsigned char channel, tag, sy;
1849
1850 offset = iso->pkt_dma * recv->buf_stride;
1851 hdr = iso->data_buf.kvirt + offset;
1852
1853 /* skip iso header */
1854 offset += 8;
1855 packet_len -= 8;
1856
1857 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1858 channel = hdr[5] & 0x3F;
1859 tag = hdr[5] >> 6;
1860 sy = hdr[4] & 0xF;
1861
Ben Collins1934b8b2005-07-09 20:01:23 -04001862 hpsb_iso_packet_received(iso, offset, packet_len,
1863 recv->buf_stride, cycle, channel, tag, sy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 }
1865
1866 /* reset the DMA descriptor */
1867 il->status = recv->buf_stride;
1868
1869 wake = 1;
1870 recv->block_dma = iso->pkt_dma;
1871 }
1872
1873out:
1874 if (wake)
1875 hpsb_iso_wake(iso);
1876}
1877
1878static void ohci_iso_recv_task(unsigned long data)
1879{
1880 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1881 struct ohci_iso_recv *recv = iso->hostdata;
1882
1883 if (recv->dma_mode == BUFFER_FILL_MODE)
1884 ohci_iso_recv_bufferfill_task(iso, recv);
1885 else
1886 ohci_iso_recv_packetperbuf_task(iso, recv);
1887}
1888
1889/***********************************
1890 * rawiso ISO transmission *
1891 ***********************************/
1892
/* Per-context state for rawiso transmission; hangs off hpsb_iso->hostdata.
 * Allocated in ohci_iso_xmit_init(), freed in ohci_iso_xmit_shutdown(). */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;	/* memory and PCI mapping of the DMA program */
	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered with the host */

	/* OHCI DMA context control registers (offsets for this context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1903
1904/* transmission DMA program:
1905 one OUTPUT_MORE_IMMEDIATE for the IT header
1906 one OUTPUT_LAST for the buffer data */
1907
/* One DMA program entry per queued transmit packet (see the comment above). */
struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* OUTPUT_MORE_IMMEDIATE descriptor */
	u8 iso_hdr[8];		/* immediate data: the isochronous packet header */
	u32 unused[2];		/* NOTE(review): presumably padding so output_last sits at
				   the next 16-byte descriptor slot - confirm against the
				   OHCI descriptor layout */
	struct dma_cmd output_last;	/* OUTPUT_LAST descriptor pointing at the payload */
};
1914
1915static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1916static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1917static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1918static void ohci_iso_xmit_task(unsigned long data);
1919
1920static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1921{
1922 struct ohci_iso_xmit *xmit;
1923 unsigned int prog_size;
1924 int ctx;
1925 int ret = -ENOMEM;
1926
1927 xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1928 if (!xmit)
1929 return -ENOMEM;
1930
1931 iso->hostdata = xmit;
1932 xmit->ohci = iso->host->hostdata;
1933 xmit->task_active = 0;
1934
1935 dma_prog_region_init(&xmit->prog);
1936
1937 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1938
1939 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1940 goto err;
1941
1942 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1943 ohci_iso_xmit_task, (unsigned long) iso);
1944
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001945 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1946 ret = -EBUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 goto err;
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001948 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950 xmit->task_active = 1;
1951
1952 /* xmit context registers are spaced 16 bytes apart */
1953 ctx = xmit->task.context;
1954 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1955 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1956 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1957
1958 return 0;
1959
1960err:
1961 ohci_iso_xmit_shutdown(iso);
1962 return ret;
1963}
1964
1965static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1966{
1967 struct ohci_iso_xmit *xmit = iso->hostdata;
1968 struct ti_ohci *ohci = xmit->ohci;
1969
1970 /* disable interrupts */
1971 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1972
1973 /* halt DMA */
1974 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1975 /* XXX the DMA context will lock up if you try to send too much data! */
1976 PRINT(KERN_ERR,
1977 "you probably exceeded the OHCI card's bandwidth limit - "
1978 "reload the module and reduce xmit bandwidth");
1979 }
1980}
1981
1982static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1983{
1984 struct ohci_iso_xmit *xmit = iso->hostdata;
1985
1986 if (xmit->task_active) {
1987 ohci_iso_xmit_stop(iso);
1988 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1989 xmit->task_active = 0;
1990 }
1991
1992 dma_prog_region_free(&xmit->prog);
1993 kfree(xmit);
1994 iso->hostdata = NULL;
1995}
1996
/* Iso transmit bottom half: reap completed packets from the descriptor
 * ring, report each to the hpsb_iso layer, and wake any blocked writer.
 * A descriptor is "done" once the controller has written a non-zero
 * event code into its xferStatus field. */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		/* NOTE(review): iso->pkt_dma is re-read every iteration;
		 * presumably hpsb_iso_packet_sent() below advances it,
		 * otherwise this loop would re-inspect the same slot --
		 * confirm against the hpsb_iso implementation. */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus (high half of the
		 * little-endian status quadlet) */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		/* low 5 bits are the OHCI event code; anything other than
		 * 0x11 is treated as an error below */
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (low 13 bits of the timestamp the controller
		 * wrote back into the status quadlet) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2041
/* Queue one packet for iso transmit.  Builds a two-descriptor program in
 * ring slot iso->first_packet -- an OUTPUT_MORE_IMMEDIATE carrying the
 * embedded iso packet header, followed by an OUTPUT_LAST pointing at the
 * payload -- chains the previous slot's branchAddress to it, and sets
 * the context's wake bit.  Returns 0 on success, -EINVAL if the payload
 * would cross a page boundary. */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size (little-endian 16-bit split across two bytes) */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now (end of chain) */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval
	 * (only every irq_interval-th descriptor keeps its interrupt bits,
	 * so completions are batched rather than raised per packet) */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	/* make sure the descriptor writes are visible before waking DMA */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2137
/* Program and start the iso transmit context.  If 'cycle' != -1, arms
 * cycleMatch so transmission begins on that bus cycle (mod 8000);
 * otherwise the context starts immediately.  Returns 0 on success, or
 * -1 if the RUN bit did not stick after 100 usec. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* bit 31 = cycleMatchEnable, bits 16-30 = match value */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run (set the RUN bit) */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2186
2187static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2188{
2189
2190 switch(cmd) {
2191 case XMIT_INIT:
2192 return ohci_iso_xmit_init(iso);
2193 case XMIT_START:
2194 return ohci_iso_xmit_start(iso, arg);
2195 case XMIT_STOP:
2196 ohci_iso_xmit_stop(iso);
2197 return 0;
2198 case XMIT_QUEUE:
2199 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2200 case XMIT_SHUTDOWN:
2201 ohci_iso_xmit_shutdown(iso);
2202 return 0;
2203
2204 case RECV_INIT:
2205 return ohci_iso_recv_init(iso);
2206 case RECV_START: {
2207 int *args = (int*) arg;
2208 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2209 }
2210 case RECV_STOP:
2211 ohci_iso_recv_stop(iso);
2212 return 0;
2213 case RECV_RELEASE:
2214 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2215 return 0;
2216 case RECV_FLUSH:
2217 ohci_iso_recv_task((unsigned long) iso);
2218 return 0;
2219 case RECV_SHUTDOWN:
2220 ohci_iso_recv_shutdown(iso);
2221 return 0;
2222 case RECV_LISTEN_CHANNEL:
2223 ohci_iso_recv_change_channel(iso, arg, 1);
2224 return 0;
2225 case RECV_UNLISTEN_CHANNEL:
2226 ohci_iso_recv_change_channel(iso, arg, 0);
2227 return 0;
2228 case RECV_SET_CHANNEL_MASK:
2229 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2230 return 0;
2231
2232 default:
2233 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2234 cmd);
2235 break;
2236 }
2237 return -EINVAL;
2238}
2239
2240/***************************************
2241 * IEEE-1394 functionality section END *
2242 ***************************************/
2243
2244
2245/********************************************************
2246 * Global stuff (interrupt handler, init/shutdown code) *
2247 ********************************************************/
2248
/* Abort an async transmit context: stop the hardware, reset the ring
 * bookkeeping under the context lock, then (outside the lock) complete
 * every packet that was queued or in flight with ACKX_ABORTED so the
 * subsystem callbacks can run without holding d->lock. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	/* fifo (in-flight) packets first, then pending ones, preserving
	 * queue order in packet_list */
	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* ring is empty again: no branch to patch, all programs free */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2287
2288static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2289 quadlet_t rx_event,
2290 quadlet_t tx_event)
2291{
2292 struct ohci1394_iso_tasklet *t;
2293 unsigned long mask;
Andy Wingo4a9949d2005-10-19 21:23:46 -07002294 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295
Andy Wingo4a9949d2005-10-19 21:23:46 -07002296 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
2298 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2299 mask = 1 << t->context;
2300
2301 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2302 tasklet_schedule(&t->tasklet);
2303 else if (rx_event & mask)
2304 tasklet_schedule(&t->tasklet);
2305 }
2306
Andy Wingo4a9949d2005-10-19 21:23:46 -07002307 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308}
2309
/* Top-level interrupt handler.  Reads and acknowledges IntEvent (except
 * busReset, which is only cleared once selfIDComplete arrives), then
 * dispatches each pending event bit in turn, clearing it from 'event'
 * as it is handled.  Any bit left over at the end is reported as an
 * unhandled interrupt. */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register. Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected. In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* dump the control/command-pointer registers of every
		 * context whose dead bit (0x800) is set, for diagnosis */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
	if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet, had to involve protocol drivers */
	}
	if (event & OHCI1394_cycleTooLong) {
		if(printk_ratelimit())
			PRINT(KERN_WARNING, "isochronous cycle too long");
		else
			DBGMSG("OHCI1394_cycleTooLong");
		/* re-enable cycle master, which the controller drops on
		 * this error */
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_CycleMaster);
		event &= ~OHCI1394_cycleTooLong;
	}
	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				/* drop the lock while delaying so other
				 * event_lock users are not starved */
				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			/* run the bottom half directly instead of
			 * scheduling the tasklet */
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* bit 31 = idValid */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			       "(phyid %d, %s)", phyid,
			       (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt. */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
					  0xffffffff);
				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
					  0xffffffff);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for. */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2569
2570/* Put the buffer back into the dma context */
2571static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2572{
2573 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2574 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2575
2576 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2577 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2578 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2579 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2580
2581 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2582 * context program descriptors before it sees the wakeup bit set. */
2583 wmb();
2584
2585 /* wake up the dma context if necessary */
2586 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2587 PRINT(KERN_INFO,
2588 "Waking dma ctx=%d ... processing is probably too slow",
2589 d->ctx);
2590 }
2591
2592 /* do this always, to avoid race condition */
2593 reg_write(ohci, d->ctrlSet, 0x1000);
2594}
2595
/* Conditionally byte-swap a little-endian quadlet read from a DMA
 * buffer: controllers flagged 'no_swap_incoming' deliver data already in
 * host order, so the swap is skipped for them.  Arguments and the whole
 * expansion are parenthesized so the macro stays safe when passed
 * compound expressions (the previous form expanded them unprotected). */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu((data)))

/* Expected packet size in the AR buffer, indexed by transaction code:
 * 0 means the size is variable and must be read from the packet's own
 * data-length field (see packet_length()); -1 marks tcodes that are not
 * expected in this buffer and are treated as errors. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
				   -1, 0, -1, 0, -1, -1, 16, -1};
2601
2602/*
2603 * Determine the length of a packet in the buffer
2604 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2605 */
2606static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2607 int offset, unsigned char tcode, int noswap)
2608{
2609 int length = -1;
2610
2611 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2612 length = TCODE_SIZE[tcode];
2613 if (length == 0) {
2614 if (offset + 12 >= d->buf_size) {
2615 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2616 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2617 } else {
2618 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2619 }
2620 length += 20;
2621 }
2622 } else if (d->type == DMA_CTX_ISO) {
2623 /* Assumption: buffer fill mode with header/trailer */
2624 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2625 }
2626
2627 if (length > 0 && length % 4)
2628 length += 4 - (length % 4);
2629
2630 return length;
2631}
2632
/* Tasklet that processes dma receive buffers.
 *
 * Walks the receive buffer ring from (buf_ind, buf_offset), using each
 * descriptor's residual count to find how much the controller has
 * written.  Every packet is copied into the bounce buffer d->spb --
 * reassembled across buffer boundaries if necessary -- and handed to
 * hpsb_packet_received(); consumed buffers are re-queued to the
 * controller via insert_dma_buffer(). */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* resume where the previous run left off */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* residual count = bytes the controller has NOT yet filled */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor. */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call. */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				/* save position and retry next run */
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* reassemble the packet into d->spb, consuming
			 * (and re-queueing) whole buffers as we go */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it. */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);

			/* ack code lives in the trailer quadlet; 0x11 is
			 * treated as "complete" (ack=1), all else ack=0 */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the trailer quadlet */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* re-read the residual count: the controller may have
		 * written more while we were copying */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2771
/* Bottom half that processes sent packets.
 *
 * Walks the fifo list of in-flight async transmit packets in order,
 * stopping at the first descriptor whose status the controller has not
 * yet written back.  Each completed packet's OHCI event code is mapped
 * to a hpsb ACK code, the subsystem is notified, the payload's DMA
 * mapping is released, and the descriptor slot is recycled.  Finally
 * dma_trm_flush() pushes any packets that were waiting for free slots. */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		/* packets with payload use a begin+end descriptor pair;
		 * the status is written into the last descriptor used */
		datasize = packet->data_size;
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
				status&0x1f, (status>>5)&0x3,
				le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
				d->ctx);
#endif

		/* bit 4 set = the 1394 ack code is valid in the low nibble;
		 * otherwise the low 5 bits are an OHCI evt_* error code */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		if (datasize) {
			/* NOTE(review): cpu_to_le32() here states the wrong
			 * conversion direction for a value being read FROM a
			 * little-endian descriptor field (le32_to_cpu()
			 * intended); the byte swap performed is identical on
			 * all architectures, so behavior is unaffected. */
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		/* recycle this descriptor slot */
		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	/* push any packets that were waiting for free descriptor slots */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2899
2900static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2901{
2902 if (d->ctrlClear) {
2903 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2904
2905 if (d->type == DMA_CTX_ISO) {
2906 /* disable interrupts */
2907 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2908 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2909 } else {
2910 tasklet_kill(&d->task);
2911 }
2912 }
2913}
2914
2915
2916static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2917{
2918 int i;
2919 struct ti_ohci *ohci = d->ohci;
2920
2921 if (ohci == NULL)
2922 return;
2923
2924 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2925
2926 if (d->buf_cpu) {
2927 for (i=0; i<d->num_desc; i++)
2928 if (d->buf_cpu[i] && d->buf_bus[i]) {
2929 pci_free_consistent(
2930 ohci->dev, d->buf_size,
2931 d->buf_cpu[i], d->buf_bus[i]);
2932 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2933 }
2934 kfree(d->buf_cpu);
2935 kfree(d->buf_bus);
2936 }
2937 if (d->prg_cpu) {
2938 for (i=0; i<d->num_desc; i++)
2939 if (d->prg_cpu[i] && d->prg_bus[i]) {
2940 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2941 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2942 }
2943 pci_pool_destroy(d->prg_pool);
2944 OHCI_DMA_FREE("dma_rcv prg pool");
2945 kfree(d->prg_cpu);
2946 kfree(d->prg_bus);
2947 }
Jody McIntyre616b8592005-05-16 21:54:01 -07002948 kfree(d->spb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949
2950 /* Mark this context as freed. */
2951 d->ohci = NULL;
2952}
2953
2954static int
2955alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2956 enum context_type type, int ctx, int num_desc,
2957 int buf_size, int split_buf_size, int context_base)
2958{
2959 int i, len;
2960 static int num_allocs;
2961 static char pool_name[20];
2962
2963 d->ohci = ohci;
2964 d->type = type;
2965 d->ctx = ctx;
2966
2967 d->num_desc = num_desc;
2968 d->buf_size = buf_size;
2969 d->split_buf_size = split_buf_size;
2970
2971 d->ctrlSet = 0;
2972 d->ctrlClear = 0;
2973 d->cmdPtr = 0;
2974
Stefan Richter85511582005-11-07 06:31:45 -05002975 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2976 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977
2978 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2979 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2980 free_dma_rcv_ctx(d);
2981 return -ENOMEM;
2982 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983
Stefan Richter85511582005-11-07 06:31:45 -05002984 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2985 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986
2987 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2988 PRINT(KERN_ERR, "Failed to allocate dma prg");
2989 free_dma_rcv_ctx(d);
2990 return -ENOMEM;
2991 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992
2993 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2994
2995 if (d->spb == NULL) {
2996 PRINT(KERN_ERR, "Failed to allocate split buffer");
2997 free_dma_rcv_ctx(d);
2998 return -ENOMEM;
2999 }
3000
3001 len = sprintf(pool_name, "ohci1394_rcv_prg");
3002 sprintf(pool_name+len, "%d", num_allocs);
3003 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3004 sizeof(struct dma_cmd), 4, 0);
3005 if(d->prg_pool == NULL)
3006 {
3007 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3008 free_dma_rcv_ctx(d);
3009 return -ENOMEM;
3010 }
3011 num_allocs++;
3012
3013 OHCI_DMA_ALLOC("dma_rcv prg pool");
3014
3015 for (i=0; i<d->num_desc; i++) {
3016 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3017 d->buf_size,
3018 d->buf_bus+i);
3019 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3020
3021 if (d->buf_cpu[i] != NULL) {
3022 memset(d->buf_cpu[i], 0, d->buf_size);
3023 } else {
3024 PRINT(KERN_ERR,
3025 "Failed to allocate dma buffer");
3026 free_dma_rcv_ctx(d);
3027 return -ENOMEM;
3028 }
3029
3030 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3031 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3032
3033 if (d->prg_cpu[i] != NULL) {
3034 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3035 } else {
3036 PRINT(KERN_ERR,
3037 "Failed to allocate dma prg");
3038 free_dma_rcv_ctx(d);
3039 return -ENOMEM;
3040 }
3041 }
3042
3043 spin_lock_init(&d->lock);
3044
3045 if (type == DMA_CTX_ISO) {
3046 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3047 OHCI_ISO_MULTICHANNEL_RECEIVE,
3048 dma_rcv_tasklet, (unsigned long) d);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 } else {
3050 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3051 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3052 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3053
3054 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3055 }
3056
3057 return 0;
3058}
3059
3060static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3061{
3062 int i;
3063 struct ti_ohci *ohci = d->ohci;
3064
3065 if (ohci == NULL)
3066 return;
3067
3068 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3069
3070 if (d->prg_cpu) {
3071 for (i=0; i<d->num_desc; i++)
3072 if (d->prg_cpu[i] && d->prg_bus[i]) {
3073 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3074 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3075 }
3076 pci_pool_destroy(d->prg_pool);
3077 OHCI_DMA_FREE("dma_trm prg pool");
3078 kfree(d->prg_cpu);
3079 kfree(d->prg_bus);
3080 }
3081
3082 /* Mark this context as freed. */
3083 d->ohci = NULL;
3084}
3085
3086static int
3087alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3088 enum context_type type, int ctx, int num_desc,
3089 int context_base)
3090{
3091 int i, len;
3092 static char pool_name[20];
3093 static int num_allocs=0;
3094
3095 d->ohci = ohci;
3096 d->type = type;
3097 d->ctx = ctx;
3098 d->num_desc = num_desc;
3099 d->ctrlSet = 0;
3100 d->ctrlClear = 0;
3101 d->cmdPtr = 0;
3102
Stefan Richter85511582005-11-07 06:31:45 -05003103 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3104 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105
3106 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3107 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3108 free_dma_trm_ctx(d);
3109 return -ENOMEM;
3110 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111
3112 len = sprintf(pool_name, "ohci1394_trm_prg");
3113 sprintf(pool_name+len, "%d", num_allocs);
3114 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3115 sizeof(struct at_dma_prg), 4, 0);
3116 if (d->prg_pool == NULL) {
3117 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3118 free_dma_trm_ctx(d);
3119 return -ENOMEM;
3120 }
3121 num_allocs++;
3122
3123 OHCI_DMA_ALLOC("dma_rcv prg pool");
3124
3125 for (i = 0; i < d->num_desc; i++) {
3126 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3127 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3128
3129 if (d->prg_cpu[i] != NULL) {
3130 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3131 } else {
3132 PRINT(KERN_ERR,
3133 "Failed to allocate at dma prg");
3134 free_dma_trm_ctx(d);
3135 return -ENOMEM;
3136 }
3137 }
3138
3139 spin_lock_init(&d->lock);
3140
3141 /* initialize tasklet */
3142 if (type == DMA_CTX_ISO) {
3143 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3144 dma_trm_tasklet, (unsigned long) d);
3145 if (ohci1394_register_iso_tasklet(ohci,
3146 &ohci->it_legacy_tasklet) < 0) {
3147 PRINT(KERN_ERR, "No IT DMA context available");
3148 free_dma_trm_ctx(d);
3149 return -EBUSY;
3150 }
3151
3152 /* IT can be assigned to any context by register_iso_tasklet */
3153 d->ctx = ohci->it_legacy_tasklet.context;
3154 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3155 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3156 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3157 } else {
3158 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3159 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3160 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3161 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3162 }
3163
3164 return 0;
3165}
3166
/*
 * Install a new config ROM image: the header and bus-options quadlets
 * go into their dedicated registers, and the full image is copied into
 * the consistent-DMA buffer that backs the remaining ROM quadlets.
 * config_rom is big-endian (hence the be32_to_cpu conversions).
 */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3176
3177
3178static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3179 quadlet_t data, quadlet_t compare)
3180{
3181 struct ti_ohci *ohci = host->hostdata;
3182 int i;
3183
3184 reg_write(ohci, OHCI1394_CSRData, data);
3185 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3186 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3187
3188 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3189 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3190 break;
3191
3192 mdelay(1);
3193 }
3194
3195 return reg_read(ohci, OHCI1394_CSRData);
3196}
3197
/* Host-driver operations exported to the ieee1394 core for this
 * controller type. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =               ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3207
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208/***********************************
3209 * PCI Driver Interface functions *
3210 ***********************************/
3211
/* Error bail-out used throughout ohci1394_pci_probe(): print the
 * message, undo everything done so far (ohci1394_pci_remove() unwinds
 * based on ohci->init_state), and return 'err'.  Relies on the local
 * variable 'dev' of the enclosing probe function; #undef'd at its end. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
        ohci1394_pci_remove(dev);               \
	return err;				\
} while (0)
3218
/*
 * PCI probe: bring up one OHCI-1394 controller.
 *
 * The sequence is order-critical: enable the PCI device, allocate the
 * hpsb host, map the register window, allocate the config-ROM and
 * self-ID DMA buffers, set up the four async DMA contexts and the
 * legacy IR context, soft-reset the chip, enable LPS, then request the
 * IRQ and register with the highlevel.  ohci->init_state is advanced
 * after each stage so that FAIL() / ohci1394_pci_remove() can unwind
 * exactly what has been done.
 */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	unsigned long ohci_base;

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
        pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here. */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "PCI resource length of %lx too small!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this.  */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
                      &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
        spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;
	spin_lock_init(&ohci->event_lock);

	/*
	 * interrupts are disabled, all right, but... due to SA_SHIRQ we
	 * might get called anyway.  We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be initialized
	 * by that point.
	 */
	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;	/* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	if (phys_dma) {
		host->low_addr_space =
			(u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
		if (!host->low_addr_space)
			host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
	}
	host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3435
/*
 * PCI remove / probe-failure unwind.  The switch deliberately falls
 * through from the current init_state downwards so that exactly the
 * stages that completed in ohci1394_pci_probe() are undone, in reverse
 * order.  Do NOT add break statements between the cases.
 */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

		/* fall through */
	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);

		/* fall through */
	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * dont need to do this.  */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);
		free_dma_rcv_ctx(&ohci->ir_legacy_context);
		free_dma_trm_ctx(&ohci->it_legacy_context);

		/* fall through */
	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");

		/* fall through */
	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");

		/* fall through */
	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

		/* fall through */
	case OHCI_INIT_HAVE_MEM_REGION:
#ifndef PCMCIA
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);
#endif

#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip
	 * clock when the module is removed to save power on
	 * laptops. Turning it back ON is done by the arch code when
	 * pci_enable_device() is called */
	{
		struct device_node* of_node;

		of_node = pci_device_to_OF_node(ohci->dev);
		if (of_node) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

		/* fall through */
	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
3535
3536
/*
 * Power-management resume: re-enable the FireWire cell on PowerMacs,
 * restore the PCI config space saved at suspend time, and re-enable
 * the device.
 *
 * Fix: the original silently discarded the result of
 * pci_enable_device(); it is now propagated so the PM core learns
 * about a failed resume.
 */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
3555
3556
/*
 * Power-management suspend: on PowerMacs the 1394 cell is switched off
 * via the platform feature call; on all machines the PCI config space
 * is saved so ohci1394_pci_resume() can restore it.
 */
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *of_node;

		/* Disable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif

	/* NOTE(review): the return value of pci_save_state() is ignored
	 * and the device is not moved to a low-power PCI state here --
	 * confirm whether that is intentional. */
	pci_save_state(pdev);

	return 0;
}
3574
3575
/* PCI class code for FireWire controllers with the OHCI programming
 * interface (base class serial-bus, prog-if 0x10). */
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

/* Match every device of the OHCI-1394 class, regardless of vendor. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class = 	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask = 	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3591
/* PCI driver hooks; probe/remove and the PM suspend/resume pair. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
};
3600
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601/***********************************
3602 * OHCI1394 Video Interface *
3603 ***********************************/
3604
3605/* essentially the only purpose of this code is to allow another
3606 module to hook into ohci's interrupt handler */
3607
3608int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3609{
3610 int i=0;
3611
3612 /* stop the channel program if it's still running */
3613 reg_write(ohci, reg, 0x8000);
3614
3615 /* Wait until it effectively stops */
3616 while (reg_read(ohci, reg) & 0x400) {
3617 i++;
3618 if (i>5000) {
3619 PRINT(KERN_ERR,
3620 "Runaway loop while stopping context: %s...", msg ? msg : "");
3621 return 1;
3622 }
3623
3624 mb();
3625 udelay(10);
3626 }
3627 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3628 return 0;
3629}
3630
3631void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3632 void (*func)(unsigned long), unsigned long data)
3633{
3634 tasklet_init(&tasklet->tasklet, func, data);
3635 tasklet->type = type;
3636 /* We init the tasklet->link field, so we can list_del() it
3637 * without worrying whether it was added to the list or not. */
3638 INIT_LIST_HEAD(&tasklet->link);
3639}
3640
3641int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3642 struct ohci1394_iso_tasklet *tasklet)
3643{
3644 unsigned long flags, *usage;
3645 int n, i, r = -EBUSY;
3646
3647 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3648 n = ohci->nb_iso_xmit_ctx;
3649 usage = &ohci->it_ctx_usage;
3650 }
3651 else {
3652 n = ohci->nb_iso_rcv_ctx;
3653 usage = &ohci->ir_ctx_usage;
3654
3655 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3656 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3657 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3658 return r;
3659 }
3660 }
3661 }
3662
3663 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3664
3665 for (i = 0; i < n; i++)
3666 if (!test_and_set_bit(i, usage)) {
3667 tasklet->context = i;
3668 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3669 r = 0;
3670 break;
3671 }
3672
3673 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3674
3675 return r;
3676}
3677
3678void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3679 struct ohci1394_iso_tasklet *tasklet)
3680{
3681 unsigned long flags;
3682
3683 tasklet_kill(&tasklet->tasklet);
3684
3685 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3686
3687 if (tasklet->type == OHCI_ISO_TRANSMIT)
3688 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3689 else {
3690 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3691
3692 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3693 clear_bit(0, &ohci->ir_multichannel_used);
3694 }
3695 }
3696
3697 list_del(&tasklet->link);
3698
3699 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3700}
3701
/* Entry points used by other ieee1394 modules that hook into this
 * driver's context handling and interrupt dispatch. */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3706
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707/***********************************
3708 * General module initialization *
3709 ***********************************/
3710
/* Module metadata. */
MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");
3714
/* Module exit: unregister the PCI driver, which detaches (removes) all
 * bound controllers. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3719
/* Module init: register with the PCI core; per-device bring-up happens
 * in ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}

module_init(ohci1394_init);
module_exit(ohci1394_cleanup);