/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"


/* Number of buffers per channel */

#define NUM_TX_BUF      2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF      6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE        1576	/* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported */

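/* Each initializer lists, in order: board name, first I/O base address,
   spacing between successive cards, size of the I/O region, maximum number
   of cards, SCC register offset, 8253/8254 timer offset, timer input clock,
   and SCC PCLK frequency -- i.e. the fields of struct scc_hardware below. */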
#define HW_PI           { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
                          0, 8, 1843200, 3686400 }
#define HW_PI2          { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
                          0, 8, 3686400, 7372800 }
#define HW_TWIN         { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
                          0, 4, 6144000, 6144000 }
#define HW_S5           { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
                          0, 8, 4915200, 9830400 }

#define HARDWARE        { HW_PI, HW_PI2, HW_TWIN, HW_S5 }

#define TMR_0_HZ        25600	/* Frequency of timer 0 */

#define TYPE_PI         0
#define TYPE_PI2        1
#define TYPE_TWIN       2
#define TYPE_S5         3
#define NUM_TYPES       4

#define MAX_NUM_DEVS    32


/* SCC chips supported */

#define Z8530           0
#define Z85C30          1
#define Z85230          2

#define CHIPNAMES       { "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD        0x00
#define SCCB_DATA       0x01
#define SCCA_CMD        0x02
#define SCCA_DATA       0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0        0x00
#define TMR_CNT1        0x01
#define TMR_CNT2        0x02
#define TMR_CTRL        0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK    0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG    0x08
#define TWIN_CLR_TMR1   0x09
#define TWIN_CLR_TMR2   0x0a
#define TWIN_SPARE_1    0x0b
#define TWIN_DMA_CFG    0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2    0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK    0x01
#define TWIN_TMR1_MSK   0x02
#define TWIN_TMR2_MSK   0x04
#define TWIN_INT_MSK    0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON    0x01
#define TWIN_DTRB_ON    0x02
#define TWIN_EXTCLKA    0x04
#define TWIN_EXTCLKB    0x08
#define TWIN_LOOPA_ON   0x10
#define TWIN_LOOPB_ON   0x20
#define TWIN_EI         0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1 0x08
#define TWIN_DMA_HDX_R1 0x0a
#define TWIN_DMA_HDX_T3 0x14
#define TWIN_DMA_HDX_R3 0x16
#define TWIN_DMA_FDX_T3R1 0x1b
#define TWIN_DMA_FDX_T1R3 0x1d


/* Status values */

#define IDLE            0
#define TX_HEAD         1
#define TX_DATA         2
#define TX_PAUSE        3
#define TX_TAIL         4
#define RTS_OFF         5
#define WAIT            6
#define DCD_ON          7
#define RX_ON           8
#define DCD_OFF         9

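/* Rough channel state flow, driven by tm_isr() and es_isr() below:
   IDLE -> TX_HEAD (txdelay) -> TX_DATA, then TX_PAUSE (more frames queued)
   or TX_TAIL -> RTS_OFF -> RX_ON (if DCD is up) or WAIT.  From IDLE or WAIT
   a DCD change leads to DCD_ON/DCD_OFF and then to RX_ON or to a
   p-persistence wait before the next transmission. */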

/* Ioctls */

#define SIOCGSCCPARAM   SIOCDEVPRIVATE
#define SIOCSSCCPARAM   (SIOCDEVPRIVATE+1)


/* Data types */

struct scc_param {
        int pclk_hz;		/* frequency of BRG input (don't change) */
        int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
        int nrzi;		/* 0 (nrz), 1 (nrzi) */
        int clocks;		/* see dmascc_cfg documentation */
        int txdelay;		/* [1/TMR_0_HZ] */
        int txtimeout;		/* [1/HZ] */
        int txtail;		/* [1/TMR_0_HZ] */
        int waittime;		/* [1/TMR_0_HZ] */
        int slottime;		/* [1/TMR_0_HZ] */
        int persist;		/* 1 ... 256 */
        int dma;		/* -1 (disable), 0, 1, 3 */
        int txpause;		/* [1/TMR_0_HZ] */
        int rtsoff;		/* [1/TMR_0_HZ] */
        int dcdon;		/* [1/TMR_0_HZ] */
        int dcdoff;		/* [1/TMR_0_HZ] */
};
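
/*
 * The two private ioctls above carry this structure to and from user space
 * (see scc_ioctl()).  A minimal user-space sketch -- not part of the driver,
 * shown only to illustrate the interface, with the ioctl numbers assumed to
 * be replicated in the user program -- might look like this:
 *
 *	struct ifreq ifr;
 *	struct scc_param param;
 *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ);
 *	ifr.ifr_data = (char *) &param;
 *	ioctl(sock, SIOCGSCCPARAM, &ifr);	read current parameters
 *	param.txdelay = 10;			e.g. adjust TX delay
 *	ioctl(sock, SIOCSSCCPARAM, &ifr);	write back (needs CAP_NET_ADMIN,
 *						interface must be down)
 */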

struct scc_hardware {
        char *name;
        int io_region;
        int io_delta;
        int io_size;
        int num_devs;
        int scc_offset;
        int tmr_offset;
        int tmr_hz;
        int pclk_hz;
};

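/* Per-channel state.  Received frames are collected in rx_buf[] and handed
   to the networking core from the rx_work bottom half; outgoing frames are
   queued in tx_buf[].  Both arrays are used as ring buffers: *_head is the
   next slot to fill, *_tail the next slot to consume, *_count the number of
   occupied slots, all protected by ring_lock. */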
struct scc_priv {
        int type;
        int chip;
        struct net_device *dev;
        struct scc_info *info;

        int channel;
        int card_base, scc_cmd, scc_data;
        int tmr_cnt, tmr_ctrl, tmr_mode;
        struct scc_param param;
        char rx_buf[NUM_RX_BUF][BUF_SIZE];
        int rx_len[NUM_RX_BUF];
        int rx_ptr;
        struct work_struct rx_work;
        int rx_head, rx_tail, rx_count;
        int rx_over;
        char tx_buf[NUM_TX_BUF][BUF_SIZE];
        int tx_len[NUM_TX_BUF];
        int tx_ptr;
        int tx_head, tx_tail, tx_count;
        int state;
        unsigned long tx_start;
        int rr0;
        spinlock_t *register_lock;	/* Per scc_info */
        spinlock_t ring_lock;
};

struct scc_info {
        int irq_used;
        int twin_serial_cfg;
        struct net_device *dev[2];
        struct scc_priv priv[2];
        struct scc_info *next;
        spinlock_t register_lock;	/* Per device register lock */
};


/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;


/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_array(io, int, NULL, 0);
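/* io= takes the I/O base address(es) of the installed card(s), e.g.
   "modprobe dmascc io=0x300,0x320"; if none are given, all known addresses
   are autoprobed. */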
MODULE_LICENSE("GPL");

static void __exit dmascc_exit(void)
{
        int i;
        struct scc_info *info;

        while (first) {
                info = first;

                /* Unregister devices */
                for (i = 0; i < 2; i++)
                        unregister_netdev(info->dev[i]);

                /* Reset board */
                if (info->priv[0].type == TYPE_TWIN)
                        outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
                write_scc(&info->priv[0], R9, FHWRES);
                release_region(info->dev[0]->base_addr,
                               hw[info->priv[0].type].io_size);

                for (i = 0; i < 2; i++)
                        free_netdev(info->dev[i]);

                /* Free memory */
                first = info->next;
                kfree(info);
        }
}

static int __init dmascc_init(void)
{
        int h, i, j, n;
        int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
            t1[MAX_NUM_DEVS];
        unsigned t_val;
        unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
            counting[MAX_NUM_DEVS];

        /* Initialize random number generator */
        rand = jiffies;
        /* Cards found = 0 */
        n = 0;
        /* Warning message */
        if (!io[0])
                printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

        /* Run autodetection for each card type */
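        /* Detection works via the board's 8253/8254: timer 0 is programmed
           to output TMR_0_HZ and timer 1 to count 10/HZ seconds worth of
           those ticks.  If reading timer 1 back shows it expiring after
           roughly 10 jiffies (9..11 accepted below), a card is assumed to
           be present at that address and setup_adapter() is called. */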
        for (h = 0; h < NUM_TYPES; h++) {

                if (io[0]) {
                        /* User-specified I/O address regions */
                        for (i = 0; i < hw[h].num_devs; i++)
                                base[i] = 0;
                        for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
                                j = (io[i] -
                                     hw[h].io_region) / hw[h].io_delta;
                                if (j >= 0 && j < hw[h].num_devs
                                    && hw[h].io_region +
                                    j * hw[h].io_delta == io[i]) {
                                        base[j] = io[i];
                                }
                        }
                } else {
                        /* Default I/O address regions */
                        for (i = 0; i < hw[h].num_devs; i++) {
                                base[i] =
                                    hw[h].io_region + i * hw[h].io_delta;
                        }
                }

                /* Check valid I/O address regions */
                for (i = 0; i < hw[h].num_devs; i++)
                        if (base[i]) {
                                if (!request_region
                                    (base[i], hw[h].io_size, "dmascc"))
                                        base[i] = 0;
                                else {
                                        tcmd[i] =
                                            base[i] + hw[h].tmr_offset +
                                            TMR_CTRL;
                                        t0[i] =
                                            base[i] + hw[h].tmr_offset +
                                            TMR_CNT0;
                                        t1[i] =
                                            base[i] + hw[h].tmr_offset +
                                            TMR_CNT1;
                                }
                        }

                /* Start timers */
                for (i = 0; i < hw[h].num_devs; i++)
                        if (base[i]) {
                                /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
                                outb(0x36, tcmd[i]);
                                outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
                                     t0[i]);
                                outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
                                     t0[i]);
                                /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
                                outb(0x70, tcmd[i]);
                                outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
                                outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
                                start[i] = jiffies;
                                delay[i] = 0;
                                counting[i] = 1;
                                /* Timer 2: LSB+MSB, Mode 0 */
                                outb(0xb0, tcmd[i]);
                        }
                time = jiffies;
                /* Wait until counter registers are loaded */
                udelay(2000000 / TMR_0_HZ);

                /* Timing loop */
                while (jiffies - time < 13) {
                        for (i = 0; i < hw[h].num_devs; i++)
                                if (base[i] && counting[i]) {
                                        /* Read back Timer 1: latch; read LSB; read MSB */
                                        outb(0x40, tcmd[i]);
                                        t_val =
                                            inb(t1[i]) + (inb(t1[i]) << 8);
                                        /* Also check whether counter did wrap */
                                        if (t_val == 0
                                            || t_val > TMR_0_HZ / HZ * 10)
                                                counting[i] = 0;
                                        delay[i] = jiffies - start[i];
                                }
                }

                /* Evaluate measurements */
                for (i = 0; i < hw[h].num_devs; i++)
                        if (base[i]) {
                                if ((delay[i] >= 9 && delay[i] <= 11) &&
                                    /* Ok, we have found an adapter */
                                    (setup_adapter(base[i], h, n) == 0))
                                        n++;
                                else
                                        release_region(base[i],
                                                       hw[h].io_size);
                        }

        }			/* NUM_TYPES */

        /* If any adapter was successfully initialized, return ok */
        if (n)
                return 0;

        /* If no adapter found, return error */
        printk(KERN_INFO "dmascc: no adapters found\n");
        return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void __init dev_setup(struct net_device *dev)
{
        dev->type = ARPHRD_AX25;
        dev->hard_header_len = AX25_MAX_HEADER_LEN;
        dev->mtu = 1500;
        dev->addr_len = AX25_ADDR_LEN;
        dev->tx_queue_len = 64;
        memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
        memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}

static int __init setup_adapter(int card_base, int type, int n)
{
        int i, irq, chip;
        struct scc_info *info;
        struct net_device *dev;
        struct scc_priv *priv;
        unsigned long time;
        unsigned int irqs;
        int tmr_base = card_base + hw[type].tmr_offset;
        int scc_base = card_base + hw[type].scc_offset;
        char *chipnames[] = CHIPNAMES;

        /* Initialize what is necessary for write_scc and write_scc_data */
        info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
        if (!info) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
                       hw[type].name, card_base);
                goto out;
        }


        info->dev[0] = alloc_netdev(0, "", dev_setup);
        if (!info->dev[0]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
                       hw[type].name, card_base);
                goto out1;
        }

        info->dev[1] = alloc_netdev(0, "", dev_setup);
        if (!info->dev[1]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
                       hw[type].name, card_base);
                goto out2;
        }
        spin_lock_init(&info->register_lock);

        priv = &info->priv[0];
        priv->type = type;
        priv->card_base = card_base;
        priv->scc_cmd = scc_base + SCCA_CMD;
        priv->scc_data = scc_base + SCCA_DATA;
        priv->register_lock = &info->register_lock;

        /* Reset SCC */
        write_scc(priv, R9, FHWRES | MIE | NV);

        /* Determine type of chip by enabling SDLC/HDLC enhancements */
        write_scc(priv, R15, SHDLCE);
        if (!read_scc(priv, R15)) {
                /* WR7' not present. This is an ordinary Z8530 SCC. */
                chip = Z8530;
        } else {
                /* Put one character in TX FIFO */
                write_scc_data(priv, 0, 0);
                if (read_scc(priv, R0) & Tx_BUF_EMP) {
                        /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
                        chip = Z85230;
                } else {
                        /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
                        chip = Z85C30;
                }
        }
        write_scc(priv, R15, 0);

        /* Start IRQ auto-detection */
        irqs = probe_irq_on();

        /* Enable interrupts */
        if (type == TYPE_TWIN) {
                outb(0, card_base + TWIN_DMA_CFG);
                inb(card_base + TWIN_CLR_TMR1);
                inb(card_base + TWIN_CLR_TMR2);
                info->twin_serial_cfg = TWIN_EI;
                outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
        } else {
                write_scc(priv, R15, CTSIE);
                write_scc(priv, R0, RES_EXT_INT);
                write_scc(priv, R1, EXT_INT_ENAB);
        }

        /* Start timer */
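        /* Load timer 1 with a count of 1 so it expires almost immediately.
           The resulting one-shot interrupt (seen via the SCC's CTS input on
           the PI/PI2/S5 boards, hence the CTSIE enabled above, or via the
           PackeTwin's own timer interrupt) is what probe_irq_off() picks up
           below. */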
        outb(1, tmr_base + TMR_CNT1);
        outb(0, tmr_base + TMR_CNT1);

        /* Wait and detect IRQ */
        time = jiffies;
        while (jiffies - time < 2 + HZ / TMR_0_HZ);
        irq = probe_irq_off(irqs);

        /* Clear pending interrupt, disable interrupts */
        if (type == TYPE_TWIN) {
                inb(card_base + TWIN_CLR_TMR1);
        } else {
                write_scc(priv, R1, 0);
                write_scc(priv, R15, 0);
                write_scc(priv, R0, RES_EXT_INT);
        }

        if (irq <= 0) {
                printk(KERN_ERR
                       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
                       hw[type].name, card_base, irq);
                goto out3;
        }

        /* Set up data structures */
        for (i = 0; i < 2; i++) {
                dev = info->dev[i];
                priv = &info->priv[i];
                priv->type = type;
                priv->chip = chip;
                priv->dev = dev;
                priv->info = info;
                priv->channel = i;
                spin_lock_init(&priv->ring_lock);
                priv->register_lock = &info->register_lock;
                priv->card_base = card_base;
                priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
                priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
                priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
                priv->tmr_ctrl = tmr_base + TMR_CTRL;
                priv->tmr_mode = i ? 0xb0 : 0x70;
                priv->param.pclk_hz = hw[type].pclk_hz;
                priv->param.brg_tc = -1;
                priv->param.clocks = TCTRxCP | RCRTxCP;
                priv->param.persist = 256;
                priv->param.dma = -1;
                INIT_WORK(&priv->rx_work, rx_bh);
                dev->ml_priv = priv;
                sprintf(dev->name, "dmascc%i", 2 * n + i);
                dev->base_addr = card_base;
                dev->irq = irq;
                dev->open = scc_open;
                dev->stop = scc_close;
                dev->do_ioctl = scc_ioctl;
                dev->hard_start_xmit = scc_send_packet;
                dev->header_ops = &ax25_header_ops;
                dev->set_mac_address = scc_set_mac_address;
        }
        if (register_netdev(info->dev[0])) {
                printk(KERN_ERR "dmascc: could not register %s\n",
                       info->dev[0]->name);
                goto out3;
        }
        if (register_netdev(info->dev[1])) {
                printk(KERN_ERR "dmascc: could not register %s\n",
                       info->dev[1]->name);
                goto out4;
        }


        info->next = first;
        first = info;
        printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
               hw[type].name, chipnames[chip], card_base, irq);
        return 0;

      out4:
        unregister_netdev(info->dev[0]);
      out3:
        if (info->priv[0].type == TYPE_TWIN)
                outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
        write_scc(&info->priv[0], R9, FHWRES);
        free_netdev(info->dev[1]);
      out2:
        free_netdev(info->dev[0]);
      out1:
        kfree(info);
      out:
        return -1;
}


/* Driver functions */

static void write_scc(struct scc_priv *priv, int reg, int val)
{
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                if (reg)
                        outb(reg, priv->scc_cmd);
                outb(val, priv->scc_cmd);
                return;
        case TYPE_TWIN:
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                outb_p(val, priv->scc_cmd);
                return;
        default:
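                /* PI/PI2: mask DMA requests via PI_DREQ_MASK around the
                   register access, presumably so it cannot collide with a
                   DMA cycle; the shared register_lock serializes accesses
                   from both channels. */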
                spin_lock_irqsave(priv->register_lock, flags);
                outb_p(0, priv->card_base + PI_DREQ_MASK);
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                outb_p(val, priv->scc_cmd);
                outb(1, priv->card_base + PI_DREQ_MASK);
                spin_unlock_irqrestore(priv->register_lock, flags);
                return;
        }
}


static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                outb(val, priv->scc_data);
                return;
        case TYPE_TWIN:
                outb_p(val, priv->scc_data);
                return;
        default:
                if (fast)
                        outb_p(val, priv->scc_data);
                else {
                        spin_lock_irqsave(priv->register_lock, flags);
                        outb_p(0, priv->card_base + PI_DREQ_MASK);
                        outb_p(val, priv->scc_data);
                        outb(1, priv->card_base + PI_DREQ_MASK);
                        spin_unlock_irqrestore(priv->register_lock, flags);
                }
                return;
        }
}


static int read_scc(struct scc_priv *priv, int reg)
{
        int rc;
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                if (reg)
                        outb(reg, priv->scc_cmd);
                return inb(priv->scc_cmd);
        case TYPE_TWIN:
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                return inb_p(priv->scc_cmd);
        default:
                spin_lock_irqsave(priv->register_lock, flags);
                outb_p(0, priv->card_base + PI_DREQ_MASK);
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                rc = inb_p(priv->scc_cmd);
                outb(1, priv->card_base + PI_DREQ_MASK);
                spin_unlock_irqrestore(priv->register_lock, flags);
                return rc;
        }
}


static int read_scc_data(struct scc_priv *priv)
{
        int rc;
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                return inb(priv->scc_data);
        case TYPE_TWIN:
                return inb_p(priv->scc_data);
        default:
                spin_lock_irqsave(priv->register_lock, flags);
                outb_p(0, priv->card_base + PI_DREQ_MASK);
                rc = inb_p(priv->scc_data);
                outb(1, priv->card_base + PI_DREQ_MASK);
                spin_unlock_irqrestore(priv->register_lock, flags);
                return rc;
        }
}

static int scc_open(struct net_device *dev)
{
        struct scc_priv *priv = dev->ml_priv;
        struct scc_info *info = priv->info;
        int card_base = priv->card_base;

        /* Request IRQ if not already used by other channel */
        if (!info->irq_used) {
                if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
                        return -EAGAIN;
                }
        }
        info->irq_used++;

        /* Request DMA if required */
        if (priv->param.dma >= 0) {
                if (request_dma(priv->param.dma, "dmascc")) {
                        if (--info->irq_used == 0)
                                free_irq(dev->irq, info);
                        return -EAGAIN;
                } else {
                        unsigned long flags = claim_dma_lock();
                        clear_dma_ff(priv->param.dma);
                        release_dma_lock(flags);
                }
        }

        /* Initialize local variables */
        priv->rx_ptr = 0;
        priv->rx_over = 0;
        priv->rx_head = priv->rx_tail = priv->rx_count = 0;
        priv->state = IDLE;
        priv->tx_head = priv->tx_tail = priv->tx_count = 0;
        priv->tx_ptr = 0;

        /* Reset channel */
        write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
        /* X1 clock, SDLC mode */
        write_scc(priv, R4, SDLC | X1CLK);
        /* DMA */
        write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
        /* 8 bit RX char, RX disable */
        write_scc(priv, R3, Rx8);
        /* 8 bit TX char, TX disable */
        write_scc(priv, R5, Tx8);
        /* SDLC address field */
        write_scc(priv, R6, 0);
        /* SDLC flag */
        write_scc(priv, R7, FLAG);
        switch (priv->chip) {
        case Z85C30:
                /* Select WR7' */
                write_scc(priv, R15, SHDLCE);
                /* Auto EOM reset */
                write_scc(priv, R7, AUTOEOM);
                write_scc(priv, R15, 0);
                break;
        case Z85230:
                /* Select WR7' */
                write_scc(priv, R15, SHDLCE);
                /* The following bits are set (see 2.5.2.1):
                   - Automatic EOM reset
                   - Interrupt request if RX FIFO is half full
                   This bit should be ignored in DMA mode (according to the
                   documentation), but actually isn't. The receiver doesn't work if
                   it is set. Thus, we have to clear it in DMA mode.
                   - Interrupt/DMA request if TX FIFO is completely empty
                   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
                   compatibility).
                   b) If cleared, DMA requests may follow each other very quickly,
                   filling up the TX FIFO.
                   Advantage: TX works even in case of high bus latency.
                   Disadvantage: Edge-triggered DMA request circuitry may miss
                   a request. No more data is delivered, resulting
                   in a TX FIFO underrun.
                   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
                   The PackeTwin doesn't. I don't know about the PI, but let's
                   assume it behaves like the PI2.
                 */
                if (priv->param.dma >= 0) {
                        if (priv->type == TYPE_TWIN)
                                write_scc(priv, R7, AUTOEOM | TXFIFOE);
                        else
                                write_scc(priv, R7, AUTOEOM);
                } else {
                        write_scc(priv, R7, AUTOEOM | RXFIFOH);
                }
                write_scc(priv, R15, 0);
                break;
        }
        /* Preset CRC, NRZ(I) encoding */
        write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

        /* Configure baud rate generator */
        if (priv->param.brg_tc >= 0) {
                /* Program BR generator */
                write_scc(priv, R12, priv->param.brg_tc & 0xFF);
                write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
                /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
                   PackeTwin, not connected on the PI2); set DPLL source to BRG */
                write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
                /* Enable DPLL */
                write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
        } else {
                /* Disable BR generator */
                write_scc(priv, R14, DTRREQ | BRSRC);
        }

        /* Configure clocks */
        if (priv->type == TYPE_TWIN) {
                /* Disable external TX clock receiver */
                outb((info->twin_serial_cfg &=
                      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
                     card_base + TWIN_SERIAL_CFG);
        }
        write_scc(priv, R11, priv->param.clocks);
        if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
                /* Enable external TX clock receiver */
                outb((info->twin_serial_cfg |=
                      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
                     card_base + TWIN_SERIAL_CFG);
        }

        /* Configure PackeTwin */
        if (priv->type == TYPE_TWIN) {
                /* Assert DTR, enable interrupts */
                outb((info->twin_serial_cfg |= TWIN_EI |
                      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
                     card_base + TWIN_SERIAL_CFG);
        }

        /* Read current status */
        priv->rr0 = read_scc(priv, R0);
        /* Enable DCD interrupt */
        write_scc(priv, R15, DCDIE);

        netif_start_queue(dev);

        return 0;
}


static int scc_close(struct net_device *dev)
{
        struct scc_priv *priv = dev->ml_priv;
        struct scc_info *info = priv->info;
        int card_base = priv->card_base;

        netif_stop_queue(dev);

        if (priv->type == TYPE_TWIN) {
                /* Drop DTR */
                outb((info->twin_serial_cfg &=
                      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
                     card_base + TWIN_SERIAL_CFG);
        }

        /* Reset channel, free DMA and IRQ */
        write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
        if (priv->param.dma >= 0) {
                if (priv->type == TYPE_TWIN)
                        outb(0, card_base + TWIN_DMA_CFG);
                free_dma(priv->param.dma);
        }
        if (--info->irq_used == 0)
                free_irq(dev->irq, info);

        return 0;
}


static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct scc_priv *priv = dev->ml_priv;

        switch (cmd) {
        case SIOCGSCCPARAM:
                if (copy_to_user
                    (ifr->ifr_data, &priv->param,
                     sizeof(struct scc_param)))
                        return -EFAULT;
                return 0;
        case SIOCSSCCPARAM:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (netif_running(dev))
                        return -EAGAIN;
                if (copy_from_user
                    (&priv->param, ifr->ifr_data,
                     sizeof(struct scc_param)))
                        return -EFAULT;
                return 0;
        default:
                return -EINVAL;
        }
}


static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct scc_priv *priv = dev->ml_priv;
        unsigned long flags;
        int i;

        /* Temporarily stop the scheduler feeding us packets */
        netif_stop_queue(dev);

        /* Transfer data to DMA buffer */
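        /* Byte 0 of the skb is the leading control byte added by the AX.25
           stack (KISS-style, 0 for data frames); it is not transmitted,
           hence the offset of 1 below.  rx_bh() prepends a zero byte again
           on receive. */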
        i = priv->tx_head;
        skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
        priv->tx_len[i] = skb->len - 1;

        /* Clear interrupts while we touch our circular buffers */

        spin_lock_irqsave(&priv->ring_lock, flags);
        /* Move the ring buffer's head */
        priv->tx_head = (i + 1) % NUM_TX_BUF;
        priv->tx_count++;

        /* If we just filled up the last buffer, leave queue stopped.
           The higher layers must wait until we have a DMA buffer
           to accept the data. */
        if (priv->tx_count < NUM_TX_BUF)
                netif_wake_queue(dev);

        /* Set new TX state */
        if (priv->state == IDLE) {
                /* Assert RTS, start timer */
                priv->state = TX_HEAD;
                priv->tx_start = jiffies;
                write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
                write_scc(priv, R15, 0);
                start_timer(priv, priv->param.txdelay, 0);
        }

        /* Turn interrupts back on and free buffer */
        spin_unlock_irqrestore(&priv->ring_lock, flags);
        dev_kfree_skb(skb);

        return 0;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
        memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
               dev->addr_len);
        return 0;
}


static inline void tx_on(struct scc_priv *priv)
{
        int i, n;
        unsigned long flags;

        if (priv->param.dma >= 0) {
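                /* The first byte(s) of the frame are written to the TX FIFO
                   by hand to kick off transmission (3 bytes for the Z85230's
                   deeper FIFO, 1 otherwise); the DMA transfer below is set
                   up to start right after them. */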
                n = (priv->chip == Z85230) ? 3 : 1;
                /* Program DMA controller */
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
                set_dma_addr(priv->param.dma,
                             (int) priv->tx_buf[priv->tx_tail] + n);
                set_dma_count(priv->param.dma,
                              priv->tx_len[priv->tx_tail] - n);
                release_dma_lock(flags);
                /* Enable TX underrun interrupt */
                write_scc(priv, R15, TxUIE);
                /* Configure DREQ */
                if (priv->type == TYPE_TWIN)
                        outb((priv->param.dma ==
                              1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
                             priv->card_base + TWIN_DMA_CFG);
                else
                        write_scc(priv, R1,
                                  EXT_INT_ENAB | WT_FN_RDYFN |
                                  WT_RDY_ENAB);
                /* Write first byte(s) */
                spin_lock_irqsave(priv->register_lock, flags);
                for (i = 0; i < n; i++)
                        write_scc_data(priv,
                                       priv->tx_buf[priv->tx_tail][i], 1);
                enable_dma(priv->param.dma);
                spin_unlock_irqrestore(priv->register_lock, flags);
        } else {
                write_scc(priv, R15, TxUIE);
                write_scc(priv, R1,
                          EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
                tx_isr(priv);
        }
        /* Reset EOM latch if we do not have the AUTOEOM feature */
        if (priv->chip == Z8530)
                write_scc(priv, R0, RES_EOM_L);
}


static inline void rx_on(struct scc_priv *priv)
{
        unsigned long flags;

        /* Clear RX FIFO */
        while (read_scc(priv, R0) & Rx_CH_AV)
                read_scc_data(priv);
        priv->rx_over = 0;
        if (priv->param.dma >= 0) {
                /* Program DMA controller */
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_READ);
                set_dma_addr(priv->param.dma,
                             (int) priv->rx_buf[priv->rx_head]);
                set_dma_count(priv->param.dma, BUF_SIZE);
                release_dma_lock(flags);
                enable_dma(priv->param.dma);
                /* Configure PackeTwin DMA */
                if (priv->type == TYPE_TWIN) {
                        outb((priv->param.dma ==
                              1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
                             priv->card_base + TWIN_DMA_CFG);
                }
                /* Sp. cond. intr. only, ext int enable, RX DMA enable */
                write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
                          WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
        } else {
                /* Reset current frame */
                priv->rx_ptr = 0;
                /* Intr. on all Rx characters and Sp. cond., ext int enable */
                write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
                          WT_FN_RDYFN);
        }
        write_scc(priv, R0, ERR_RES);
        write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}


static inline void rx_off(struct scc_priv *priv)
{
        /* Disable receiver */
        write_scc(priv, R3, Rx8);
        /* Disable DREQ / RX interrupt */
        if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
                outb(0, priv->card_base + TWIN_DMA_CFG);
        else
                write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
        /* Disable DMA */
        if (priv->param.dma >= 0)
                disable_dma(priv->param.dma);
}


static void start_timer(struct scc_priv *priv, int t, int r15)
{
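        /* The 8253/8254 counters are used in mode 0 here (interrupt on
           terminal count), so loading the count starts a one-shot.  On the
           PI/PI2/S5 boards the expiry shows up on the SCC's CTS input,
           which is why CTSIE is enabled below and why es_isr() forwards CTS
           transitions to tm_isr(); the PackeTwin reports its timers through
           dedicated interrupt bits handled in scc_isr(). */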
        outb(priv->tmr_mode, priv->tmr_ctrl);
        if (t == 0) {
                tm_isr(priv);
        } else if (t > 0) {
                outb(t & 0xFF, priv->tmr_cnt);
                outb((t >> 8) & 0xFF, priv->tmr_cnt);
                if (priv->type != TYPE_TWIN) {
                        write_scc(priv, R15, r15 | CTSIE);
                        priv->rr0 |= CTS;
                }
        }
}


static inline unsigned char random(void)
{
        /* See "Numerical Recipes in C", second edition, p. 284 */
        rand = rand * 1664525L + 1013904223L;
        return (unsigned char) (rand >> 24);
}

static inline void z8530_isr(struct scc_info *info)
{
        int is, i = 100;

        while ((is = read_scc(&info->priv[0], R3)) && i--) {
                if (is & CHARxIP) {
                        rx_isr(&info->priv[0]);
                } else if (is & CHATxIP) {
                        tx_isr(&info->priv[0]);
                } else if (is & CHAEXT) {
                        es_isr(&info->priv[0]);
                } else if (is & CHBRxIP) {
                        rx_isr(&info->priv[1]);
                } else if (is & CHBTxIP) {
                        tx_isr(&info->priv[1]);
                } else {
                        es_isr(&info->priv[1]);
                }
                write_scc(&info->priv[0], R0, RES_H_IUS);
                i++;
        }
        if (i < 0) {
                printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
                       is);
        }
        /* Ok, no interrupts pending from this 8530. The INT line should
           be inactive now. */
}


static irqreturn_t scc_isr(int irq, void *dev_id)
{
        struct scc_info *info = dev_id;

        spin_lock(info->priv[0].register_lock);
        /* At this point interrupts are enabled, and the interrupt under service
           is already acknowledged, but masked off.

           Interrupt processing: We loop until we know that the IRQ line is
           low. If another positive edge occurs afterwards during the ISR,
           another interrupt will be triggered by the interrupt controller
           as soon as the IRQ level is enabled again (see asm/irq.h).

           Bottom-half handlers will be processed after scc_isr(). This is
           important, since we only have small ringbuffers and want new data
           to be fetched/delivered immediately. */

        if (info->priv[0].type == TYPE_TWIN) {
                int is, card_base = info->priv[0].card_base;
                while ((is = ~inb(card_base + TWIN_INT_REG)) &
                       TWIN_INT_MSK) {
                        if (is & TWIN_SCC_MSK) {
                                z8530_isr(info);
                        } else if (is & TWIN_TMR1_MSK) {
                                inb(card_base + TWIN_CLR_TMR1);
                                tm_isr(&info->priv[0]);
                        } else {
                                inb(card_base + TWIN_CLR_TMR2);
                                tm_isr(&info->priv[1]);
                        }
                }
        } else
                z8530_isr(info);
        spin_unlock(info->priv[0].register_lock);
        return IRQ_HANDLED;
}


static void rx_isr(struct scc_priv *priv)
{
        if (priv->param.dma >= 0) {
                /* Check special condition and perform error reset. See 2.4.7.5. */
                special_condition(priv, read_scc(priv, R1));
                write_scc(priv, R0, ERR_RES);
        } else {
                /* Check special condition for each character. Error reset not necessary.
                   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
                int rc;
                while (read_scc(priv, R0) & Rx_CH_AV) {
                        rc = read_scc(priv, R1);
                        if (priv->rx_ptr < BUF_SIZE)
                                priv->rx_buf[priv->rx_head][priv->
                                                            rx_ptr++] =
                                    read_scc_data(priv);
                        else {
                                priv->rx_over = 2;
                                read_scc_data(priv);
                        }
                        special_condition(priv, rc);
                }
        }
}


static void special_condition(struct scc_priv *priv, int rc)
{
        int cb;
        unsigned long flags;

        /* See Figure 2-15. Only overrun and EOF need to be checked. */

        if (rc & Rx_OVR) {
                /* Receiver overrun */
                priv->rx_over = 1;
                if (priv->param.dma < 0)
                        write_scc(priv, R0, ERR_RES);
        } else if (rc & END_FR) {
                /* End of frame. Get byte count */
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
                            2;
                        release_dma_lock(flags);
                } else {
                        cb = priv->rx_ptr - 2;
                }
                if (priv->rx_over) {
                        /* We had an overrun */
                        priv->dev->stats.rx_errors++;
                        if (priv->rx_over == 2)
                                priv->dev->stats.rx_length_errors++;
                        else
                                priv->dev->stats.rx_fifo_errors++;
                        priv->rx_over = 0;
                } else if (rc & CRC_ERR) {
                        /* Count invalid CRC only if packet length >= minimum */
                        if (cb >= 15) {
                                priv->dev->stats.rx_errors++;
                                priv->dev->stats.rx_crc_errors++;
                        }
                } else {
                        if (cb >= 15) {
                                if (priv->rx_count < NUM_RX_BUF - 1) {
                                        /* Put good frame in FIFO */
                                        priv->rx_len[priv->rx_head] = cb;
                                        priv->rx_head =
                                            (priv->rx_head +
                                             1) % NUM_RX_BUF;
                                        priv->rx_count++;
                                        schedule_work(&priv->rx_work);
                                } else {
                                        priv->dev->stats.rx_errors++;
                                        priv->dev->stats.rx_over_errors++;
                                }
                        }
                }
                /* Get ready for new frame */
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        set_dma_addr(priv->param.dma,
                                     (int) priv->rx_buf[priv->rx_head]);
                        set_dma_count(priv->param.dma, BUF_SIZE);
                        release_dma_lock(flags);
                } else {
                        priv->rx_ptr = 0;
                }
        }
}


static void rx_bh(struct work_struct *ugli_api)
{
        struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
        int i = priv->rx_tail;
        int cb;
        unsigned long flags;
        struct sk_buff *skb;
        unsigned char *data;

        spin_lock_irqsave(&priv->ring_lock, flags);
        while (priv->rx_count) {
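                /* Work on one frame with the ring lock dropped: only the
                   tail/count bookkeeping needs protection, and keeping the
                   lock released while we allocate and deliver the skb keeps
                   the interrupt side from being blocked. */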
                spin_unlock_irqrestore(&priv->ring_lock, flags);
                cb = priv->rx_len[i];
                /* Allocate buffer */
                skb = dev_alloc_skb(cb + 1);
                if (skb == NULL) {
                        /* Drop packet */
                        priv->dev->stats.rx_dropped++;
                } else {
                        /* Fill buffer */
                        data = skb_put(skb, cb + 1);
                        data[0] = 0;
                        memcpy(&data[1], priv->rx_buf[i], cb);
                        skb->protocol = ax25_type_trans(skb, priv->dev);
                        netif_rx(skb);
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += cb;
                }
                spin_lock_irqsave(&priv->ring_lock, flags);
                /* Move tail */
                priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
                priv->rx_count--;
        }
        spin_unlock_irqrestore(&priv->ring_lock, flags);
}


static void tx_isr(struct scc_priv *priv)
{
        int i = priv->tx_tail, p = priv->tx_ptr;

        /* Suspend TX interrupts if we don't want to send anything.
           See Figure 2-22. */
        if (p == priv->tx_len[i]) {
                write_scc(priv, R0, RES_Tx_P);
                return;
        }

        /* Write characters */
        while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
                write_scc_data(priv, priv->tx_buf[i][p++], 0);
        }

        /* Reset EOM latch of Z8530 */
        if (!priv->tx_ptr && p && priv->chip == Z8530)
                write_scc(priv, R0, RES_EOM_L);

        priv->tx_ptr = p;
}


static void es_isr(struct scc_priv *priv)
{
        int i, rr0, drr0, res;
        unsigned long flags;

        /* Read status, reset interrupt bit (open latches) */
        rr0 = read_scc(priv, R0);
        write_scc(priv, R0, RES_EXT_INT);
        drr0 = priv->rr0 ^ rr0;
        priv->rr0 = rr0;

        /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
           it might have already been cleared again by AUTOEOM. */
        if (priv->state == TX_DATA) {
                /* Get remaining bytes */
                i = priv->tx_tail;
                if (priv->param.dma >= 0) {
                        disable_dma(priv->param.dma);
                        flags = claim_dma_lock();
                        res = get_dma_residue(priv->param.dma);
                        release_dma_lock(flags);
                } else {
                        res = priv->tx_len[i] - priv->tx_ptr;
                        priv->tx_ptr = 0;
                }
                /* Disable DREQ / TX interrupt */
                if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
                        outb(0, priv->card_base + TWIN_DMA_CFG);
                else
                        write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
                if (res) {
                        /* Update packet statistics */
                        priv->dev->stats.tx_errors++;
                        priv->dev->stats.tx_fifo_errors++;
                        /* Other underrun interrupts may already be waiting */
                        write_scc(priv, R0, RES_EXT_INT);
                        write_scc(priv, R0, RES_EXT_INT);
                } else {
                        /* Update packet statistics */
                        priv->dev->stats.tx_packets++;
                        priv->dev->stats.tx_bytes += priv->tx_len[i];
                        /* Remove frame from FIFO */
                        priv->tx_tail = (i + 1) % NUM_TX_BUF;
                        priv->tx_count--;
                        /* Inform upper layers */
                        netif_wake_queue(priv->dev);
                }
                /* Switch state */
                write_scc(priv, R15, 0);
                if (priv->tx_count &&
                    (jiffies - priv->tx_start) < priv->param.txtimeout) {
                        priv->state = TX_PAUSE;
                        start_timer(priv, priv->param.txpause, 0);
                } else {
                        priv->state = TX_TAIL;
                        start_timer(priv, priv->param.txtail, 0);
                }
        }

        /* DCD transition */
        if (drr0 & DCD) {
                if (rr0 & DCD) {
                        switch (priv->state) {
                        case IDLE:
                        case WAIT:
                                priv->state = DCD_ON;
                                write_scc(priv, R15, 0);
                                start_timer(priv, priv->param.dcdon, 0);
                        }
                } else {
                        switch (priv->state) {
                        case RX_ON:
                                rx_off(priv);
                                priv->state = DCD_OFF;
                                write_scc(priv, R15, 0);
                                start_timer(priv, priv->param.dcdoff, 0);
                        }
                }
        }

        /* CTS transition */
        if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
                tm_isr(priv);

}


static void tm_isr(struct scc_priv *priv)
{
        switch (priv->state) {
        case TX_HEAD:
        case TX_PAUSE:
                tx_on(priv);
                priv->state = TX_DATA;
                break;
        case TX_TAIL:
                write_scc(priv, R5, TxCRC_ENAB | Tx8);
                priv->state = RTS_OFF;
                if (priv->type != TYPE_TWIN)
                        write_scc(priv, R15, 0);
                start_timer(priv, priv->param.rtsoff, 0);
                break;
        case RTS_OFF:
                write_scc(priv, R15, DCDIE);
                priv->rr0 = read_scc(priv, R0);
                if (priv->rr0 & DCD) {
                        priv->dev->stats.collisions++;
                        rx_on(priv);
                        priv->state = RX_ON;
                } else {
                        priv->state = WAIT;
                        start_timer(priv, priv->param.waittime, DCDIE);
                }
                break;
        case WAIT:
                if (priv->tx_count) {
                        priv->state = TX_HEAD;
                        priv->tx_start = jiffies;
                        write_scc(priv, R5,
                                  TxCRC_ENAB | RTS | TxENAB | Tx8);
                        write_scc(priv, R15, 0);
                        start_timer(priv, priv->param.txdelay, 0);
                } else {
                        priv->state = IDLE;
                        if (priv->type != TYPE_TWIN)
                                write_scc(priv, R15, DCDIE);
                }
                break;
        case DCD_ON:
        case DCD_OFF:
                write_scc(priv, R15, DCDIE);
                priv->rr0 = read_scc(priv, R0);
                if (priv->rr0 & DCD) {
                        rx_on(priv);
                        priv->state = RX_ON;
                } else {
                        priv->state = WAIT;
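                        /* p-persistence backoff: wait (random()/persist)
                           slot times before probing the channel again, so a
                           larger persist value means a shorter average
                           wait. */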
                        start_timer(priv,
                                    random() / priv->param.persist *
                                    priv->param.slottime, DCDIE);
                }
                break;
        }
}