/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
#include <linux/irqreturn.h>

18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME "esp"
33#define PFX DRV_MODULE_NAME ": "
34#define DRV_VERSION "2.000"
35#define DRV_MODULE_RELDATE "April 19, 2007"
36
37/* SCSI bus reset settle time in seconds. */
38static int esp_bus_reset_settle = 3;
39
40static u32 esp_debug;
41#define ESP_DEBUG_INTR 0x00000001
42#define ESP_DEBUG_SCSICMD 0x00000002
43#define ESP_DEBUG_RESET 0x00000004
44#define ESP_DEBUG_MSGIN 0x00000008
45#define ESP_DEBUG_MSGOUT 0x00000010
46#define ESP_DEBUG_CMDDONE 0x00000020
47#define ESP_DEBUG_DISCONNECT 0x00000040
48#define ESP_DEBUG_DATASTART 0x00000080
49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400
52
53#define esp_log_intr(f, a...) \
54do { if (esp_debug & ESP_DEBUG_INTR) \
55 printk(f, ## a); \
56} while (0)
57
58#define esp_log_reset(f, a...) \
59do { if (esp_debug & ESP_DEBUG_RESET) \
60 printk(f, ## a); \
61} while (0)
62
63#define esp_log_msgin(f, a...) \
64do { if (esp_debug & ESP_DEBUG_MSGIN) \
65 printk(f, ## a); \
66} while (0)
67
68#define esp_log_msgout(f, a...) \
69do { if (esp_debug & ESP_DEBUG_MSGOUT) \
70 printk(f, ## a); \
71} while (0)
72
73#define esp_log_cmddone(f, a...) \
74do { if (esp_debug & ESP_DEBUG_CMDDONE) \
75 printk(f, ## a); \
76} while (0)
77
78#define esp_log_disconnect(f, a...) \
79do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
80 printk(f, ## a); \
81} while (0)
82
83#define esp_log_datastart(f, a...) \
84do { if (esp_debug & ESP_DEBUG_DATASTART) \
85 printk(f, ## a); \
86} while (0)
87
88#define esp_log_datadone(f, a...) \
89do { if (esp_debug & ESP_DEBUG_DATADONE) \
90 printk(f, ## a); \
91} while (0)
92
93#define esp_log_reconnect(f, a...) \
94do { if (esp_debug & ESP_DEBUG_RECONNECT) \
95 printk(f, ## a); \
96} while (0)
97
98#define esp_log_autosense(f, a...) \
99do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
100 printk(f, ## a); \
101} while (0)
102
103#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
104#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
105
106static void esp_log_fill_regs(struct esp *esp,
107 struct esp_event_ent *p)
108{
109 p->sreg = esp->sreg;
110 p->seqreg = esp->seqreg;
111 p->sreg2 = esp->sreg2;
112 p->ireg = esp->ireg;
113 p->select_state = esp->select_state;
114 p->event = esp->event;
115}
116
117void scsi_esp_cmd(struct esp *esp, u8 val)
118{
119 struct esp_event_ent *p;
120 int idx = esp->esp_event_cur;
121
122 p = &esp->esp_event_log[idx];
123 p->type = ESP_EVENT_TYPE_CMD;
124 p->val = val;
125 esp_log_fill_regs(esp, p);
126
127 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
128
129 esp_write8(val, ESP_CMD);
130}
131EXPORT_SYMBOL(scsi_esp_cmd);
132
133static void esp_event(struct esp *esp, u8 val)
134{
135 struct esp_event_ent *p;
136 int idx = esp->esp_event_cur;
137
138 p = &esp->esp_event_log[idx];
139 p->type = ESP_EVENT_TYPE_EVENT;
140 p->val = val;
141 esp_log_fill_regs(esp, p);
142
143 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
144
145 esp->event = val;
146}
147
148static void esp_dump_cmd_log(struct esp *esp)
149{
150 int idx = esp->esp_event_cur;
151 int stop = idx;
152
153 printk(KERN_INFO PFX "esp%d: Dumping command log\n",
154 esp->host->unique_id);
155 do {
156 struct esp_event_ent *p = &esp->esp_event_log[idx];
157
158 printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
159 esp->host->unique_id, idx,
160 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
161
162 printk("val[%02x] sreg[%02x] seqreg[%02x] "
163 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
164 p->val, p->sreg, p->seqreg,
165 p->sreg2, p->ireg, p->select_state, p->event);
166
167 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
168 } while (idx != stop);
169}
170
171static void esp_flush_fifo(struct esp *esp)
172{
173 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
174 if (esp->rev == ESP236) {
175 int lim = 1000;
176
177 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
178 if (--lim == 0) {
179 printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
180 "will not clear!\n",
181 esp->host->unique_id);
182 break;
183 }
184 udelay(1);
185 }
186 }
187}
188
/* Drain the chip's fifo into esp->fifo[] on FASHME.
 *
 * Two bytes are read per fifo-flags count; the flags value on this
 * chip appears to count 16-bit words rather than bytes — TODO confirm
 * against the HME documentation.  A trailing odd byte is signalled via
 * ESP_STAT2_F1BYTE and is extracted by pushing a pad byte first.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	/* Number of bytes now valid in esp->fifo[]. */
	esp->fifo_cnt = idx;
}
205
206static void esp_set_all_config3(struct esp *esp, u8 val)
207{
208 int i;
209
210 for (i = 0; i < ESP_MAX_TARGET; i++)
211 esp->target[i].esp_config3 = val;
212}
213
214/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	/* Two NOP-with-DMA commands follow the chip reset; presumably
	 * needed to settle the chip before reloading the config
	 * registers — TODO confirm against chip errata.
	 */
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	/* Reset the cached sync transfer period/offset shadows and push
	 * the cleared values into the chip.
	 */
	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		/* Distinguish the FAST sub-variants via the family code
		 * field (bits 7:3) of the unique-ID register.
		 */
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	/* Round the period limits up to whole clock-factor units. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
323
/* Map the command's scatterlist for DMA and initialize the per-command
 * residue bookkeeping used while feeding data transfer chunks to the
 * chip.  Commands with no data direction are left untouched.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	/* cur_residue: bytes left in the current sg segment;
	 * tot_residue: bytes left in the whole transfer.
	 */
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}
343
344static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
345 struct scsi_cmnd *cmd)
346{
347 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
348
349 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
350 return ent->sense_dma +
351 (ent->sense_ptr - cmd->sense_buffer);
352 }
353
354 return sg_dma_address(p->cur_sg) +
355 (sg_dma_len(p->cur_sg) -
356 p->cur_residue);
357}
358
359static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
360 struct scsi_cmnd *cmd)
361{
362 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
363
364 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
365 return SCSI_SENSE_BUFFERSIZE -
366 (ent->sense_ptr - cmd->sense_buffer);
367 }
368 return p->cur_residue;
369}
370
371static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
372 struct scsi_cmnd *cmd, unsigned int len)
373{
374 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
375
376 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
377 ent->sense_ptr += len;
378 return;
379 }
380
381 p->cur_residue -= len;
382 p->tot_residue -= len;
383 if (p->cur_residue < 0 || p->tot_residue < 0) {
384 printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
385 esp->host->unique_id);
386 printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
387 "len[%u]\n",
388 esp->host->unique_id,
389 p->cur_residue, p->tot_residue, len);
390 p->cur_residue = 0;
391 p->tot_residue = 0;
392 }
393 if (!p->cur_residue && p->tot_residue) {
394 p->cur_sg++;
395 p->cur_residue = sg_dma_len(p->cur_sg);
396 }
397}
398
/* Undo the scatterlist mapping done by esp_map_dma().  No-op for
 * commands that carry no data.
 */
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}
409
410static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
411{
412 struct scsi_cmnd *cmd = ent->cmd;
413 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
414
415 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
416 ent->saved_sense_ptr = ent->sense_ptr;
417 return;
418 }
419 ent->saved_cur_residue = spriv->cur_residue;
420 ent->saved_cur_sg = spriv->cur_sg;
421 ent->saved_tot_residue = spriv->tot_residue;
422}
423
424static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
425{
426 struct scsi_cmnd *cmd = ent->cmd;
427 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
428
429 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
430 ent->sense_ptr = ent->saved_sense_ptr;
431 return;
432 }
433 spriv->cur_residue = ent->saved_cur_residue;
434 spriv->cur_sg = ent->saved_cur_sg;
435 spriv->tot_residue = ent->saved_tot_residue;
436}
437
438static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
439{
440 if (cmd->cmd_len == 6 ||
441 cmd->cmd_len == 10 ||
442 cmd->cmd_len == 12) {
443 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
444 } else {
445 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
446 }
447}
448
449static void esp_write_tgt_config3(struct esp *esp, int tgt)
450{
451 if (esp->rev > ESP100A) {
452 u8 val = esp->target[tgt].esp_config3;
453
454 if (val != esp->prev_cfg3) {
455 esp->prev_cfg3 = val;
456 esp_write8(val, ESP_CFG3);
457 }
458 }
459}
460
461static void esp_write_tgt_sync(struct esp *esp, int tgt)
462{
463 u8 off = esp->target[tgt].esp_offset;
464 u8 per = esp->target[tgt].esp_period;
465
466 if (off != esp->prev_soff) {
467 esp->prev_soff = off;
468 esp_write8(off, ESP_SOFF);
469 }
470 if (per != esp->prev_stp) {
471 esp->prev_stp = per;
472 esp_write8(per, ESP_STP);
473 }
474}
475
476static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
477{
478 if (esp->rev == FASHME) {
479 /* Arbitrary segment boundaries, 24-bit counts. */
480 if (dma_len > (1U << 24))
481 dma_len = (1U << 24);
482 } else {
483 u32 base, end;
484
485 /* ESP chip limits other variants by 16-bits of transfer
486 * count. Actually on FAS100A and FAS236 we could get
487 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
488 * in the ESP_CFG2 register but that causes other unwanted
489 * changes so we don't use it currently.
490 */
491 if (dma_len > (1U << 16))
492 dma_len = (1U << 16);
493
494 /* All of the DMA variants hooked up to these chips
495 * cannot handle crossing a 24-bit address boundary.
496 */
497 base = dma_addr & ((1U << 24) - 1U);
498 end = base + dma_len;
499 if (end > (1U << 24))
500 end = (1U <<24);
501 dma_len = end - base;
502 }
503 return dma_len;
504}
505
506static int esp_need_to_nego_wide(struct esp_target_data *tp)
507{
508 struct scsi_target *target = tp->starget;
509
510 return spi_width(target) != tp->nego_goal_width;
511}
512
513static int esp_need_to_nego_sync(struct esp_target_data *tp)
514{
515 struct scsi_target *target = tp->starget;
516
517 /* When offset is zero, period is "don't care". */
518 if (!spi_offset(target) && !tp->nego_goal_offset)
519 return 0;
520
521 if (spi_offset(target) == tp->nego_goal_offset &&
522 spi_period(target) == tp->nego_goal_period)
523 return 0;
524
525 return 1;
526}
527
528static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
529 struct esp_lun_data *lp)
530{
531 if (!ent->tag[0]) {
532 /* Non-tagged, slot already taken? */
533 if (lp->non_tagged_cmd)
534 return -EBUSY;
535
536 if (lp->hold) {
537 /* We are being held by active tagged
538 * commands.
539 */
540 if (lp->num_tagged)
541 return -EBUSY;
542
543 /* Tagged commands completed, we can unplug
544 * the queue and run this untagged command.
545 */
546 lp->hold = 0;
547 } else if (lp->num_tagged) {
548 /* Plug the queue until num_tagged decreases
549 * to zero in esp_free_lun_tag.
550 */
551 lp->hold = 1;
552 return -EBUSY;
553 }
554
555 lp->non_tagged_cmd = ent;
556 return 0;
557 } else {
558 /* Tagged command, see if blocked by a
559 * non-tagged one.
560 */
561 if (lp->non_tagged_cmd || lp->hold)
562 return -EBUSY;
563 }
564
565 BUG_ON(lp->tagged_cmds[ent->tag[1]]);
566
567 lp->tagged_cmds[ent->tag[1]] = ent;
568 lp->num_tagged++;
569
570 return 0;
571}
572
573static void esp_free_lun_tag(struct esp_cmd_entry *ent,
574 struct esp_lun_data *lp)
575{
576 if (ent->tag[0]) {
577 BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
578 lp->tagged_cmds[ent->tag[1]] = NULL;
579 lp->num_tagged--;
580 } else {
581 BUG_ON(lp->non_tagged_cmd != ent);
582 lp->non_tagged_cmd = NULL;
583 }
584}
585
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
/* Issue a REQUEST SENSE for 'ent' using the chip's select-with-DMA
 * machinery.  The IDENTIFY + CDB bytes are built in the command block
 * buffer; the sense buffer is mapped for DMA on first use and unmapped
 * in esp_cmd_is_done().
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	/* IDENTIFY (no disconnect) followed by a 6-byte REQUEST SENSE
	 * CDB; pre-SCSI-2 devices want the LUN in byte 1 bits 7:5.
	 */
	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	/* Number of message/command bytes to send. */
	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
648
/* Pick the first queued command that can be issued now.  Auto-sense
 * entries always win and run untagged; any other command must be able
 * to claim its LUN's tag/untagged slot via esp_alloc_lun_tag().
 * Returns NULL when nothing is issuable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		/* No tag message for this command: mark it untagged. */
		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}

		/* LUN busy (untagged active or queue plugged): try the
		 * next queued command.
		 */
		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
677
/* If the bus is free, pick the next issuable command, set up its DMA
 * mapping and negotiation messages, build the IDENTIFY/tag/CDB bytes
 * in the command block and kick off selection.  Commands that need
 * message-out negotiation, have odd CDB lengths, or run on ESP100 with
 * a tag go through the "slow command" select-with-stop path.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	/* Bus busy or a reset is in progress: nothing to do. */
	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	/* Move from the issue queue to the active list. */
	list_del(&ent->list);
	list_add(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate. If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Wide negotiation only on FASHME; otherwise fall back
		 * to sync negotiation when parameters changed.
		 */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect. Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		/* Fast path: IDENTIFY (+ optional tag) + whole CDB go
		 * out in one select-with-ATN DMA burst.
		 */
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: CDB bytes are fed later from cmd_bytes_ptr;
		 * tag bytes are prepended to the message-out buffer.
		 */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}
831
832static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
833{
834 struct list_head *head = &esp->esp_cmd_pool;
835 struct esp_cmd_entry *ret;
836
837 if (list_empty(head)) {
838 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
839 } else {
840 ret = list_entry(head->next, struct esp_cmd_entry, list);
841 list_del(&ret->list);
842 memset(ret, 0, sizeof(*ret));
843 }
844 return ret;
845}
846
/* Return a command entry to the free pool (entries are never freed
 * here; esp_get_ent() recycles them).
 */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
851
/* Complete a command: release DMA and tag resources, fill in
 * cmd->result, hand the command back to the midlayer and try to issue
 * the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake up an error-handler thread waiting on this command. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		/* Sense buffer was mapped in esp_autosense(). */
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally. Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
902
/* Pack SCSI status (byte 0), message (byte 1) and driver code
 * (byte 2) into a scsi_cmnd result word.
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = driver_code << 16;

	res |= message << 8;
	res |= status;
	return res;
}
908
/* Target responded QUEUE FULL: let the SCSI midlayer adjust this
 * device's queue depth based on the number of tagged commands that
 * were outstanding (minus the one just rejected).
 */
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}
916
/* SCSI midlayer queuecommand entry point.
 *
 * Allocates a command entry, puts it on the issue queue and kicks the
 * issue machinery.  Returns SCSI_MLQUEUE_HOST_BUSY when no entry can
 * be allocated, so the midlayer retries later.
 */
static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	/* Mark the per-command DMA address as invalid until mapping. */
	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}
941
942static int esp_check_gross_error(struct esp *esp)
943{
944 if (esp->sreg & ESP_STAT_SPAM) {
945 /* Gross Error, could be one of:
946 * - top of fifo overwritten
947 * - top of command register overwritten
948 * - DMA programmed with wrong direction
949 * - improper phase change
950 */
951 printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
952 esp->host->unique_id, esp->sreg);
953 /* XXX Reset the chip. XXX */
954 return 1;
955 }
956 return 0;
957}
958
/* Filter spurious interrupts.
 *
 * Returns 0 when the interrupt looks genuine, 1 when a bus-reset
 * interrupt was consumed while checking, and -1 on a spurious irq or
 * DMA error (caller should abandon interrupt processing).
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
997
998static void esp_schedule_reset(struct esp *esp)
999{
1000 esp_log_reset("ESP: esp_schedule_reset() from %p\n",
1001 __builtin_return_address(0));
1002 esp->flags |= ESP_FLAG_RESETTING;
1003 esp_event(esp, ESP_EVENT_RESET);
1004}
1005
1006/* In order to avoid having to add a special half-reconnected state
1007 * into the driver we just sit here and poll through the rest of
1008 * the reselection process to get the tag message bytes.
1009 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	/* A tagged reconnect only makes sense if tagged commands are
	 * actually outstanding on this LUN.
	 */
	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	/* Busy-wait (bounded) for the next chip interrupt. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	/* The target must now be in message-in phase with the tag. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (bounded) for function-done signalling the tag bytes
	 * have arrived.
	 */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Byte 0 is the tag message type, byte 1 the tag value. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1101
/* Handle a reselection from a disconnected target: decode the
 * reselecting target/lun, locate the matching command (untagged or via
 * esp_reconnect_with_tag) and make it the active command.  Returns 1
 * on success; on any inconsistency a bus reset is scheduled and 0 is
 * returned.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time. So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other bit (the target) must be set. */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here. Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* Untagged command outstanding?  Otherwise fetch the tag. */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* A pending abort for this command: raise ATN and queue the
	 * ABORT TASK SET message.
	 */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1192
/* Handle the interrupt that terminates a selection attempt.  Inspects
 * esp->ireg to classify the outcome: reselected by another target,
 * disconnect (selection timeout / dead target), or successful selection.
 * Returns 1 when the command is finished, 0 otherwise (caller continues
 * event processing or has had a reset scheduled).
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	/* NOTE(review): orig_select_state is captured but not used below --
	 * looks vestigial, confirm before removing.
	 */
	orig_select_state = esp->select_state;

	/* No longer selecting. */
	esp->select_state = ESP_SELECT_NONE;

	/* Latch the sequence step register; only the valid bits matter. */
	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			/* Autosense uses a dedicated single mapping for the
			 * sense buffer; undo only that.
			 */
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_del(&ent->list);
		list_add(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			/* Only flush when the FIFO is empty and we are not
			 * in (or entering) a synchronous data-in phase.
			 */
			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	printk("ESP: Unexpected selection completion ireg[%x].\n",
	       esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1295
/* Compute how many bytes of the current DMA data transfer actually made
 * it across the SCSI bus, by combining the chip's transfer counter and
 * FIFO residue.  Returns the byte count, or -1 when the ESP100 spurious
 * REQ-pulse bug is detected (caller must reset and avoid sync mode).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each FIFO slot holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Transfer counter is only meaningful when it did not hit zero. */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		/* FASHME has a third (high) counter byte. */
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/* On data-out, bytes still sitting in the FIFO never left the chip. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1357
/* Commit a negotiated synchronous transfer agreement: record it in the
 * SPI transport class, program the chip's sync offset/period registers,
 * and update fast-SCSI config3 bits where the chip supports them.
 * scsi_period/scsi_offset are the SDTR wire values; esp_stp/esp_soff are
 * the corresponding chip register encodings.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Fast-SCSI rates (period < 200ns) need the fast bit
			 * set; FASHME drops the REQ/ACK delay in that case.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache the programmed values so reconnect can restore them. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1396
/* Handle a MESSAGE REJECT from the target.  If we were negotiating wide
 * transfers, fall back and optionally start sync negotiation; if we were
 * negotiating sync, fall back to async.  Any other reject is unexpected
 * and aborts the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Target rejected WDTR: give up on wide for good. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Still want sync: queue an SDTR message out. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Target rejected SDTR: run asynchronous from now on. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Reject outside any negotiation we started: abort the task set. */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1437
/* Process an incoming SDTR (synchronous data transfer request) message.
 * Validates the target's proposed period/offset against chip limits,
 * converts the period into the chip's STP register encoding, and commits
 * the agreement via esp_setsync().  Out-of-spec proposals are rejected;
 * a too-slow period falls back to re-offering async.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* SDTR we never asked for is a protocol violation -> reject. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Chip supports a sync offset of at most 15. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int rounded_up, one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: counter-offer async. */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SDTR period (units of 4ns) into input-clock
		 * cycles, rounding up.
		 */
		one_clock = esp->ccycle / 1000;
		rounded_up = (period << 2);
		rounded_up = (rounded_up + one_clock - 1) / one_clock;
		stp = rounded_up;
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	/* Send our own SDTR counter-proposal. */
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1490
/* Process an incoming WDTR (wide data transfer request) message.  Only
 * the FASHME chip supports wide here; a valid request programs the
 * EWIDE config3 bit and then, per SPI protocol, triggers sync
 * negotiation if still needed.  Invalid/unsolicited requests are
 * rejected.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	/* Only 8-bit (narrow) and 16-bit (wide) are valid. */
	if (size != 8 && size != 16)
		goto do_reject;

	/* WDTR we never asked for -> reject. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide negotiation resets the sync agreement (SPI protocol). */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1539
1540static void esp_msgin_extended(struct esp *esp)
1541{
1542 struct esp_cmd_entry *ent = esp->active_cmd;
1543 struct scsi_cmnd *cmd = ent->cmd;
1544 struct esp_target_data *tp;
1545 int tgt = cmd->device->id;
1546
1547 tp = &esp->target[tgt];
1548 if (esp->msg_in[2] == EXTENDED_SDTR) {
1549 esp_msgin_sdtr(esp, tp);
1550 return;
1551 }
1552 if (esp->msg_in[2] == EXTENDED_WDTR) {
1553 esp_msgin_wdtr(esp, tp);
1554 return;
1555 }
1556
1557 printk("ESP: Unexpected extended msg type %x\n",
1558 esp->msg_in[2]);
1559
1560 esp->msg_out[0] = ABORT_TASK_SET;
1561 esp->msg_out_len = 1;
1562 scsi_esp_cmd(esp, ESP_CMD_SATN);
1563}
1564
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need at least the length byte, then the full payload
		 * (length + 2 header bytes) before dispatching.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Two-byte message; the second byte must be 1. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Back the data pointer up one byte; if the current SG
		 * entry was untouched, step back into the previous one.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Remember which message ended the nexus; the FREE_BUS
		 * event handler acts on it.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1638
1639static int esp_process_event(struct esp *esp)
1640{
1641 int write;
1642
1643again:
1644 write = 0;
1645 switch (esp->event) {
1646 case ESP_EVENT_CHECK_PHASE:
1647 switch (esp->sreg & ESP_STAT_PMASK) {
1648 case ESP_DOP:
1649 esp_event(esp, ESP_EVENT_DATA_OUT);
1650 break;
1651 case ESP_DIP:
1652 esp_event(esp, ESP_EVENT_DATA_IN);
1653 break;
1654 case ESP_STATP:
1655 esp_flush_fifo(esp);
1656 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1657 esp_event(esp, ESP_EVENT_STATUS);
1658 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1659 return 1;
1660
1661 case ESP_MOP:
1662 esp_event(esp, ESP_EVENT_MSGOUT);
1663 break;
1664
1665 case ESP_MIP:
1666 esp_event(esp, ESP_EVENT_MSGIN);
1667 break;
1668
1669 case ESP_CMDP:
1670 esp_event(esp, ESP_EVENT_CMD_START);
1671 break;
1672
1673 default:
1674 printk("ESP: Unexpected phase, sreg=%02x\n",
1675 esp->sreg);
1676 esp_schedule_reset(esp);
1677 return 0;
1678 }
1679 goto again;
1680 break;
1681
1682 case ESP_EVENT_DATA_IN:
1683 write = 1;
1684 /* fallthru */
1685
1686 case ESP_EVENT_DATA_OUT: {
1687 struct esp_cmd_entry *ent = esp->active_cmd;
1688 struct scsi_cmnd *cmd = ent->cmd;
1689 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1690 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1691
1692 if (esp->rev == ESP100)
1693 scsi_esp_cmd(esp, ESP_CMD_NULL);
1694
1695 if (write)
1696 ent->flags |= ESP_CMD_FLAG_WRITE;
1697 else
1698 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1699
1700 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1701 esp->data_dma_len = dma_len;
1702
1703 if (!dma_len) {
1704 printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
1705 esp->host->unique_id);
Alexey Dobriyane1f2a092007-04-27 15:19:27 -07001706 printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001707 esp->host->unique_id,
Alexey Dobriyane1f2a092007-04-27 15:19:27 -07001708 (unsigned long long)esp_cur_dma_addr(ent, cmd),
David S. Millercd9ad582007-04-26 21:19:23 -07001709 esp_cur_dma_len(ent, cmd));
1710 esp_schedule_reset(esp);
1711 return 0;
1712 }
1713
Alexey Dobriyane1f2a092007-04-27 15:19:27 -07001714 esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
David S. Millercd9ad582007-04-26 21:19:23 -07001715 "write(%d)\n",
Alexey Dobriyane1f2a092007-04-27 15:19:27 -07001716 (unsigned long long)dma_addr, dma_len, write);
David S. Millercd9ad582007-04-26 21:19:23 -07001717
1718 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1719 write, ESP_CMD_DMA | ESP_CMD_TI);
1720 esp_event(esp, ESP_EVENT_DATA_DONE);
1721 break;
1722 }
1723 case ESP_EVENT_DATA_DONE: {
1724 struct esp_cmd_entry *ent = esp->active_cmd;
1725 struct scsi_cmnd *cmd = ent->cmd;
1726 int bytes_sent;
1727
1728 if (esp->ops->dma_error(esp)) {
1729 printk("ESP: data done, DMA error, resetting\n");
1730 esp_schedule_reset(esp);
1731 return 0;
1732 }
1733
1734 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1735 /* XXX parity errors, etc. XXX */
1736
1737 esp->ops->dma_drain(esp);
1738 }
1739 esp->ops->dma_invalidate(esp);
1740
1741 if (esp->ireg != ESP_INTR_BSERV) {
1742 /* We should always see exactly a bus-service
1743 * interrupt at the end of a successful transfer.
1744 */
1745 printk("ESP: data done, not BSERV, resetting\n");
1746 esp_schedule_reset(esp);
1747 return 0;
1748 }
1749
1750 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1751
1752 esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
1753 ent->flags, bytes_sent);
1754
1755 if (bytes_sent < 0) {
1756 /* XXX force sync mode for this target XXX */
1757 esp_schedule_reset(esp);
1758 return 0;
1759 }
1760
1761 esp_advance_dma(esp, ent, cmd, bytes_sent);
1762 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1763 goto again;
1764 break;
1765 }
1766
1767 case ESP_EVENT_STATUS: {
1768 struct esp_cmd_entry *ent = esp->active_cmd;
1769
1770 if (esp->ireg & ESP_INTR_FDONE) {
1771 ent->status = esp_read8(ESP_FDATA);
1772 ent->message = esp_read8(ESP_FDATA);
1773 scsi_esp_cmd(esp, ESP_CMD_MOK);
1774 } else if (esp->ireg == ESP_INTR_BSERV) {
1775 ent->status = esp_read8(ESP_FDATA);
1776 ent->message = 0xff;
1777 esp_event(esp, ESP_EVENT_MSGIN);
1778 return 0;
1779 }
1780
1781 if (ent->message != COMMAND_COMPLETE) {
1782 printk("ESP: Unexpected message %x in status\n",
1783 ent->message);
1784 esp_schedule_reset(esp);
1785 return 0;
1786 }
1787
1788 esp_event(esp, ESP_EVENT_FREE_BUS);
1789 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1790 break;
1791 }
1792 case ESP_EVENT_FREE_BUS: {
1793 struct esp_cmd_entry *ent = esp->active_cmd;
1794 struct scsi_cmnd *cmd = ent->cmd;
1795
1796 if (ent->message == COMMAND_COMPLETE ||
1797 ent->message == DISCONNECT)
1798 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1799
1800 if (ent->message == COMMAND_COMPLETE) {
1801 esp_log_cmddone("ESP: Command done status[%x] "
1802 "message[%x]\n",
1803 ent->status, ent->message);
1804 if (ent->status == SAM_STAT_TASK_SET_FULL)
1805 esp_event_queue_full(esp, ent);
1806
1807 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1808 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1809 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1810 esp_autosense(esp, ent);
1811 } else {
1812 esp_cmd_is_done(esp, ent, cmd,
1813 compose_result(ent->status,
1814 ent->message,
1815 DID_OK));
1816 }
1817 } else if (ent->message == DISCONNECT) {
1818 esp_log_disconnect("ESP: Disconnecting tgt[%d] "
1819 "tag[%x:%x]\n",
1820 cmd->device->id,
1821 ent->tag[0], ent->tag[1]);
1822
1823 esp->active_cmd = NULL;
1824 esp_maybe_execute_command(esp);
1825 } else {
1826 printk("ESP: Unexpected message %x in freebus\n",
1827 ent->message);
1828 esp_schedule_reset(esp);
1829 return 0;
1830 }
1831 if (esp->active_cmd)
1832 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1833 break;
1834 }
1835 case ESP_EVENT_MSGOUT: {
1836 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1837
1838 if (esp_debug & ESP_DEBUG_MSGOUT) {
1839 int i;
1840 printk("ESP: Sending message [ ");
1841 for (i = 0; i < esp->msg_out_len; i++)
1842 printk("%02x ", esp->msg_out[i]);
1843 printk("]\n");
1844 }
1845
1846 if (esp->rev == FASHME) {
1847 int i;
1848
1849 /* Always use the fifo. */
1850 for (i = 0; i < esp->msg_out_len; i++) {
1851 esp_write8(esp->msg_out[i], ESP_FDATA);
1852 esp_write8(0, ESP_FDATA);
1853 }
1854 scsi_esp_cmd(esp, ESP_CMD_TI);
1855 } else {
1856 if (esp->msg_out_len == 1) {
1857 esp_write8(esp->msg_out[0], ESP_FDATA);
1858 scsi_esp_cmd(esp, ESP_CMD_TI);
1859 } else {
1860 /* Use DMA. */
1861 memcpy(esp->command_block,
1862 esp->msg_out,
1863 esp->msg_out_len);
1864
1865 esp->ops->send_dma_cmd(esp,
1866 esp->command_block_dma,
1867 esp->msg_out_len,
1868 esp->msg_out_len,
1869 0,
1870 ESP_CMD_DMA|ESP_CMD_TI);
1871 }
1872 }
1873 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1874 break;
1875 }
1876 case ESP_EVENT_MSGOUT_DONE:
1877 if (esp->rev == FASHME) {
1878 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1879 } else {
1880 if (esp->msg_out_len > 1)
1881 esp->ops->dma_invalidate(esp);
1882 }
1883
1884 if (!(esp->ireg & ESP_INTR_DC)) {
1885 if (esp->rev != FASHME)
1886 scsi_esp_cmd(esp, ESP_CMD_NULL);
1887 }
1888 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1889 goto again;
1890 case ESP_EVENT_MSGIN:
1891 if (esp->ireg & ESP_INTR_BSERV) {
1892 if (esp->rev == FASHME) {
1893 if (!(esp_read8(ESP_STATUS2) &
1894 ESP_STAT2_FEMPTY))
1895 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1896 } else {
1897 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1898 if (esp->rev == ESP100)
1899 scsi_esp_cmd(esp, ESP_CMD_NULL);
1900 }
1901 scsi_esp_cmd(esp, ESP_CMD_TI);
1902 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1903 return 1;
1904 }
1905 if (esp->ireg & ESP_INTR_FDONE) {
1906 u8 val;
1907
1908 if (esp->rev == FASHME)
1909 val = esp->fifo[0];
1910 else
1911 val = esp_read8(ESP_FDATA);
1912 esp->msg_in[esp->msg_in_len++] = val;
1913
1914 esp_log_msgin("ESP: Got msgin byte %x\n", val);
1915
1916 if (!esp_msgin_process(esp))
1917 esp->msg_in_len = 0;
1918
1919 if (esp->rev == FASHME)
1920 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1921
1922 scsi_esp_cmd(esp, ESP_CMD_MOK);
1923
1924 if (esp->event != ESP_EVENT_FREE_BUS)
1925 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1926 } else {
1927 printk("ESP: MSGIN neither BSERV not FDON, resetting");
1928 esp_schedule_reset(esp);
1929 return 0;
1930 }
1931 break;
1932 case ESP_EVENT_CMD_START:
1933 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1934 esp->cmd_bytes_left);
1935 if (esp->rev == FASHME)
1936 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1937 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1938 esp->cmd_bytes_left, 16, 0,
1939 ESP_CMD_DMA | ESP_CMD_TI);
1940 esp_event(esp, ESP_EVENT_CMD_DONE);
1941 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1942 break;
1943 case ESP_EVENT_CMD_DONE:
1944 esp->ops->dma_invalidate(esp);
1945 if (esp->ireg & ESP_INTR_BSERV) {
1946 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1947 goto again;
1948 }
1949 esp_schedule_reset(esp);
1950 return 0;
1951 break;
1952
1953 case ESP_EVENT_RESET:
1954 scsi_esp_cmd(esp, ESP_CMD_RS);
1955 break;
1956
1957 default:
1958 printk("ESP: Unexpected event %x, resetting\n",
1959 esp->event);
1960 esp_schedule_reset(esp);
1961 return 0;
1962 break;
1963 }
1964 return 1;
1965}
1966
/* Fail one in-flight command with DID_RESET: release its DMA mapping,
 * tag and (if autosensing) sense-buffer mapping, complete it back to
 * the midlayer, and return the entry to the pool.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
1985
1986static void esp_clear_hold(struct scsi_device *dev, void *data)
1987{
1988 struct esp_lun_data *lp = dev->hostdata;
1989
1990 BUG_ON(lp->num_tagged);
1991 lp->hold = 0;
1992}
1993
/* Tear down all driver state after a SCSI bus reset: fail every queued
 * and active command with DID_RESET, then force renegotiation of sync,
 * wide and tagged-queueing parameters for every target.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Queued commands were never started; complete them directly. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Active commands need their DMA/tag state unwound too. */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			starget_for_each_device(tp->starget, NULL,
						esp_clear_hold);
	}
}
2033
/* Runs under host->lock.  One pass of interrupt handling: read chip
 * status, deal with bus reset / gross errors, grab any FASHME FIFO
 * contents, classify the interrupt (selection completion vs.
 * reselection) and then drive the event state machine to completion.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	/* Reading INTRPT acknowledges the interrupt on the chip. */
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake up an error-handler thread waiting on the reset. */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* FASHME buffers incoming bytes in its FIFO; pull them into
		 * esp->fifo[] before they can be clobbered.
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		printk("ESP: unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselected; back out any in-progress selection
			 * first, then bind to the reconnecting nexus.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2107
/* Shared IRQ handler used by all ESP front-end drivers.  Runs the core
 * interrupt logic under host_lock and, when ESP_FLAG_QUICKIRQ_CHECK is
 * set, briefly polls for a follow-up interrupt to avoid a full
 * IRQ-return round trip.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Poll a bounded number of times for the next
			 * interrupt before giving up and returning.
			 */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2139
/* Probe the chip revision by testing which config registers are
 * actually implemented: no CFG2 -> ESP100; CFG2 but no CFG3 -> ESP100A;
 * all three -> one of the FAS variants, distinguished by the clock
 * conversion factor.  Register write/read-back order here is the probe
 * itself -- do not reorder.
 */
static void __devinit esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}
2188
2189static void __devinit esp_init_swstate(struct esp *esp)
2190{
2191 int i;
2192
2193 INIT_LIST_HEAD(&esp->queued_cmds);
2194 INIT_LIST_HEAD(&esp->active_cmds);
2195 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2196
2197 /* Start with a clear state, domain validation (via ->slave_configure,
2198 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2199 * commands.
2200 */
2201 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2202 esp->target[i].flags = 0;
2203 esp->target[i].nego_goal_period = 0;
2204 esp->target[i].nego_goal_offset = 0;
2205 esp->target[i].nego_goal_width = 0;
2206 esp->target[i].nego_goal_tags = 0;
2207 }
2208}
2209
/* This places the ESP into a known state at boot time.  Resets the DMA
 * engine, the chip itself, and the SCSI bus (with the reset interrupt
 * suppressed so we don't immediately service our own reset).
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the normal config1 (re-enables the reset interrupt). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2234
/* Derive clock-dependent chip parameters (clock conversion factor,
 * cycle time, selection timeout tick, default sync period) from the
 * input crystal frequency esp->cfreq.
 */
static void __devinit esp_set_clock_params(struct esp *esp)
{
	int fmhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) * (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Imperical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fmhz = esp->cfreq;

	/* CCF is the clock frequency in units of 5MHz, rounded; a value
	 * of 1 is not programmable, bump it to 2.
	 */
	ccf = ((fmhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
		fmhz = 20000000;
		ccf = 4;
	}

	/* The chip encodes CCF 8 as 0. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fmhz;
	esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2295
/* Human-readable chip names, indexed by esp->rev -- the order must
 * match the esp_rev enumeration in esp_scsi.h (TODO: confirm against
 * header, not visible here).
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

/* SPI transport template shared by all registered ESP hosts. */
static struct scsi_transport_template *esp_transport_template;
2307
2308int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
2309{
2310 static int instance;
2311 int err;
2312
2313 esp->host->transportt = esp_transport_template;
2314 esp->host->max_lun = ESP_MAX_LUN;
2315 esp->host->cmd_per_lun = 2;
2316
2317 esp_set_clock_params(esp);
2318
2319 esp_get_revision(esp);
2320
2321 esp_init_swstate(esp);
2322
2323 esp_bootup_reset(esp);
2324
2325 printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
2326 esp->host->unique_id, esp->regs, esp->dma_regs,
2327 esp->host->irq);
2328 printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2329 esp->host->unique_id, esp_chip_names[esp->rev],
2330 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2331
2332 /* Let the SCSI bus reset settle. */
2333 ssleep(esp_bus_reset_settle);
2334
2335 err = scsi_add_host(esp->host, dev);
2336 if (err)
2337 return err;
2338
2339 esp->host->unique_id = instance++;
2340
2341 scsi_scan_host(esp->host);
2342
2343 return 0;
2344}
2345EXPORT_SYMBOL(scsi_esp_register);
2346
/* Unregister the host from the SCSI midlayer; the front-end driver
 * remains responsible for freeing its own resources afterwards.
 */
void __devexit scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2352
/* Midlayer ->slave_alloc hook: allocate per-LUN driver data and seed
 * the SPI transport limits for the device's target.  Returns 0 on
 * success or -ENOMEM.
 */
static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	tp->starget = dev->sdev_target;

	/* Advertise our limits to the SPI transport class for domain
	 * validation.
	 */
	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}
2376
2377static int esp_slave_configure(struct scsi_device *dev)
2378{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002379 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002380 struct esp_target_data *tp = &esp->target[dev->id];
2381 int goal_tags, queue_depth;
2382
2383 goal_tags = 0;
2384
2385 if (dev->tagged_supported) {
2386 /* XXX make this configurable somehow XXX */
2387 goal_tags = ESP_DEFAULT_TAGS;
2388
2389 if (goal_tags > ESP_MAX_TAG)
2390 goal_tags = ESP_MAX_TAG;
2391 }
2392
2393 queue_depth = goal_tags;
2394 if (queue_depth < dev->host->cmd_per_lun)
2395 queue_depth = dev->host->cmd_per_lun;
2396
2397 if (goal_tags) {
2398 scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2399 scsi_activate_tcq(dev, queue_depth);
2400 } else {
2401 scsi_deactivate_tcq(dev, queue_depth);
2402 }
2403 tp->flags |= ESP_TGT_DISCONNECT;
2404
2405 if (!spi_initial_dv(dev->sdev_target))
2406 spi_dv_device(dev);
2407
2408 return 0;
2409}
2410
/* Release the per-LUN state allocated in esp_slave_alloc(). */
static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}
2418
/* scsi_host_template.eh_abort_handler hook: try to abort 'cmd'.
 *
 * Three cases:
 *   1) cmd is still on esp->queued_cmds (never issued): unlink it,
 *      complete it with DID_ABORT and return SUCCESS.
 *   2) cmd is the currently active command: queue an ABORT_TASK_SET
 *      message (assert ATN) and wait up to 5s for the interrupt path
 *      to signal ent->eh_done.
 *   3) cmd belongs to a disconnected nexus: give up (FAILED) and let
 *      the midlayer escalate to bus/host reset — see the comment in
 *      that branch.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	/* Dump driver state (active/queued commands plus the command
	 * log) under the host lock, purely for diagnostics.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Case 1: look for the command on the not-yet-issued queue. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	/* Case 2: the command currently owns the bus. */
	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		/* NOTE(review): on timeout we dereference 'ent' again to
		 * clear eh_done; this presumably relies on the interrupt
		 * path not freeing the entry before eh_done is signalled —
		 * verify against the completion-side code.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2534
/* scsi_host_template.eh_bus_reset_handler hook: reset the SCSI bus.
 *
 * Issues an ESP_CMD_RS chip command with ESP_FLAG_RESETTING set, then
 * sleeps for the configured settle time and waits up to 5s for the
 * interrupt path to signal esp->eh_reset.  Returns SUCCESS or FAILED.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to settle after the bus reset. */
	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* Reset never completed; detach the completion so the
		 * interrupt path cannot signal a stack variable later.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2569
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	/* Hard-reset the chip under the host lock; esp_reset_cleanup()
	 * presumably unwinds all outstanding commands — see its
	 * definition earlier in this file.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Let the SCSI bus settle before reporting success. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2585
/* scsi_host_template.info hook: identify the driver to the midlayer. */
static const char *esp_info(struct Scsi_Host *host)
{
	static const char driver_name[] = "esp";

	return driver_name;
}
2590
/* Host template used by the bus front-end drivers (exported below)
 * when they allocate their Scsi_Host instances.
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,	/* default initiator SCSI ID */
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	/* The eh handlers above already ssleep(esp_bus_reset_settle);
	 * tell the midlayer not to add its own settle delay on top.
	 */
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2610
2611static void esp_get_signalling(struct Scsi_Host *host)
2612{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002613 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002614 enum spi_signal_type type;
2615
2616 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2617 type = SPI_SIGNAL_HVD;
2618 else
2619 type = SPI_SIGNAL_SE;
2620
2621 spi_signalling(host) = type;
2622}
2623
2624static void esp_set_offset(struct scsi_target *target, int offset)
2625{
2626 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002627 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002628 struct esp_target_data *tp = &esp->target[target->id];
2629
2630 tp->nego_goal_offset = offset;
2631 tp->flags |= ESP_TGT_CHECK_NEGO;
2632}
2633
2634static void esp_set_period(struct scsi_target *target, int period)
2635{
2636 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002637 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002638 struct esp_target_data *tp = &esp->target[target->id];
2639
2640 tp->nego_goal_period = period;
2641 tp->flags |= ESP_TGT_CHECK_NEGO;
2642}
2643
2644static void esp_set_width(struct scsi_target *target, int width)
2645{
2646 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002647 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002648 struct esp_target_data *tp = &esp->target[target->id];
2649
2650 tp->nego_goal_width = (width ? 1 : 0);
2651 tp->flags |= ESP_TGT_CHECK_NEGO;
2652}
2653
/* SPI transport attribute hooks.  The setters only record the
 * negotiation goal and set ESP_TGT_CHECK_NEGO on the target; the
 * actual renegotiation is performed elsewhere in the driver.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2663
2664static int __init esp_init(void)
2665{
2666 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2667 sizeof(struct esp_cmd_priv));
2668
2669 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2670 if (!esp_transport_template)
2671 return -ENODEV;
2672
2673 return 0;
2674}
2675
/* Module exit: release the SPI transport attached in esp_init(). */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2680
/* Module metadata and runtime-tunable parameters. */
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

/* Bitmask matching the ESP_DEBUG_* flags defined at the top of
 * this file.
 */
module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);