blob: 9a8c037a2f21952bd396296514bcbf7e0c7c717a [file] [log] [blame]
Thomas Gleixner09c434b2019-05-19 13:08:20 +01001// SPDX-License-Identifier: GPL-2.0-only
David S. Millercd9ad582007-04-26 21:19:23 -07002/* esp_scsi.c: ESP SCSI driver.
3 *
4 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/delay.h>
11#include <linux/list.h>
12#include <linux/completion.h>
13#include <linux/kallsyms.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
Alexey Dobriyane1f2a092007-04-27 15:19:27 -070017#include <linux/irqreturn.h>
David S. Millercd9ad582007-04-26 21:19:23 -070018
19#include <asm/irq.h>
20#include <asm/io.h>
21#include <asm/dma.h>
22
23#include <scsi/scsi.h>
24#include <scsi/scsi_host.h>
25#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_device.h>
27#include <scsi/scsi_tcq.h>
28#include <scsi/scsi_dbg.h>
29#include <scsi/scsi_transport_spi.h>
30
31#include "esp_scsi.h"
32
#define DRV_MODULE_NAME "esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "2.000"
#define DRV_MODULE_RELDATE "April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

/* Debug bitmask; each bit enables one class of trace output below. */
static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

/* Per-class trace macros.  Each expects a local variable named "esp"
 * (struct esp *) to be in scope at the expansion site.
 */
#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

/* Register accessors dispatch through the bus-specific ops vector;
 * they too expect a local "esp" variable in scope.
 */
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
118
119static void esp_log_fill_regs(struct esp *esp,
120 struct esp_event_ent *p)
121{
122 p->sreg = esp->sreg;
123 p->seqreg = esp->seqreg;
124 p->sreg2 = esp->sreg2;
125 p->ireg = esp->ireg;
126 p->select_state = esp->select_state;
127 p->event = esp->event;
128}
129
/* Issue a command byte to the chip's command register, first recording
 * it (with a register snapshot) in the circular event log for debugging.
 */
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* ESP_EVENT_LOG_SZ is a power of two, so wrap with a mask. */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
146
/* Push @len bytes of esp->command_block to the chip and start @cmd.
 * With ESP_FLAG_USE_FIFO the bytes are fed through the FIFO by PIO;
 * otherwise the host-specific DMA engine is used (FASHME needs a FIFO
 * flush before starting a DMA command).
 */
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}
164
/* Advance the driver state machine to @val, logging the transition in
 * the circular event log alongside a register snapshot.
 */
static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}
179
/* Dump the whole circular event log, oldest entry first (the current
 * cursor position is the oldest slot).
 */
static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}
200
/* Flush the chip FIFO.  On ESP236 the flush is not synchronous, so poll
 * the FIFO-flags register (up to ~1ms) until the byte count reads zero.
 */
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}
217
/* Drain the FASHME FIFO into esp->fifo[].  The HME presents data two
 * bytes per FIFO slot; a leftover odd byte is flagged by
 * ESP_STAT2_F1BYTE in sreg2 and must be pushed out with a dummy write
 * before it can be read.  esp->fifo_cnt records how many bytes landed.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		/* Dummy write shifts the dangling odd byte into view. */
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
234
235static void esp_set_all_config3(struct esp *esp, u8 val)
236{
237 int i;
238
239 for (i = 0; i < ESP_MAX_TARGET; i++)
240 esp->target[i].esp_config3 = val;
241}
242
/* Reset the ESP chip, _not_ the SCSI bus.  Also (re)detects the exact
 * fast-chip variant, derives the sync-period limits from the clock
 * cycle time, and reprograms all configuration registers.
 */
static void esp_reset_esp(struct esp *esp)
{
	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));

		if (family_code == ESP_UID_F236) {
			esp->rev = FAS236;
		} else if (family_code == ESP_UID_HME) {
			esp->rev = FASHME; /* Version is usually '5'. */
		} else if (family_code == ESP_UID_FSC) {
			esp->rev = FSC;
			/* Enable Active Negation */
			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
		} else {
			esp->rev = FAS100A;
		}
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			/* CFG4 readable and bit stuck => AM53c974 (PCSCSI). */
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	/* Round the period limits up to whole clock units. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		fallthrough;

	case FAS236:
	case PCSCSI:
	case FSC:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
371
/* Prepare the command's scatterlist for a data transfer and initialise
 * the per-command DMA cursor (cur_sg/cur_residue/tot_residue).
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead of
		 * a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	/* Cursor starts at the first segment with everything outstanding. */
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}
403
404static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
405 struct scsi_cmnd *cmd)
406{
407 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
408
409 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
410 return ent->sense_dma +
411 (ent->sense_ptr - cmd->sense_buffer);
412 }
413
414 return sg_dma_address(p->cur_sg) +
415 (sg_dma_len(p->cur_sg) -
416 p->cur_residue);
417}
418
419static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
420 struct scsi_cmnd *cmd)
421{
422 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
423
424 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
425 return SCSI_SENSE_BUFFERSIZE -
426 (ent->sense_ptr - cmd->sense_buffer);
427 }
428 return p->cur_residue;
429}
430
/* Account for @len bytes just transferred: advance the sense pointer in
 * autosense mode, or decrement the segment/total residues and step to
 * the next scatterlist segment when the current one is exhausted.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		/* Should never happen; clamp so we fail gracefully. */
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->prv_sg = p->cur_sg;
		p->cur_sg = sg_next(p->cur_sg);
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}
458
459static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
460{
Christoph Hellwig3f9295b2018-10-13 09:26:27 +0200461 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
462 scsi_dma_unmap(cmd);
David S. Millercd9ad582007-04-26 21:19:23 -0700463}
464
/* Save the data-pointer cursor (SCSI "save data pointers" semantics) so
 * a later esp_restore_pointers() can rewind after a disconnect/restore.
 */
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_prv_sg = spriv->prv_sg;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}
479
/* Rewind the data-pointer cursor to the last esp_save_pointers() state
 * (SCSI "restore data pointers" semantics).
 */
static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->prv_sg = ent->saved_prv_sg;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}
494
David S. Millercd9ad582007-04-26 21:19:23 -0700495static void esp_write_tgt_config3(struct esp *esp, int tgt)
496{
497 if (esp->rev > ESP100A) {
498 u8 val = esp->target[tgt].esp_config3;
499
500 if (val != esp->prev_cfg3) {
501 esp->prev_cfg3 = val;
502 esp_write8(val, ESP_CFG3);
503 }
504 }
505}
506
507static void esp_write_tgt_sync(struct esp *esp, int tgt)
508{
509 u8 off = esp->target[tgt].esp_offset;
510 u8 per = esp->target[tgt].esp_period;
511
512 if (off != esp->prev_soff) {
513 esp->prev_soff = off;
514 esp_write8(off, ESP_SOFF);
515 }
516 if (per != esp->prev_stp) {
517 esp->prev_stp = per;
518 esp_write8(per, ESP_STP);
519 }
520}
521
522static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
523{
524 if (esp->rev == FASHME) {
525 /* Arbitrary segment boundaries, 24-bit counts. */
526 if (dma_len > (1U << 24))
527 dma_len = (1U << 24);
528 } else {
529 u32 base, end;
530
531 /* ESP chip limits other variants by 16-bits of transfer
532 * count. Actually on FAS100A and FAS236 we could get
533 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
534 * in the ESP_CFG2 register but that causes other unwanted
535 * changes so we don't use it currently.
536 */
537 if (dma_len > (1U << 16))
538 dma_len = (1U << 16);
539
540 /* All of the DMA variants hooked up to these chips
541 * cannot handle crossing a 24-bit address boundary.
542 */
543 base = dma_addr & ((1U << 24) - 1U);
544 end = base + dma_len;
545 if (end > (1U << 24))
546 end = (1U <<24);
547 dma_len = end - base;
548 }
549 return dma_len;
550}
551
552static int esp_need_to_nego_wide(struct esp_target_data *tp)
553{
554 struct scsi_target *target = tp->starget;
555
556 return spi_width(target) != tp->nego_goal_width;
557}
558
559static int esp_need_to_nego_sync(struct esp_target_data *tp)
560{
561 struct scsi_target *target = tp->starget;
562
563 /* When offset is zero, period is "don't care". */
564 if (!spi_offset(target) && !tp->nego_goal_offset)
565 return 0;
566
567 if (spi_offset(target) == tp->nego_goal_offset &&
568 spi_period(target) == tp->nego_goal_period)
569 return 0;
570
571 return 1;
572}
573
/* Claim a per-LUN issue slot for @ent.  Untagged commands use the single
 * non_tagged_cmd slot and may "plug" the queue (lp->hold) until all
 * outstanding tagged commands drain; tagged commands take the slot
 * indexed by their tag value.  Returns 0 on success, -EBUSY when the
 * command must wait.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
616
617static void esp_free_lun_tag(struct esp_cmd_entry *ent,
618 struct esp_lun_data *lp)
619{
David S. Miller21af8102013-08-01 18:08:34 -0700620 if (ent->orig_tag[0]) {
621 BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
622 lp->tagged_cmds[ent->orig_tag[1]] = NULL;
David S. Millercd9ad582007-04-26 21:19:23 -0700623 lp->num_tagged--;
624 } else {
625 BUG_ON(lp->non_tagged_cmd != ent);
626 lp->non_tagged_cmd = NULL;
627 }
628}
629
/* Map the command's sense buffer for a DMA autosense transfer.  For
 * PIO/pseudo-DMA configs the "dma address" is just the kernel virtual
 * address (identity mapping).
 */
static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
641
/* Undo esp_map_sense() and clear the sense pointer so a future
 * autosense knows the buffer is no longer mapped.
 */
static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}
649
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data. I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY + a 6-byte REQUEST SENSE CDB in the command block. */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
703
/* Scan the queued-commands list for the first entry that can be issued
 * right now.  Autosense requests always win; otherwise a tag message is
 * built (if the device supports tagging) and a per-LUN slot is claimed.
 * Returns NULL when nothing is currently issuable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
734
/* If the chip is idle, pick the next issuable command, build the
 * IDENTIFY/tag/CDB bytes in the command block, program the per-target
 * registers and kick off selection.  Long/odd CDBs and multi-byte
 * negotiation messages use "select and stop" so the message-out phase
 * can be handled explicitly.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate. If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* If there are multiple message bytes, use Select and Stop */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		/* CDB bytes will be fed later during the command phase. */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the two tag message bytes to msg_out. */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
877
878static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
879{
880 struct list_head *head = &esp->esp_cmd_pool;
881 struct esp_cmd_entry *ret;
882
883 if (list_empty(head)) {
884 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
885 } else {
886 ret = list_entry(head->next, struct esp_cmd_entry, list);
887 list_del(&ret->list);
888 memset(ret, 0, sizeof(*ret));
889 }
890 return ret;
891}
892
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
897
/* Complete @cmd back to the midlayer: release DMA/tag resources, build
 * the result from @host_byte and the target status (or CHECK CONDITION
 * when autosense data was gathered), wake any error-handler waiter,
 * recycle the entry and try to issue the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned char host_byte)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = 0;
	set_host_byte(cmd, host_byte);
	if (host_byte == DID_OK)
		set_status_byte(cmd, ent->status);

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally. Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = SAM_STAT_CHECK_CONDITION;

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
946
David S. Millercd9ad582007-04-26 21:19:23 -0700947static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
948{
949 struct scsi_device *dev = ent->cmd->device;
950 struct esp_lun_data *lp = dev->hostdata;
951
952 scsi_track_queue_full(dev, lp->num_tagged - 1);
953}
954
Jeff Garzikf2812332010-11-16 02:10:29 -0500955static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
David S. Millercd9ad582007-04-26 21:19:23 -0700956{
957 struct scsi_device *dev = cmd->device;
Christoph Hellwig2b14ec72007-05-31 20:12:32 +0200958 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -0700959 struct esp_cmd_priv *spriv;
960 struct esp_cmd_entry *ent;
961
962 ent = esp_get_ent(esp);
963 if (!ent)
964 return SCSI_MLQUEUE_HOST_BUSY;
965
966 ent->cmd = cmd;
967
968 cmd->scsi_done = done;
969
970 spriv = ESP_CMD_PRIV(cmd);
Christoph Hellwig86117d72018-10-13 09:26:28 +0200971 spriv->num_sg = 0;
David S. Millercd9ad582007-04-26 21:19:23 -0700972
973 list_add_tail(&ent->list, &esp->queued_cmds);
974
975 esp_maybe_execute_command(esp);
976
977 return 0;
978}
979
/* Generate esp_queuecommand(), wrapping esp_queuecommand_lck() with the
 * host-lock acquisition expected by the SCSI midlayer.
 */
static DEF_SCSI_QCMD(esp_queuecommand)
981
David S. Millercd9ad582007-04-26 21:19:23 -0700982static int esp_check_gross_error(struct esp *esp)
983{
984 if (esp->sreg & ESP_STAT_SPAM) {
985 /* Gross Error, could be one of:
986 * - top of fifo overwritten
987 * - top of command register overwritten
988 * - DMA programmed with wrong direction
989 * - improper phase change
990 */
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100991 shost_printk(KERN_ERR, esp->host,
992 "Gross error sreg[%02x]\n", esp->sreg);
David S. Millercd9ad582007-04-26 21:19:23 -0700993 /* XXX Reset the chip. XXX */
994 return 1;
995 }
996 return 0;
997}
998
999static int esp_check_spur_intr(struct esp *esp)
1000{
1001 switch (esp->rev) {
1002 case ESP100:
1003 case ESP100A:
1004 /* The interrupt pending bit of the status register cannot
1005 * be trusted on these revisions.
1006 */
1007 esp->sreg &= ~ESP_STAT_INTR;
1008 break;
1009
1010 default:
1011 if (!(esp->sreg & ESP_STAT_INTR)) {
David S. Millercd9ad582007-04-26 21:19:23 -07001012 if (esp->ireg & ESP_INTR_SR)
1013 return 1;
1014
1015 /* If the DMA is indicating interrupt pending and the
1016 * ESP is not, the only possibility is a DMA error.
1017 */
1018 if (!esp->ops->dma_error(esp)) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001019 shost_printk(KERN_ERR, esp->host,
1020 "Spurious irq, sreg=%02x.\n",
1021 esp->sreg);
David S. Millercd9ad582007-04-26 21:19:23 -07001022 return -1;
1023 }
1024
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001025 shost_printk(KERN_ERR, esp->host, "DMA error\n");
David S. Millercd9ad582007-04-26 21:19:23 -07001026
1027 /* XXX Reset the chip. XXX */
1028 return -1;
1029 }
1030 break;
1031 }
1032
1033 return 0;
1034}
1035
1036static void esp_schedule_reset(struct esp *esp)
1037{
Sakari Ailusd75f7732019-03-25 21:32:28 +02001038 esp_log_reset("esp_schedule_reset() from %ps\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001039 __builtin_return_address(0));
1040 esp->flags |= ESP_FLAG_RESETTING;
1041 esp_event(esp, ESP_EVENT_RESET);
1042}
1043
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 *
 * Returns the command entry matching the tag delivered by the
 * reselecting target, or NULL on any protocol error or timeout (the
 * caller then schedules a bus reset).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait for the first interrupt of the tag transfer. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* The target must be in message-in phase to send the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes...  The 0xff pre-fill lets us detect
	 * later whether the bytes actually arrived.
	 */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (with 1us backoff) for function-done on the tag transfer. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* command_block[0] is the tag message type, [1] the tag value. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1138
/* Handle a reselection by a previously disconnected target: work out
 * which target/lun is reconnecting, find the corresponding command
 * entry (tagged or untagged) and make it the active command again.
 *
 * Returns 1 when a command was successfully reactivated, 0 when a bus
 * reset has been scheduled instead.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one bit may remain set (one reselecting target). */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Reload the per-target sync/config registers before talking. */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* An untagged command takes priority; otherwise poll the tag
	 * bytes out of the reselection message.
	 */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1223
/* Complete a selection attempt for the active command, interpreting
 * the interrupt register to distinguish: lost arbitration to a
 * reselecting target, disconnect (no such target), or successful
 * selection.  Returns 1 when the command completed (bad target), 0
 * otherwise; for the reselection case the return value is ignored and
 * the caller proceeds to esp_reconnect().
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting. */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		/* We lost arbitration: another target reselected us. */
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation, etc.
		 * we'll do the right thing as we transition to the next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	/* Anything else is an unknown interrupt combination: reset. */
	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1317
/* Compute how many bytes of the current data transfer actually made it
 * to/from the target, accounting for the chip's transfer counter, any
 * bytes still sitting in the FIFO, and per-chip errata.
 *
 * Returns the byte count, or -1 when the ESP100 sync-transfer bug was
 * detected (caller then resets and retries without sync).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each FIFO slot holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Read the remaining transfer count unless it hit zero
	 * (ESP_STAT_TCNT set means the counter expired).
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'peculiarity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0 '. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		/* Store the residual byte at its proper place in either
		 * the sense buffer or the mapped scatterlist.
		 */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* On reads, bytes still in the FIFO never reached memory. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1411
/* Program a freshly negotiated synchronous transfer agreement: record
 * it in the SPI transport attributes, update the per-target config,
 * and write the sync offset/period registers.  Called with all-zero
 * arguments to fall back to asynchronous transfers.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			/* Pick the fast-SCSI enable bit for this revision. */
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Periods below 50 (fast SCSI) set the fast bit;
			 * FASHME additionally drops the REQ/ACK delay.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache the programmed values so later phases can re-load them. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1450
/* Handle a MESSAGE REJECT from the target.  If we were negotiating
 * wide transfers, fall back and optionally start sync negotiation; if
 * we were negotiating sync, fall back to async.  A reject outside any
 * negotiation is unexpected and forces a bus reset.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Wide negotiation rejected: give up on wide transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Still need to negotiate sync: queue an SDTR. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Sync negotiation rejected: run asynchronously. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}
1490
/* Handle an incoming SDTR (synchronous data transfer request) message.
 * msg_in[3] is the transfer period, msg_in[4] the REQ/ACK offset.
 * Validates the target's response against our limits, converts the
 * period into a chip STP value, and programs the agreement via
 * esp_setsync().  Out-of-range responses are rejected or renegotiated.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* An SDTR we did not initiate is rejected. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: re-offer async (0/0). */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the period (in 4ns units) to chip clocks. */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1541
/* Handle an incoming WDTR (wide data transfer request) message.
 * msg_in[3] encodes the transfer width exponent (8 << n bits).  Only
 * FASHME supports wide transfers; valid responses update the config3
 * register and the per-target flags, then sync negotiation follows if
 * still required.  Invalid responses get MESSAGE REJECT.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* A WDTR we did not initiate is rejected. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide agreement invalidates any prior sync agreement. */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		/* Continue with sync negotiation via SDTR. */
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1590
1591static void esp_msgin_extended(struct esp *esp)
1592{
1593 struct esp_cmd_entry *ent = esp->active_cmd;
1594 struct scsi_cmnd *cmd = ent->cmd;
1595 struct esp_target_data *tp;
1596 int tgt = cmd->device->id;
1597
1598 tp = &esp->target[tgt];
1599 if (esp->msg_in[2] == EXTENDED_SDTR) {
1600 esp_msgin_sdtr(esp, tp);
1601 return;
1602 }
1603 if (esp->msg_in[2] == EXTENDED_WDTR) {
1604 esp_msgin_wdtr(esp, tp);
1605 return;
1606 }
1607
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001608 shost_printk(KERN_INFO, esp->host,
1609 "Unexpected extended msg type %x\n", esp->msg_in[2]);
David S. Millercd9ad582007-04-26 21:19:23 -07001610
Finn Thainc69edff52017-08-04 01:43:20 -04001611 esp->msg_out[0] = MESSAGE_REJECT;
David S. Millercd9ad582007-04-26 21:19:23 -07001612 esp->msg_out_len = 1;
1613 scsi_esp_cmd(esp, ESP_CMD_SATN);
1614}
1615
/* Analyze msgin bytes received from target so far. Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need at least the length byte, then msg_in[1]+2 total
		 * bytes (header + declared payload) before dispatching.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Only a residue count of 1 is meaningful here. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Back the data pointer up one byte, stepping to the
		 * previous scatterlist entry if we were at a boundary.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message and let the FREE_BUS event finish
		 * (or suspend) the command.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1690
1691static int esp_process_event(struct esp *esp)
1692{
Hannes Reinecke31708662014-11-24 15:37:24 +01001693 int write, i;
David S. Millercd9ad582007-04-26 21:19:23 -07001694
1695again:
1696 write = 0;
Hannes Reinecke1af6f602014-11-24 15:37:22 +01001697 esp_log_event("process event %d phase %x\n",
1698 esp->event, esp->sreg & ESP_STAT_PMASK);
David S. Millercd9ad582007-04-26 21:19:23 -07001699 switch (esp->event) {
1700 case ESP_EVENT_CHECK_PHASE:
1701 switch (esp->sreg & ESP_STAT_PMASK) {
1702 case ESP_DOP:
1703 esp_event(esp, ESP_EVENT_DATA_OUT);
1704 break;
1705 case ESP_DIP:
1706 esp_event(esp, ESP_EVENT_DATA_IN);
1707 break;
1708 case ESP_STATP:
1709 esp_flush_fifo(esp);
1710 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1711 esp_event(esp, ESP_EVENT_STATUS);
1712 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1713 return 1;
1714
1715 case ESP_MOP:
1716 esp_event(esp, ESP_EVENT_MSGOUT);
1717 break;
1718
1719 case ESP_MIP:
1720 esp_event(esp, ESP_EVENT_MSGIN);
1721 break;
1722
1723 case ESP_CMDP:
1724 esp_event(esp, ESP_EVENT_CMD_START);
1725 break;
1726
1727 default:
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001728 shost_printk(KERN_INFO, esp->host,
1729 "Unexpected phase, sreg=%02x\n",
1730 esp->sreg);
David S. Millercd9ad582007-04-26 21:19:23 -07001731 esp_schedule_reset(esp);
1732 return 0;
1733 }
1734 goto again;
David S. Millercd9ad582007-04-26 21:19:23 -07001735
1736 case ESP_EVENT_DATA_IN:
1737 write = 1;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001738 fallthrough;
David S. Millercd9ad582007-04-26 21:19:23 -07001739
1740 case ESP_EVENT_DATA_OUT: {
1741 struct esp_cmd_entry *ent = esp->active_cmd;
1742 struct scsi_cmnd *cmd = ent->cmd;
1743 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1744 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1745
1746 if (esp->rev == ESP100)
1747 scsi_esp_cmd(esp, ESP_CMD_NULL);
1748
1749 if (write)
1750 ent->flags |= ESP_CMD_FLAG_WRITE;
1751 else
1752 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1753
Finn Thain6fe07aa2008-04-25 10:06:05 -05001754 if (esp->ops->dma_length_limit)
1755 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1756 dma_len);
1757 else
1758 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1759
David S. Millercd9ad582007-04-26 21:19:23 -07001760 esp->data_dma_len = dma_len;
1761
1762 if (!dma_len) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001763 shost_printk(KERN_ERR, esp->host,
1764 "DMA length is zero!\n");
1765 shost_printk(KERN_ERR, esp->host,
1766 "cur adr[%08llx] len[%08x]\n",
1767 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1768 esp_cur_dma_len(ent, cmd));
David S. Millercd9ad582007-04-26 21:19:23 -07001769 esp_schedule_reset(esp);
1770 return 0;
1771 }
1772
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001773 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
Alexey Dobriyane1f2a092007-04-27 15:19:27 -07001774 (unsigned long long)dma_addr, dma_len, write);
David S. Millercd9ad582007-04-26 21:19:23 -07001775
1776 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1777 write, ESP_CMD_DMA | ESP_CMD_TI);
1778 esp_event(esp, ESP_EVENT_DATA_DONE);
1779 break;
1780 }
1781 case ESP_EVENT_DATA_DONE: {
1782 struct esp_cmd_entry *ent = esp->active_cmd;
1783 struct scsi_cmnd *cmd = ent->cmd;
1784 int bytes_sent;
1785
1786 if (esp->ops->dma_error(esp)) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001787 shost_printk(KERN_INFO, esp->host,
1788 "data done, DMA error, resetting\n");
David S. Millercd9ad582007-04-26 21:19:23 -07001789 esp_schedule_reset(esp);
1790 return 0;
1791 }
1792
1793 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1794 /* XXX parity errors, etc. XXX */
1795
1796 esp->ops->dma_drain(esp);
1797 }
1798 esp->ops->dma_invalidate(esp);
1799
1800 if (esp->ireg != ESP_INTR_BSERV) {
1801 /* We should always see exactly a bus-service
1802 * interrupt at the end of a successful transfer.
1803 */
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001804 shost_printk(KERN_INFO, esp->host,
1805 "data done, not BSERV, resetting\n");
David S. Millercd9ad582007-04-26 21:19:23 -07001806 esp_schedule_reset(esp);
1807 return 0;
1808 }
1809
1810 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1811
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001812 esp_log_datadone("data done flgs[%x] sent[%d]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001813 ent->flags, bytes_sent);
1814
1815 if (bytes_sent < 0) {
1816 /* XXX force sync mode for this target XXX */
1817 esp_schedule_reset(esp);
1818 return 0;
1819 }
1820
1821 esp_advance_dma(esp, ent, cmd, bytes_sent);
1822 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1823 goto again;
David S. Millercd9ad582007-04-26 21:19:23 -07001824 }
1825
1826 case ESP_EVENT_STATUS: {
1827 struct esp_cmd_entry *ent = esp->active_cmd;
1828
1829 if (esp->ireg & ESP_INTR_FDONE) {
1830 ent->status = esp_read8(ESP_FDATA);
1831 ent->message = esp_read8(ESP_FDATA);
1832 scsi_esp_cmd(esp, ESP_CMD_MOK);
1833 } else if (esp->ireg == ESP_INTR_BSERV) {
1834 ent->status = esp_read8(ESP_FDATA);
1835 ent->message = 0xff;
1836 esp_event(esp, ESP_EVENT_MSGIN);
1837 return 0;
1838 }
1839
1840 if (ent->message != COMMAND_COMPLETE) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001841 shost_printk(KERN_INFO, esp->host,
1842 "Unexpected message %x in status\n",
1843 ent->message);
David S. Millercd9ad582007-04-26 21:19:23 -07001844 esp_schedule_reset(esp);
1845 return 0;
1846 }
1847
1848 esp_event(esp, ESP_EVENT_FREE_BUS);
1849 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1850 break;
1851 }
1852 case ESP_EVENT_FREE_BUS: {
1853 struct esp_cmd_entry *ent = esp->active_cmd;
1854 struct scsi_cmnd *cmd = ent->cmd;
1855
1856 if (ent->message == COMMAND_COMPLETE ||
1857 ent->message == DISCONNECT)
1858 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1859
1860 if (ent->message == COMMAND_COMPLETE) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001861 esp_log_cmddone("Command done status[%x] message[%x]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001862 ent->status, ent->message);
1863 if (ent->status == SAM_STAT_TASK_SET_FULL)
1864 esp_event_queue_full(esp, ent);
1865
1866 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1867 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1868 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1869 esp_autosense(esp, ent);
1870 } else {
Hannes Reinecke6b505292021-01-13 10:04:53 +01001871 esp_cmd_is_done(esp, ent, cmd, DID_OK);
David S. Millercd9ad582007-04-26 21:19:23 -07001872 }
1873 } else if (ent->message == DISCONNECT) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001874 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001875 cmd->device->id,
1876 ent->tag[0], ent->tag[1]);
1877
1878 esp->active_cmd = NULL;
1879 esp_maybe_execute_command(esp);
1880 } else {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001881 shost_printk(KERN_INFO, esp->host,
1882 "Unexpected message %x in freebus\n",
1883 ent->message);
David S. Millercd9ad582007-04-26 21:19:23 -07001884 esp_schedule_reset(esp);
1885 return 0;
1886 }
1887 if (esp->active_cmd)
1888 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1889 break;
1890 }
1891 case ESP_EVENT_MSGOUT: {
1892 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1893
1894 if (esp_debug & ESP_DEBUG_MSGOUT) {
1895 int i;
1896 printk("ESP: Sending message [ ");
1897 for (i = 0; i < esp->msg_out_len; i++)
1898 printk("%02x ", esp->msg_out[i]);
1899 printk("]\n");
1900 }
1901
1902 if (esp->rev == FASHME) {
1903 int i;
1904
1905 /* Always use the fifo. */
1906 for (i = 0; i < esp->msg_out_len; i++) {
1907 esp_write8(esp->msg_out[i], ESP_FDATA);
1908 esp_write8(0, ESP_FDATA);
1909 }
1910 scsi_esp_cmd(esp, ESP_CMD_TI);
1911 } else {
1912 if (esp->msg_out_len == 1) {
1913 esp_write8(esp->msg_out[0], ESP_FDATA);
1914 scsi_esp_cmd(esp, ESP_CMD_TI);
Hannes Reinecke31708662014-11-24 15:37:24 +01001915 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1916 for (i = 0; i < esp->msg_out_len; i++)
1917 esp_write8(esp->msg_out[i], ESP_FDATA);
1918 scsi_esp_cmd(esp, ESP_CMD_TI);
David S. Millercd9ad582007-04-26 21:19:23 -07001919 } else {
1920 /* Use DMA. */
1921 memcpy(esp->command_block,
1922 esp->msg_out,
1923 esp->msg_out_len);
1924
1925 esp->ops->send_dma_cmd(esp,
1926 esp->command_block_dma,
1927 esp->msg_out_len,
1928 esp->msg_out_len,
1929 0,
1930 ESP_CMD_DMA|ESP_CMD_TI);
1931 }
1932 }
1933 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1934 break;
1935 }
1936 case ESP_EVENT_MSGOUT_DONE:
1937 if (esp->rev == FASHME) {
1938 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1939 } else {
1940 if (esp->msg_out_len > 1)
1941 esp->ops->dma_invalidate(esp);
David S. Millercd9ad582007-04-26 21:19:23 -07001942
Finn Thain201c37d2017-08-04 01:43:19 -04001943 /* XXX if the chip went into disconnected mode,
1944 * we can't run the phase state machine anyway.
1945 */
1946 if (!(esp->ireg & ESP_INTR_DC))
David S. Millercd9ad582007-04-26 21:19:23 -07001947 scsi_esp_cmd(esp, ESP_CMD_NULL);
1948 }
Finn Thain201c37d2017-08-04 01:43:19 -04001949
Finn Thaind60e9ee2017-08-04 01:43:20 -04001950 esp->msg_out_len = 0;
1951
David S. Millercd9ad582007-04-26 21:19:23 -07001952 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1953 goto again;
1954 case ESP_EVENT_MSGIN:
1955 if (esp->ireg & ESP_INTR_BSERV) {
1956 if (esp->rev == FASHME) {
1957 if (!(esp_read8(ESP_STATUS2) &
1958 ESP_STAT2_FEMPTY))
1959 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1960 } else {
1961 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1962 if (esp->rev == ESP100)
1963 scsi_esp_cmd(esp, ESP_CMD_NULL);
1964 }
1965 scsi_esp_cmd(esp, ESP_CMD_TI);
1966 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1967 return 1;
1968 }
1969 if (esp->ireg & ESP_INTR_FDONE) {
1970 u8 val;
1971
1972 if (esp->rev == FASHME)
1973 val = esp->fifo[0];
1974 else
1975 val = esp_read8(ESP_FDATA);
1976 esp->msg_in[esp->msg_in_len++] = val;
1977
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001978 esp_log_msgin("Got msgin byte %x\n", val);
David S. Millercd9ad582007-04-26 21:19:23 -07001979
1980 if (!esp_msgin_process(esp))
1981 esp->msg_in_len = 0;
1982
1983 if (esp->rev == FASHME)
1984 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1985
1986 scsi_esp_cmd(esp, ESP_CMD_MOK);
1987
Finn Thainc69edff52017-08-04 01:43:20 -04001988 /* Check whether a bus reset is to be done next */
1989 if (esp->event == ESP_EVENT_RESET)
1990 return 0;
1991
David S. Millercd9ad582007-04-26 21:19:23 -07001992 if (esp->event != ESP_EVENT_FREE_BUS)
1993 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1994 } else {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001995 shost_printk(KERN_INFO, esp->host,
1996 "MSGIN neither BSERV not FDON, resetting");
David S. Millercd9ad582007-04-26 21:19:23 -07001997 esp_schedule_reset(esp);
1998 return 0;
1999 }
2000 break;
2001 case ESP_EVENT_CMD_START:
2002 memcpy(esp->command_block, esp->cmd_bytes_ptr,
2003 esp->cmd_bytes_left);
Hannes Reinecke31708662014-11-24 15:37:24 +01002004 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
David S. Millercd9ad582007-04-26 21:19:23 -07002005 esp_event(esp, ESP_EVENT_CMD_DONE);
2006 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2007 break;
2008 case ESP_EVENT_CMD_DONE:
2009 esp->ops->dma_invalidate(esp);
2010 if (esp->ireg & ESP_INTR_BSERV) {
2011 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2012 goto again;
2013 }
2014 esp_schedule_reset(esp);
2015 return 0;
David S. Millercd9ad582007-04-26 21:19:23 -07002016
2017 case ESP_EVENT_RESET:
2018 scsi_esp_cmd(esp, ESP_CMD_RS);
2019 break;
2020
2021 default:
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002022 shost_printk(KERN_INFO, esp->host,
2023 "Unexpected event %x, resetting\n", esp->event);
David S. Millercd9ad582007-04-26 21:19:23 -07002024 esp_schedule_reset(esp);
2025 return 0;
David S. Millercd9ad582007-04-26 21:19:23 -07002026 }
2027 return 1;
2028}
2029
/* Fail one active command with DID_RESET and release all of its
 * driver resources (DMA mapping, LUN tag, auto-sense buffer, entry).
 * Called from esp_reset_cleanup() for each entry on the active list.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	/* An auto-sense buffer may still be mapped if the reset hit
	 * mid-REQUEST_SENSE; unmap it before completing the command.
	 */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2045
/* Per-device callback (see __starget_for_each_device() in
 * esp_reset_cleanup()): clear the LUN "hold" flag after a bus reset.
 * By this point all tagged commands must have been failed already.
 */
static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	/* Reset cleanup completed every outstanding command, so no
	 * tagged commands may remain accounted on this LUN.
	 */
	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}
2053
/* Fail every queued and active command with DID_RESET and return all
 * per-target negotiation state to its power-on defaults so that sync,
 * wide, and tagged operation get renegotiated from scratch.
 * Runs under host->lock (called from __esp_interrupt() and the host
 * reset handler).
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands still on the queued list were never issued; they
	 * only need completion, no DMA/tag teardown.
	 */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	/* The active command must have been on the active list above. */
	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2094
2095/* Runs under host->lock */
2096static void __esp_interrupt(struct esp *esp)
2097{
2098 int finish_reset, intr_done;
2099 u8 phase;
2100
Hannes Reinecke9535fff2014-11-24 15:37:23 +01002101 /*
2102 * Once INTRPT is read STATUS and SSTEP are cleared.
2103 */
David S. Millercd9ad582007-04-26 21:19:23 -07002104 esp->sreg = esp_read8(ESP_STATUS);
Hannes Reinecke9535fff2014-11-24 15:37:23 +01002105 esp->seqreg = esp_read8(ESP_SSTEP);
2106 esp->ireg = esp_read8(ESP_INTRPT);
David S. Millercd9ad582007-04-26 21:19:23 -07002107
2108 if (esp->flags & ESP_FLAG_RESETTING) {
2109 finish_reset = 1;
2110 } else {
2111 if (esp_check_gross_error(esp))
2112 return;
2113
2114 finish_reset = esp_check_spur_intr(esp);
2115 if (finish_reset < 0)
2116 return;
2117 }
2118
David S. Millercd9ad582007-04-26 21:19:23 -07002119 if (esp->ireg & ESP_INTR_SR)
2120 finish_reset = 1;
2121
2122 if (finish_reset) {
2123 esp_reset_cleanup(esp);
2124 if (esp->eh_reset) {
2125 complete(esp->eh_reset);
2126 esp->eh_reset = NULL;
2127 }
2128 return;
2129 }
2130
2131 phase = (esp->sreg & ESP_STAT_PMASK);
2132 if (esp->rev == FASHME) {
2133 if (((phase != ESP_DIP && phase != ESP_DOP) &&
2134 esp->select_state == ESP_SELECT_NONE &&
2135 esp->event != ESP_EVENT_STATUS &&
2136 esp->event != ESP_EVENT_DATA_DONE) ||
2137 (esp->ireg & ESP_INTR_RSEL)) {
2138 esp->sreg2 = esp_read8(ESP_STATUS2);
2139 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2140 (esp->sreg2 & ESP_STAT2_F1BYTE))
2141 hme_read_fifo(esp);
2142 }
2143 }
2144
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002145 esp_log_intr("intr sreg[%02x] seqreg[%02x] "
David S. Millercd9ad582007-04-26 21:19:23 -07002146 "sreg2[%02x] ireg[%02x]\n",
2147 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2148
2149 intr_done = 0;
2150
2151 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002152 shost_printk(KERN_INFO, esp->host,
2153 "unexpected IREG %02x\n", esp->ireg);
David S. Millercd9ad582007-04-26 21:19:23 -07002154 if (esp->ireg & ESP_INTR_IC)
2155 esp_dump_cmd_log(esp);
2156
2157 esp_schedule_reset(esp);
2158 } else {
Finn Thain201c37d2017-08-04 01:43:19 -04002159 if (esp->ireg & ESP_INTR_RSEL) {
David S. Millercd9ad582007-04-26 21:19:23 -07002160 if (esp->active_cmd)
2161 (void) esp_finish_select(esp);
2162 intr_done = esp_reconnect(esp);
Finn Thain201c37d2017-08-04 01:43:19 -04002163 } else {
2164 /* Some combination of FDONE, BSERV, DC. */
2165 if (esp->select_state != ESP_SELECT_NONE)
2166 intr_done = esp_finish_select(esp);
David S. Millercd9ad582007-04-26 21:19:23 -07002167 }
2168 }
2169 while (!intr_done)
2170 intr_done = esp_process_event(esp);
2171}
2172
/**
 * scsi_esp_intr - shared IRQ handler for all ESP front-end drivers
 * @irq: interrupt number (unused)
 * @dev_id: the struct esp registered with request_irq()
 *
 * Services the chip while an interrupt is pending.  When the state
 * machine sets ESP_FLAG_QUICKIRQ_CHECK it expects another interrupt
 * almost immediately, so poll irq_pending() a bounded number of times
 * (ESP_QUICKIRQ_LIMIT) instead of taking a fresh hardware interrupt.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Short busy-wait for the follow-up interrupt. */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2204
Adrian Bunk76246802007-10-11 17:35:20 +02002205static void esp_get_revision(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002206{
2207 u8 val;
2208
2209 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002210 if (esp->config2 == 0) {
2211 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
David S. Millercd9ad582007-04-26 21:19:23 -07002212 esp_write8(esp->config2, ESP_CFG2);
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002213
2214 val = esp_read8(ESP_CFG2);
2215 val &= ~ESP_CONFIG2_MAGIC;
2216
2217 esp->config2 = 0;
2218 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2219 /*
2220 * If what we write to cfg2 does not come back,
2221 * cfg2 is not implemented.
2222 * Therefore this must be a plain esp100.
2223 */
2224 esp->rev = ESP100;
2225 return;
2226 }
2227 }
2228
2229 esp_set_all_config3(esp, 5);
2230 esp->prev_cfg3 = 5;
2231 esp_write8(esp->config2, ESP_CFG2);
2232 esp_write8(0, ESP_CFG3);
2233 esp_write8(esp->prev_cfg3, ESP_CFG3);
2234
2235 val = esp_read8(ESP_CFG3);
2236 if (val != 5) {
2237 /* The cfg2 register is implemented, however
2238 * cfg3 is not, must be esp100a.
2239 */
2240 esp->rev = ESP100A;
2241 } else {
2242 esp_set_all_config3(esp, 0);
2243 esp->prev_cfg3 = 0;
David S. Millercd9ad582007-04-26 21:19:23 -07002244 esp_write8(esp->prev_cfg3, ESP_CFG3);
2245
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002246 /* All of cfg{1,2,3} implemented, must be one of
2247 * the fas variants, figure out which one.
2248 */
2249 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2250 esp->rev = FAST;
2251 esp->sync_defp = SYNC_DEFP_FAST;
David S. Millercd9ad582007-04-26 21:19:23 -07002252 } else {
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002253 esp->rev = ESP236;
David S. Millercd9ad582007-04-26 21:19:23 -07002254 }
2255 }
2256}
2257
Adrian Bunk76246802007-10-11 17:35:20 +02002258static void esp_init_swstate(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002259{
2260 int i;
2261
2262 INIT_LIST_HEAD(&esp->queued_cmds);
2263 INIT_LIST_HEAD(&esp->active_cmds);
2264 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2265
2266 /* Start with a clear state, domain validation (via ->slave_configure,
2267 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2268 * commands.
2269 */
2270 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2271 esp->target[i].flags = 0;
2272 esp->target[i].nego_goal_period = 0;
2273 esp->target[i].nego_goal_offset = 0;
2274 esp->target[i].nego_goal_width = 0;
2275 esp->target[i].nego_goal_tags = 0;
2276 }
2277}
2278
2279/* This places the ESP into a known state at boot time. */
Martin Habetsd679f802007-05-07 14:05:03 -07002280static void esp_bootup_reset(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002281{
2282 u8 val;
2283
2284 /* Reset the DMA */
2285 esp->ops->reset_dma(esp);
2286
2287 /* Reset the ESP */
2288 esp_reset_esp(esp);
2289
2290 /* Reset the SCSI bus, but tell ESP not to generate an irq */
2291 val = esp_read8(ESP_CFG1);
2292 val |= ESP_CONFIG1_SRRDISAB;
2293 esp_write8(val, ESP_CFG1);
2294
2295 scsi_esp_cmd(esp, ESP_CMD_RS);
2296 udelay(400);
2297
2298 esp_write8(esp->config1, ESP_CFG1);
2299
2300 /* Eat any bitrot in the chip and we are done... */
2301 esp_read8(ESP_INTRPT);
2302}
2303
Adrian Bunk76246802007-10-11 17:35:20 +02002304static void esp_set_clock_params(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002305{
Finn Thain6fe07aa2008-04-25 10:06:05 -05002306 int fhz;
David S. Millercd9ad582007-04-26 21:19:23 -07002307 u8 ccf;
2308
2309 /* This is getting messy but it has to be done correctly or else
2310 * you get weird behavior all over the place. We are trying to
2311 * basically figure out three pieces of information.
2312 *
2313 * a) Clock Conversion Factor
2314 *
2315 * This is a representation of the input crystal clock frequency
2316 * going into the ESP on this machine. Any operation whose timing
2317 * is longer than 400ns depends on this value being correct. For
2318 * example, you'll get blips for arbitration/selection during high
2319 * load or with multiple targets if this is not set correctly.
2320 *
2321 * b) Selection Time-Out
2322 *
2323 * The ESP isn't very bright and will arbitrate for the bus and try
2324 * to select a target forever if you let it. This value tells the
2325 * ESP when it has taken too long to negotiate and that it should
2326 * interrupt the CPU so we can see what happened. The value is
2327 * computed as follows (from NCR/Symbios chip docs).
2328 *
2329 * (Time Out Period) * (Input Clock)
2330 * STO = ----------------------------------
2331 * (8192) * (Clock Conversion Factor)
2332 *
2333 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2334 *
2335 * c) Imperical constants for synchronous offset and transfer period
2336 * register values
2337 *
2338 * This entails the smallest and largest sync period we could ever
2339 * handle on this ESP.
2340 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002341 fhz = esp->cfreq;
David S. Millercd9ad582007-04-26 21:19:23 -07002342
Finn Thain6fe07aa2008-04-25 10:06:05 -05002343 ccf = ((fhz / 1000000) + 4) / 5;
David S. Millercd9ad582007-04-26 21:19:23 -07002344 if (ccf == 1)
2345 ccf = 2;
2346
2347 /* If we can't find anything reasonable, just assume 20MHZ.
2348 * This is the clock frequency of the older sun4c's where I've
2349 * been unable to find the clock-frequency PROM property. All
2350 * other machines provide useful values it seems.
2351 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002352 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2353 fhz = 20000000;
David S. Millercd9ad582007-04-26 21:19:23 -07002354 ccf = 4;
2355 }
2356
2357 esp->cfact = (ccf == 8 ? 0 : ccf);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002358 esp->cfreq = fhz;
2359 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
David S. Millercd9ad582007-04-26 21:19:23 -07002360 esp->ctick = ESP_TICK(ccf, esp->ccycle);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002361 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
David S. Millercd9ad582007-04-26 21:19:23 -07002362 esp->sync_defp = SYNC_DEFP_SLOW;
2363}
2364
/* Human-readable chip names, indexed by esp->rev
 * (used by scsi_esp_register() when announcing the host).
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"AM53C974",
	"53CF9x-2",
	"FAS100A",
	"FAST",
	"FASHME",
};

/* SPI transport template shared by every registered ESP host;
 * attached in esp_init(), released in esp_exit().
 */
static struct scsi_transport_template *esp_transport_template;
2378
Christoph Hellwig44b1b4d2018-10-13 09:26:26 +02002379int scsi_esp_register(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002380{
2381 static int instance;
2382 int err;
2383
Hannes Reinecke3707a182014-11-24 15:37:20 +01002384 if (!esp->num_tags)
2385 esp->num_tags = ESP_DEFAULT_TAGS;
David S. Millercd9ad582007-04-26 21:19:23 -07002386 esp->host->transportt = esp_transport_template;
2387 esp->host->max_lun = ESP_MAX_LUN;
2388 esp->host->cmd_per_lun = 2;
David Millerff4abd62007-08-24 22:25:58 -07002389 esp->host->unique_id = instance;
David S. Millercd9ad582007-04-26 21:19:23 -07002390
2391 esp_set_clock_params(esp);
2392
2393 esp_get_revision(esp);
2394
2395 esp_init_swstate(esp);
2396
2397 esp_bootup_reset(esp);
2398
Christoph Hellwig44b1b4d2018-10-13 09:26:26 +02002399 dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002400 esp->host->unique_id, esp->regs, esp->dma_regs,
2401 esp->host->irq);
Christoph Hellwig44b1b4d2018-10-13 09:26:26 +02002402 dev_printk(KERN_INFO, esp->dev,
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002403 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2404 esp->host->unique_id, esp_chip_names[esp->rev],
2405 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
David S. Millercd9ad582007-04-26 21:19:23 -07002406
2407 /* Let the SCSI bus reset settle. */
2408 ssleep(esp_bus_reset_settle);
2409
Christoph Hellwig44b1b4d2018-10-13 09:26:26 +02002410 err = scsi_add_host(esp->host, esp->dev);
David S. Millercd9ad582007-04-26 21:19:23 -07002411 if (err)
2412 return err;
2413
David Millerff4abd62007-08-24 22:25:58 -07002414 instance++;
David S. Millercd9ad582007-04-26 21:19:23 -07002415
2416 scsi_scan_host(esp->host);
2417
2418 return 0;
2419}
2420EXPORT_SYMBOL(scsi_esp_register);
2421
Adrian Bunk76246802007-10-11 17:35:20 +02002422void scsi_esp_unregister(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002423{
2424 scsi_remove_host(esp->host);
2425}
2426EXPORT_SYMBOL(scsi_esp_unregister);
2427
James Bottomleyec5e69f2008-06-23 14:52:09 -05002428static int esp_target_alloc(struct scsi_target *starget)
2429{
2430 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2431 struct esp_target_data *tp = &esp->target[starget->id];
2432
2433 tp->starget = starget;
2434
2435 return 0;
2436}
2437
2438static void esp_target_destroy(struct scsi_target *starget)
2439{
2440 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2441 struct esp_target_data *tp = &esp->target[starget->id];
2442
2443 tp->starget = NULL;
2444}
2445
David S. Millercd9ad582007-04-26 21:19:23 -07002446static int esp_slave_alloc(struct scsi_device *dev)
2447{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002448 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002449 struct esp_target_data *tp = &esp->target[dev->id];
2450 struct esp_lun_data *lp;
2451
2452 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2453 if (!lp)
2454 return -ENOMEM;
2455 dev->hostdata = lp;
2456
David S. Millercd9ad582007-04-26 21:19:23 -07002457 spi_min_period(tp->starget) = esp->min_period;
2458 spi_max_offset(tp->starget) = 15;
2459
2460 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2461 spi_max_width(tp->starget) = 1;
2462 else
2463 spi_max_width(tp->starget) = 0;
2464
2465 return 0;
2466}
2467
/* Midlayer slave_configure hook: enable tagged queueing when the
 * device supports it, allow disconnects, and kick off SPI domain
 * validation (which negotiates sync/wide) on first configure.
 */
static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}
2483
2484static void esp_slave_destroy(struct scsi_device *dev)
2485{
2486 struct esp_lun_data *lp = dev->hostdata;
2487
2488 kfree(lp);
2489 dev->hostdata = NULL;
2490}
2491
/* Error-handler abort: try to cancel @cmd.  A command still on the
 * queued list is completed directly with DID_ABORT; the currently
 * active command is aborted by queueing an ABORT TASK SET message and
 * waiting (up to 5s) for the state machine to complete it.
 * Disconnected commands cannot be aborted here and return FAILED so
 * the midlayer escalates to a bus/host reset.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* First look for the command on the not-yet-issued queue. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus. If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected. This is not easy to
		 * abort. For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target. Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Wait for the interrupt path to complete the abort. */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		/* Timed out: detach the completion so a late finish
		 * does not touch our stack-allocated eh_done.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2608
/* Error-handler bus reset: issue a SCSI bus reset command to the chip
 * and wait for __esp_interrupt() to complete esp->eh_reset after the
 * reset cleanup runs.  Returns FAILED if no reset interrupt arrives
 * within 5 seconds.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to recover from the bus reset. */
	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* Timed out: detach the stack-allocated completion. */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2643
2644/* All bets are off, reset the entire device. */
/* All bets are off, reset the entire device: full chip + bus reset,
 * fail every outstanding command, then let the bus settle.
 */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to recover from the bus reset. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2659
/* Midlayer info hook: return a short, constant driver description. */
static const char *esp_info(struct Scsi_Host *host)
{
	static const char info[] = "esp";

	return info;
}
2664
/* Host template shared by all bus-specific ESP front ends; they copy
 * or reference this and call scsi_esp_register().
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 0xffff,
	/* The driver sleeps for the settle time itself in
	 * scsi_esp_register() and the reset handlers.
	 */
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2685
2686static void esp_get_signalling(struct Scsi_Host *host)
2687{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002688 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002689 enum spi_signal_type type;
2690
2691 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2692 type = SPI_SIGNAL_HVD;
2693 else
2694 type = SPI_SIGNAL_SE;
2695
2696 spi_signalling(host) = type;
2697}
2698
2699static void esp_set_offset(struct scsi_target *target, int offset)
2700{
2701 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002702 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002703 struct esp_target_data *tp = &esp->target[target->id];
2704
Finn Thain02507a82009-12-05 12:30:42 +11002705 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2706 tp->nego_goal_offset = 0;
2707 else
2708 tp->nego_goal_offset = offset;
David S. Millercd9ad582007-04-26 21:19:23 -07002709 tp->flags |= ESP_TGT_CHECK_NEGO;
2710}
2711
2712static void esp_set_period(struct scsi_target *target, int period)
2713{
2714 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002715 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002716 struct esp_target_data *tp = &esp->target[target->id];
2717
2718 tp->nego_goal_period = period;
2719 tp->flags |= ESP_TGT_CHECK_NEGO;
2720}
2721
2722static void esp_set_width(struct scsi_target *target, int width)
2723{
2724 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002725 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002726 struct esp_target_data *tp = &esp->target[target->id];
2727
2728 tp->nego_goal_width = (width ? 1 : 0);
2729 tp->flags |= ESP_TGT_CHECK_NEGO;
2730}
2731
/* Hooks exposed to the SPI transport class (sysfs sync/wide knobs). */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2741
/* Module init: verify the per-command private data fits in the
 * midlayer-provided scsi_pointer, then attach the SPI transport
 * template used by every ESP host.
 */
static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}
2753
/* Module exit: release the SPI transport template from esp_init(). */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2758
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Seconds to wait after a SCSI bus reset before talking to targets. */
module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
	 "ESP scsi bus reset delay in seconds");
2767
2768module_param(esp_debug, int, 0);
2769MODULE_PARM_DESC(esp_debug,
2770"ESP bitmapped debugging message enable value:\n"
2771" 0x00000001 Log interrupt events\n"
2772" 0x00000002 Log scsi commands\n"
2773" 0x00000004 Log resets\n"
2774" 0x00000008 Log message in events\n"
2775" 0x00000010 Log message out events\n"
2776" 0x00000020 Log command completion\n"
2777" 0x00000040 Log disconnects\n"
2778" 0x00000080 Log data start\n"
2779" 0x00000100 Log data done\n"
2780" 0x00000200 Log reconnects\n"
2781" 0x00000400 Log auto-sense data\n"
2782);
2783
/* Module entry/exit points. */
module_init(esp_init);
module_exit(esp_exit);
Finn Thain53dce332018-10-16 16:31:25 +11002786
2787#ifdef CONFIG_SCSI_ESP_PIO
/* Poll (up to 500000 iterations, 1us apart) until the chip FIFO holds
 * at least one byte.  Returns the FIFO byte count, or 0 on timeout
 * after logging the chip status.
 */
static inline unsigned int esp_wait_for_fifo(struct esp *esp)
{
	int i = 500000;

	do {
		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
		     esp_read8(ESP_STATUS));
	return 0;
}
2805
/* Poll (up to 500000 iterations, 1us apart) until the chip raises an
 * interrupt, latching the status register into esp->sreg each pass.
 * Returns 0 when an interrupt is pending, 1 on timeout.
 */
static inline int esp_wait_for_intr(struct esp *esp)
{
	int i = 500000;

	do {
		esp->sreg = esp_read8(ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
		     esp->sreg);
	return 1;
}
2822
#define ESP_FIFO_SIZE 16

/* Programmed-I/O fallback for send_dma_cmd(): move @esp_count bytes
 * between memory at @addr and the chip FIFO one interrupt at a time,
 * in the bus phase latched in esp->sreg.  @write is the transfer
 * direction (nonzero = chip to memory); @cmd is issued with its DMA
 * bit stripped.  On exit esp->send_cmd_error flags a failed transfer
 * and esp->send_cmd_residual holds the untransferred byte count.
 */
void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
		      u32 dma_count, int write, u8 cmd)
{
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	cmd &= ~ESP_CMD_DMA;
	esp->send_cmd_error = 0;

	if (write) {
		u8 *dst = (u8 *)addr;
		/* Interrupt bits that are fatal in this phase. */
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			if (!esp_wait_for_fifo(esp))
				break;

			*dst++ = readb(esp->fifo_reg);
			--esp_count;

			if (!esp_count)
				break;

			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Stop when the target changes bus phase. */
			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & mask) {
				esp->send_cmd_error = 1;
				break;
			}

			/* In message-in, ack each byte before asking
			 * for the next transfer.
			 */
			if (phase == ESP_MIP)
				esp_write8(ESP_CMD_MOK, ESP_CMD);

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	} else {
		unsigned int n = ESP_FIFO_SIZE;
		u8 *src = (u8 *)addr;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		/* Preload up to a full FIFO before starting. */
		if (n > esp_count)
			n = esp_count;
		writesb(esp->fifo_reg, src, n);
		src += n;
		esp_count -= n;

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Stop when the target changes bus phase. */
			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Top the FIFO back up with whatever room the
			 * chip reports free.
			 */
			n = ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

			if (n > esp_count)
				n = esp_count;
			writesb(esp->fifo_reg, src, n);
			src += n;
			esp_count -= n;

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	}

	esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
2913#endif