/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

#include "qla_target.h"
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return (first);
}

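/*
 * Poll for response-queue work by invoking the interrupt handler
 * directly; ISP82xx (P3P) adapters use their dedicated poll routine.
 */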
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

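/*
 * Byte-swap the buffer at @fcp in place, one 32-bit word at a time;
 * any remainder beyond a multiple of four bytes is left untouched.
 */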
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

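/*
 * Copy @bsize bytes from @src to @dst, converting each 32-bit word
 * from CPU byte order to the adapter's little-endian format.
 */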
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}

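/*
 * Return every DSD on @ctx->dsd_list to the DL DMA pool, free the
 * list nodes and leave the list empty.
 */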
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

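/*
 * Set @fcport->disc_state and shift the new state into the
 * shadow_disc_state history (four bits per state), logging the transition.
 */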
static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, port_dstate_str[old_val & mask],
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}

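/*
 * Return 1 if the HBA should perform protection-information (T10 PI)
 * error checking for this command, based on the SCSI protection
 * operation and the ql2xenablehba_err_chk module parameter.
 */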
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

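/* Return true while the ISP is resetting or its firmware is not started. */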
static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

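/* Zero @sp and initialize the fields common to every SRB. */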
static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;
	INIT_LIST_HEAD(&sp->elem);
}

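/*
 * Allocate and initialize an SRB from @qpair's mempool; returns NULL if
 * the queue pair cannot be marked busy or the allocation fails.
 */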
static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

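/* Return @sp to its queue pair's mempool and drop the qpair busy mark. */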
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}

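/*
 * Allocate an SRB on the base queue pair while holding a busy mark on
 * @vha; returns NULL if @vha cannot be marked busy or the allocation fails.
 */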
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;
	struct qla_qpair *qpair;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

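/* Release an SRB obtained through qla2x00_get_sp(). */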
static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

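/* Size, in bytes, of the GID list buffer for this adapter type. */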
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

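/*
 * If a mailbox command is waiting for its interrupt and @status carries
 * MBX_INTERRUPT, record the completion and wake up the waiter.
 */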
static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}

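/* Record when the port may be retried; @retry_delay is in 100 ms units. */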
static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
{
	if (retry_delay)
		fcport->retry_delay_timestamp = jiffies +
		    (retry_delay * HZ / 10);
}

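/*
 * True when the exchange count configured for the active mode
 * (initiator, target or dual) exceeds the firmware default.
 */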
static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

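/* Set the queue pair's CPU id and propagate it to all of its hints. */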
static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

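/* Find the target-mode hint that belongs to @qpair, or NULL if none. */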
static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

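/*
 * Advance the request-queue ring index, wrapping at the end of the
 * ring, and write the new index to the ISP's request-queue in-pointer.
 */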
static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	wrt_reg_dword(req->req_q_in, req->ring_index);
}

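/*
 * Read the dual FCP/NVMe flag from NVRAM and return the preferred
 * FC-4 priority (FCP or NVMe).
 */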
static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data =
	    ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}