/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

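/*
 * Register a remote FC port whose PRLI advertised NVMe target/discovery
 * service with the nvme-fc transport, creating the nvme-fc local port
 * first if it does not exist yet.
 */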
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

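	/*
	 * qidx 0 is the NVMe admin queue; map it onto hardware queue
	 * pair 1 (queue pair 0 appears to be reserved for the default
	 * request queue).
	 */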
	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

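/*
 * Completion callback for an NVMe LS exchange: record the completion
 * status and defer the transport's done() callback to process context
 * via ls_work.
 */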
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* The scheduled work needs only priv, not the sp, so release it now. */
	qla2x00_rel_sp(sp);
}

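/*
 * Completion callback for an NVMe FCP command: report the received
 * response length to the transport, complete the request and return
 * the sp to its queue pair.
 */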
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res == QLA_SUCCESS) {
		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
	}
	fd->status = 0;
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

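/*
 * Worker that asks the firmware to abort an outstanding NVMe exchange;
 * the transport abort entry points only schedule this work, since they
 * may be called from contexts that cannot block.
 */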
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		return;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
		    __func__, sp, sp->type, atomic_read(&sp->ref_count));
		sp->done(sp, 0);
		return;
	}

	if (atomic_read(&sp->ref_count) == 0) {
		WARN_ON(1);
		ql_log(ql_log_info, fcport->vha, 0xffff,
		    "%s: command already aborted on sp: %p\n",
		    __func__, sp);
		return;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);
}

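/* Transport .ls_abort entry point: defer the abort to abort_work. */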
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}

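/*
 * Transport .ls_req entry point: wrap the LS request in an SRB, DMA-map
 * the request buffer and hand it to the firmware via qla2x00_start_sp().
 */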
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	vha = fcport->vha;
	ha = vha->hw;
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

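/*
 * Build a Command Type NVME IOCB for this sp on the qpair's request
 * ring, appending Continuation Type 1 IOCBs as needed for additional
 * data segments, then notify the firmware via the ring doorbell.
 */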
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	int rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

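	/*
	 * Commands on the admin queue (sqid 0) may be Async Event Requests;
	 * flag them and keep a count so completions can account for them
	 * separately.
	 */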
	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer: how do we check for buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = CF_READ_DATA;
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = CF_WRITE_DATA;
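		/*
		 * NVMe first burst: if the remote port offered first-burst
		 * support in its PRLI and the payload fits within the
		 * advertised first-burst size (0 meaning no limit), let the
		 * firmware send write data immediately without waiting for
		 * a transfer-ready from the target.
		 */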
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					CF_NVME_FIRST_BURST_ENABLE;
		}
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return rval;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IOs, return busy back to stall the IO queue. This happens
	 * when the link goes away and fw hasn't notified us yet, but IOs
	 * are being returned. If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private, *trport;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			list_del(&qla_rport->list);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
		INIT_WORK(&fcport->free_work, qlt_free_session_done);
		schedule_work(&fcport->free_work);
	}

	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

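/*
 * Entry points and limits handed to the nvme-fc transport through
 * nvme_fc_register_localport(); max_hw_queues and dma_boundary are
 * adjusted per adapter in qla_nvme_register_hba().
 */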
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_hw_queues = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

#define NVME_ABORT_POLLING_PERIOD 2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD*HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

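/*
 * Abort an outstanding NVMe sp: if the firmware is running, issue the
 * abort and wait briefly for the command to complete; otherwise complete
 * the sp locally with the given result.
 */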
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
	int rval;

	if (ha->flags.fw_started) {
		rval = ha->isp_ops->abort_command(sp);
		if (!rval && !qla_nvme_wait_on_command(sp))
			ql_log(ql_log_warn, NULL, 0x2112,
			    "timed out waiting on sp=%p\n", sp);
	} else {
		sp->done(sp, res);
	}
}

static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct qla_nvme_rport *qla_rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			nvme_fc_set_remoteport_devloss(
			    fcport->nvme_remote_port, 0);
			init_completion(&fcport->nvme_del_done);
			if (nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port))
				ql_log(ql_log_info, fcport->vha, 0x2114,
				    "%s: Failed to unregister nvme_remote_port\n",
				    __func__);
			wait_for_completion(&fcport->nvme_del_done);
			break;
		}
	}
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

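/*
 * Register this HBA as an nvme-fc local port. The queue count offered
 * to the transport is clamped to ha->max_req_queues - 2; the adapter
 * is expected to provide at least three request queues.
 */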
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}