/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

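/*
 * Register an fcport whose PRLI reported NVMe target/discovery support as
 * a remote port with the FC-NVMe transport, and link the transport-private
 * qla_nvme_rport back to the fcport.
 */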
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

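/*
 * SRB completion callback for an NVMe LS request: drop the final reference,
 * record the completion status and defer fd->done() to the ls_work
 * workqueue before releasing the SRB.
 */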
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
	qla2x00_rel_sp(sp);
}

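/* Deferred FCP completion: invoke the transport's done() and free the SRB. */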
void qla_nvme_cmpl_io(struct srb_iocb *nvme)
{
	srb_t *sp;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;

	sp = container_of(nvme, srb_t, u.iocb_cmd);
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

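/*
 * SRB completion callback for an NVMe FCP command: map the driver status to
 * an NVMe status and queue the request on the qpair's nvme_done_list for
 * deferred completion via qla_nvme_cmpl_io().
 */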
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res == QLA_SUCCESS)
		fd->status = 0;
	else
		fd->status = NVME_SC_INTERNAL;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list);

	return;
}

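/* Worker that asks the firmware to abort an outstanding NVMe SRB. */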
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, fcport, rval);
}

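/* FC-NVMe transport .ls_abort entry point: defer the abort to a workqueue. */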
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}

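/*
 * FC-NVMe transport .ls_req entry point: allocate an SRB_NVME_LS SRB, map
 * the LS request payload for DMA and start it via qla2x00_start_sp().
 */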
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	vha = fcport->vha;
	ha = vha->hw;
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}

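/* FC-NVMe transport .fcp_abort entry point: defer the abort to a workqueue. */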
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

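/*
 * FC-NVMe transport .poll_queue entry point: process completions on the
 * qpair's response queue under the qpair lock.
 */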
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
	struct qla_qpair *qpair = hw_queue_handle;
	unsigned long flags;
	struct scsi_qla_host *vha = lport->private;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

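/*
 * Build a Command Type NVME IOCB (plus Continuation Type 1 IOCBs for any
 * extra data segments) on the qpair's request queue and ring the doorbell.
 */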
static int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -1;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -1;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!fd) {
		ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
		return rval;
	}

	priv = fd->private;
	fcport = qla_rport->fcport;
	if (!fcport) {
		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
		return rval;
	}

	vha = fcport->vha;
	if (!qpair)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EIO;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return -EIO;
	}

	return rval;
}

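/* Transport callback: local port deletion finished, wake the waiter. */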
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

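/*
 * Transport callback invoked when the remote port has been deleted: unlink
 * the qla_nvme_rport, complete the waiter and, unless the driver is
 * unloading, schedule session teardown for the fcport.
 */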
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private, *trport;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			list_del(&qla_rport->list);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
		INIT_WORK(&fcport->free_work, qlt_free_session_done);
		schedule_work(&fcport->free_work);
	}

	fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING);
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

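/* Operations and limits advertised to the FC-NVMe transport. */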
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.poll_queue = qla_nvme_poll,
	.max_hw_queues = 8,
	.max_sgl_segments = 128,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

#define NVME_ABORT_POLLING_PERIOD 2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD*HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp)
{
	int rval;

	rval = ha->isp_ops->abort_command(sp);
	if (!rval && !qla_nvme_wait_on_command(sp))
		ql_log(ql_log_warn, NULL, 0x2112,
		    "nvme_wait_on_command timed out waiting on sp=%p\n", sp);
}

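/* Deferred work to unregister an fcport's FC-NVMe remote port. */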
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct qla_nvme_rport *qla_rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			init_completion(&fcport->nvme_del_done);
			nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port);
			wait_for_completion(&fcport->nvme_del_done);
			break;
		}
	}
}

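/*
 * Tear down FC-NVMe: unregister every remote port on the vha and then the
 * local port, waiting for each deletion to complete.
 */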
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	struct qla_nvme_rport *qla_rport, *trport;
	fc_port_t *fcport;
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(qla_rport, trport,
	    &vha->nvme_rport_list, list) {
		fcport = qla_rport->fcport;

		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
		    __func__, fcport);

		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
		init_completion(&fcport->nvme_del_done);
		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
		wait_for_completion(&fcport->nvme_del_done);
	}

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

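/* Register this host (vha) as an FC-NVMe local port with the transport. */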
void qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
		return;
	}
	vha->nvme_local_port->private = vha;
}