/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

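/*
 * Register an fcport with the FC-NVMe transport as a remote port.  The
 * local port is registered on first use, and the remote port roles are
 * derived from the PRLI service parameters.  Returns 0 on success or
 * when registration is intentionally skipped.
 */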
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct qla_nvme_rport *rport;
        struct nvme_fc_port_info req;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
                return 0;

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
            (fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return 0;

        INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
        fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
        req.node_name = wwn_to_u64(fcport->node_name);
        req.port_role = 0;
        req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, req.node_name, req.port_name,
            req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                return ret;
        }

        rport = fcport->nvme_remote_port->private;
        rport->fcport = fcport;

        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        if (!qidx)
                qidx++;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx =%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        if (ha->queue_pair_map[qidx]) {
                *handle = ha->queue_pair_map[qidx];
                ql_log(ql_log_info, vha, 0x2121,
                    "Returning existing qpair of %p for idx=%x\n",
                    *handle, qidx);
                return 0;
        }

        qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
        if (qpair == NULL) {
                ql_log(ql_log_warn, vha, 0x2122,
                    "Failed to allocate qpair\n");
                return -EINVAL;
        }
        *handle = qpair;

        return 0;
}

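/*
 * Completion callback for an SRB_NVME_LS request.  Records the result in
 * the request's private data and defers the transport's done() call to
 * ls_work so it runs in process context, then releases the SRB.
 */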
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_ls_req *fd;
        struct nvme_private *priv;

        if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
                return;

        atomic_dec(&sp->ref_count);

        if (res)
                res = -EINVAL;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;
        priv = fd->private;
        priv->comp_status = res;
        schedule_work(&priv->ls_work);
        /* work schedule doesn't need the sp */
        qla2x00_rel_sp(sp);
}

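/*
 * Completion callback for an SRB_NVME_CMD request.  Copies the response
 * payload length back into the nvmefc_fcp_req on success, completes the
 * request via fd->done(), and returns the SRB to its queue pair.
 */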
static void qla_nvme_sp_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_fcp_req *fd;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
                return;

        atomic_dec(&sp->ref_count);

        if (res == QLA_SUCCESS) {
                fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
        } else {
                fd->rcv_rsplen = 0;
                fd->transferred_length = 0;
        }
        fd->status = 0;
        fd->done(fd);
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

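/*
 * Deferred-abort worker shared by the ls_abort and fcp_abort transport
 * hooks.  Returns early if the firmware has stopped and the port is
 * deleted, completes the SRB directly when the host is shutting down,
 * and otherwise asks the ISP firmware to abort the outstanding command.
 */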
static void qla_nvme_abort_work(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, abort_work);
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha = fcport->vha->hw;
        int rval;

        ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
            "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
            __func__, sp, sp->handle, fcport, fcport->deleted);

        if (!ha->flags.fw_started && (fcport && fcport->deleted))
                return;

        if (ha->flags.host_shutting_down) {
                ql_log(ql_log_info, sp->fcport->vha, 0xffff,
                    "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
                    __func__, sp, sp->type, atomic_read(&sp->ref_count));
                sp->done(sp, 0);
                return;
        }

        if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
                return;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, sp->handle, fcport, rval);
}

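/* Transport ls_abort hook: punt the abort to qla_nvme_abort_work(). */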
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, ls_work);
        struct nvmefc_ls_req *fd = priv->fd;

        fd->done(fd, priv->comp_status);
}

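/*
 * Transport ls_req hook: wrap the nvmefc_ls_req in an SRB_NVME_LS SRB,
 * DMA-map the request buffer, and issue a pass-through IOCB via
 * qla2x00_start_sp().  Completion is reported through
 * qla_nvme_sp_ls_done()/qla_nvme_ls_complete().
 */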
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct qla_nvme_rport *qla_rport = rport->private;
        fc_port_t *fcport = qla_rport->fcport;
        struct srb_iocb *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t *sp;

        if (!fcport || (fcport && fcport->deleted))
                return rval;

        vha = fcport->vha;
        ha = vha->hw;

        if (!ha->flags.fw_started)
                return rval;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        atomic_set(&sp->ref_count, 1);
        nvme = &sp->u.iocb_cmd;
        priv->sp = sp;
        priv->fd = fd;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
            fd->rqstlen, DMA_TO_DEVICE);
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
                sp->free(sp);
                return rval;
        }

        return rval;
}

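/* Transport fcp_abort hook: punt the abort to qla_nvme_abort_work(). */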
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

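/*
 * Build and ring a Command Type NVME IOCB on the qpair's request queue:
 * claim a free outstanding-command handle, verify ring space (using the
 * shadow out-pointer when the ISP supports it), fill in the command
 * packet, and spill any extra data segments into Continuation Type 1
 * IOCBs (five DSDs apiece).  Runs under qp_lock with IRQs disabled.
 */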
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long flags;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t cnt, i;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        uint16_t avail_dsds;
        struct dsd64 *cur_dsd;
        struct req_que *req = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        uint32_t rval = QLA_SUCCESS;

        /* Setup qpair pointers */
        req = qpair->req;
        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == req->num_outstanding_cmds) {
                rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
                    RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -EBUSY;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* No data transfer? How do we check buffer len == 0? */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags = CF_READ_DATA;
                vha->qla_stats.input_bytes += fd->payload_length;
                vha->qla_stats.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags = CF_WRITE_DATA;
                if ((vha->flags.nvme_first_burst) &&
                    (sp->fcport->nvme_prli_service_param &
                        NVME_PRLI_SP_FIRST_BURST)) {
                        if ((fd->payload_length <=
                            sp->fcport->nvme_first_burst_size) ||
                                (sp->fcport->nvme_first_burst_size == 0))
                                cmd_pkt->control_flags |=
                                        CF_NVME_FIRST_BURST_ENABLE;
                }
                vha->qla_stats.output_bytes += fd->payload_length;
                vha->qla_stats.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = 0;
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

        /* NVME CMND IU */
419 cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
Bart Van Assched4556a42019-04-17 14:44:39 -0700420 cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
Duane Grigsbye84067d2017-06-21 13:48:43 -0700421
422 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
423 cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
424
425 /* One DSD is available in the Command Type NVME IOCB */
426 avail_dsds = 1;
Bart Van Assche15b7a682019-04-17 14:44:38 -0700427 cur_dsd = &cmd_pkt->nvme_dsd;
Duane Grigsbye84067d2017-06-21 13:48:43 -0700428 sgl = fd->first_sgl;
429
430 /* Load data segments */
431 for_each_sg(sgl, sg, tot_dsds, i) {
Duane Grigsbye84067d2017-06-21 13:48:43 -0700432 cont_a64_entry_t *cont_pkt;
433
434 /* Allocate additional continuation packets? */
435 if (avail_dsds == 0) {
436 /*
437 * Five DSDs are available in the Continuation
438 * Type 1 IOCB.
439 */
440
441 /* Adjust ring index */
442 req->ring_index++;
443 if (req->ring_index == req->length) {
444 req->ring_index = 0;
445 req->ring_ptr = req->ring;
446 } else {
447 req->ring_ptr++;
448 }
449 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
Bart Van Assche2c263482019-04-04 12:44:45 -0700450 put_unaligned_le32(CONTINUE_A64_TYPE,
451 &cont_pkt->entry_type);
Duane Grigsbye84067d2017-06-21 13:48:43 -0700452
Bart Van Assche15b7a682019-04-17 14:44:38 -0700453 cur_dsd = cont_pkt->dsd;
454 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
Duane Grigsbye84067d2017-06-21 13:48:43 -0700455 }
456
Bart Van Assche15b7a682019-04-17 14:44:38 -0700457 append_dsd64(&cur_dsd, sg);
Duane Grigsbye84067d2017-06-21 13:48:43 -0700458 avail_dsds--;
459 }
460
461 /* Set total entry count. */
462 cmd_pkt->entry_count = (uint8_t)req_cnt;
463 wmb();
464
465 /* Adjust ring index. */
466 req->ring_index++;
467 if (req->ring_index == req->length) {
468 req->ring_index = 0;
469 req->ring_ptr = req->ring;
470 } else {
471 req->ring_ptr++;
472 }
473
474 /* Set chip new ring index. */
475 WRT_REG_DWORD(req->req_q_in, req->ring_index);
476
477queuing_error:
478 spin_unlock_irqrestore(&qpair->qp_lock, flags);
479 return rval;
480}
481
/*
 * Post a command: the transport's fcp_io hook.  Validates that the
 * qpair and fcport are still usable, wraps the request in an
 * SRB_NVME_CMD SRB, and hands it to qla2x00_start_nvme_mq().
 */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval = -ENODEV;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv = fd->private;
        struct qla_nvme_rport *qla_rport = rport->private;

        fcport = qla_rport->fcport;

        if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
            (fcport && fcport->deleted))
                return rval;

        vha = fcport->vha;
        /*
         * If we know the dev is going away while the transport is still sending
         * IO's return busy back to stall the IO Q. This happens when the
         * link goes away and fw hasn't notified us yet, but IO's are being
         * returned. If the dev comes back quickly we won't exhaust the IO
         * retry count at the core.
         */
        if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;

        atomic_set(&sp->ref_count, 1);
        init_waitqueue_head(&sp->nvme_ls_waitq);
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->qpair = qpair;
        sp->vha = vha;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
                sp->free(sp);
        }

        return rval;
}

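/* Transport callback: local port teardown finished; wake qla_nvme_delete(). */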
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

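/*
 * Transport callback run once all controllers on the remote port are
 * gone: clear the fcport's transport state, wake the unregister waiter,
 * and hand the session to qlt_free_session_done() on a workqueue.
 */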
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct qla_nvme_rport *qla_rport = rport->private;

        fcport = qla_rport->fcport;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

        complete(&fcport->nvme_del_done);

        INIT_WORK(&fcport->free_work, qlt_free_session_done);
        schedule_work(&fcport->free_work);

        fcport->nvme_flag &= ~NVME_FLAG_DELETING;
        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p %8phN completed.\n",
            fcport, fcport->port_name);
}

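/*
 * FC-NVMe transport template.  max_hw_queues is clamped at local-port
 * registration time to what the HBA actually provides; the *_priv_sz
 * fields size the per-object private areas the transport allocates for
 * our rport and request bookkeeping.
 */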
static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue = qla_nvme_alloc_queue,
        .delete_queue = NULL,
        .ls_req = qla_nvme_ls_req,
        .ls_abort = qla_nvme_ls_abort,
        .fcp_io = qla_nvme_post_cmd,
        .fcp_abort = qla_nvme_fcp_abort,
        .max_hw_queues = 8,
        .max_sgl_segments = 1024,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz = 8,
        .remote_priv_sz = sizeof(struct qla_nvme_rport),
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

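/*
 * Worker behind fcport->nvme_del_work: zero the remote port's
 * dev_loss_tmo and synchronously unregister it from the transport,
 * waiting for qla_nvme_remoteport_delete() to signal completion.
 */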
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
        struct fc_port *fcport = container_of(work, struct fc_port,
            nvme_del_work);
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, NULL, 0x2112,
            "%s: unregister remoteport on %p %8phN\n",
            __func__, fcport, fcport->port_name);

        nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
        init_completion(&fcport->nvme_del_done);
        ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
        if (ret)
                ql_log(ql_log_info, fcport->vha, 0x2114,
                    "%s: Failed to unregister nvme_remote_port (%d)\n",
                    __func__, ret);
        wait_for_completion(&fcport->nvme_del_done);
}

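/* Unregister the local port (if any) on host teardown and wait for it. */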
void qla_nvme_delete(struct scsi_qla_host *vha)
{
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                ql_log(ql_log_info, vha, 0x2116,
                    "unregister localport=%p\n",
                    vha->nvme_local_port);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret)
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                else
                        wait_for_completion(&vha->nvme_del_done);
        }
}

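/*
 * Register this HBA as an FC-NVMe local port.  Queue index 0 is never
 * handed out, so the advertised hardware queue count is capped at
 * max_req_queues - 2.
 */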
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret = -EINVAL;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return ret;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        WARN_ON(vha->nvme_local_port);
        WARN_ON(ha->max_req_queues < 3);

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
                (uint8_t)(ha->max_req_queues - 2));

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
            get_device(&ha->pdev->dev), &vha->nvme_local_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
        } else {
                vha->nvme_local_port->private = vha;
        }

        return ret;
}