scsi: lpfc: NVME Target: Base modifications
This set of patches adds the base modifications for NVME target support.
The base modifications consist of:
- Additional module parameters or configuration tuning
- Enablement of configuration mode for the NVME target. This ties into the
  queueing model put in place by the initiator base modification patches.
- Target-specific buffer pools, DMA pools, and sgl pools (a usage sketch of
  the target sgl pool follows this list)
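
For illustration, a minimal sketch of how a caller is expected to use the new
target sgl pool, based only on the helpers added by this patch (the caller
context shown here is hypothetical, not part of the patch):

    struct lpfc_sglq *sglq;
    unsigned long iflag;

    /* __lpfc_sli_get_nvmet_sglq() must be called with sgl_list_lock held */
    spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
    sglq = __lpfc_sli_get_nvmet_sglq(phba, piocbq);
    spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
    if (!sglq)
            return NULL;    /* nvmet sgl pool exhausted */

    /* Marking the iocbq as NVMET routes its sglq back to
     * lpfc_nvmet_sgl_list when __lpfc_sli_release_iocbq_s4() frees it.
     */
    piocbq->iocb_flag |= LPFC_IO_NVMET;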
[mkp: fixed space at end of file]
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 5f6ad67..f2cd0f3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -45,6 +45,7 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
@@ -976,6 +977,34 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
}
/**
+ * __lpfc_sli_get_nvmet_sglq - Allocates an sglq object from the nvmet sgl pool
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to the iocbq.
+ *
+ * This function is called with the sgl_list lock held. This function
+ * gets a new driver sglq object from the nvmet sgl list. If the list is
+ * not empty, it removes an sglq from the head of the list and returns a
+ * pointer to it; otherwise it returns NULL.
+ **/
+struct lpfc_sglq *
+__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
+{
+ struct list_head *lpfc_nvmet_sgl_list;
+ struct lpfc_sglq *sglq = NULL;
+
+ lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
+
+ lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
+
+ list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
+ if (!sglq)
+ return NULL;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
+ sglq->state = SGL_ALLOCATED;
+ return sglq;
+}
+
+/**
* lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
* @phba: Pointer to HBA context object.
*
@@ -1031,6 +1060,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (sglq) {
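+ /* NVMET sglqs are returned directly to the nvmet sgl list */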
+ if (iocbq->iocb_flag & LPFC_IO_NVMET) {
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
+ iflag);
+ sglq->state = SGL_FREED;
+ sglq->ndlp = NULL;
+ list_add_tail(&sglq->list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.sgl_list_lock, iflag);
+ goto out;
+ }
+
pring = phba->sli4_hba.els_wq->pring;
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -1056,13 +1097,15 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
}
}
+out:
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
- iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVME_LS);
+ iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
+ LPFC_IO_NVME_LS);
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -2450,6 +2493,14 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
int i;
+ switch (fch_type) {
+ case FC_TYPE_NVME:
+ /* todo: tgt: forward NVME LS to transport */
+ return 1;
+ default:
+ break;
+ }
+
/* unSolicited Responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -6761,7 +6812,31 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.els_xri_cnt = rc;
- if (phba->nvmet_support == 0) {
+ if (phba->nvmet_support) {
+ /* update host nvmet xri-sgl sizes and mappings */
+ rc = lpfc_sli4_nvmet_sgl_update(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "6308 Failed to update nvmet-sgl size "
+ "and mapping: %d\n", rc);
+ goto out_destroy_queue;
+ }
+
+ /* register the nvmet sgl pool to the port */
+ rc = lpfc_sli4_repost_sgl_list(
+ phba,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list,
+ phba->sli4_hba.nvmet_xri_cnt);
+ if (unlikely(rc < 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "3117 Error %d during nvmet "
+ "sgl post\n", rc);
+ rc = -ENODEV;
+ goto out_destroy_queue;
+ }
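+ /* the repost returns the number of nvmet sgl entries actually posted */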
+ phba->sli4_hba.nvmet_xri_cnt = rc;
+ /* todo: tgt: create targetport */
+ } else {
/* update host scsi xri-sgl sizes and mappings */
rc = lpfc_sli4_scsi_sgl_update(phba);
if (unlikely(rc)) {
@@ -13006,7 +13081,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
if (phba->sli4_hba.nvme_cq_map &&
(cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
- /* Process NVME command completion */
+ /* Process NVME / NVMET command completion */
cq = phba->sli4_hba.nvme_cq[qidx];
goto process_cq;
}
@@ -17912,6 +17987,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *pwqe)
{
union lpfc_wqe *wqe = &pwqe->wqe;
+ struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_queue *wq;
struct lpfc_sglq *sglq;
struct lpfc_sli_ring *pring;
@@ -17961,5 +18037,30 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
return 0;
}
+ /* NVMET requests */
+ if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+ /* Get the IO distribution (hba_wqidx) for WQ assignment. */
+ pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ ctxp = pwqe->context2;
+ sglq = ctxp->rqb_buffer->sglq;
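+ /* Adopt the XRI of the receive buffer's sglq if this WQE has none */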
+ if (pwqe->sli4_xritag == NO_XRI) {
+ pwqe->sli4_lxritag = sglq->sli4_lxritag;
+ pwqe->sli4_xritag = sglq->sli4_xritag;
+ }
+ bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
+ pwqe->sli4_xritag);
+ wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
+ bf_set(wqe_cqid, &wqe->generic.wqe_com,
+ phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+ if (lpfc_sli4_wq_put(wq, wqe)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return WQE_ERROR;
+ }
+ lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return 0;
+ }
return WQE_ERROR;
}