scsi: lpfc: NVME Target: Base modifications
NVME Target: Base modifications

This set of patches adds the base modifications for NVME target support.

The base modifications consist of:
- Additional module parameters or configuration tuning
- Enablement of configuration mode for NVME target. Ties into the
  queueing model put into place by the initiator basemods patches.
- Target-specific buffer pools, dma pools, sgl pools

[mkp: fixed space at end of file]

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit f358dd0ca2
parent bd2cdd5e40
committed by Martin K. Petersen
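A note on the module parameter this patch consumes: the WWN-matching code below reads an array, lpfc_enable_nvmet[], together with its element count lpfc_enable_nvmet_cnt, both defined outside the hunks shown here (in lpfc_attr.c). A minimal sketch of how such a parameter would be declared, assuming an eight-entry array:

    /*
     * Sketch only -- not part of the hunks below. Assumes an eight-entry
     * array; the driver compares each entry against a port's WWPN.
     */
    static uint64_t lpfc_enable_nvmet[8];
    static int lpfc_enable_nvmet_cnt;
    module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
    MODULE_PARM_DESC(lpfc_enable_nvmet,
                     "Enable HBA port(s) WWPN as a NVME Target");

Loading the driver with, for example, lpfc_enable_nvmet=0x10000090fa942779 (an illustrative WWPN) would then set phba->nvmet_support on the matching port.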
@@ -73,6 +73,7 @@ static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
 static int lpfc_setup_endian_order(struct lpfc_hba *);
 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
+static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
 static void lpfc_init_sgl_list(struct lpfc_hba *);
 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
 static void lpfc_free_active_sgl(struct lpfc_hba *);
@@ -88,6 +89,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
 static DEFINE_IDR(lpfc_hba_index);
+#define LPFC_NVMET_BUF_POST 254
 
 /**
  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
@@ -1023,10 +1025,17 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	list_for_each_entry(sglq_entry,
 		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
 		sglq_entry->state = SGL_FREED;
+	list_for_each_entry(sglq_entry,
+		&phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list)
+		sglq_entry->state = SGL_FREED;
 
 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
 			 &phba->sli4_hba.lpfc_els_sgl_list);
 
+	if (phba->sli4_hba.nvme_wq)
+		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list,
+				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
+
 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
 	/* abts_scsi_buf_list_lock required because worker thread uses this
 	 * list.
@@ -3320,6 +3329,128 @@ out_free_mem:
 	return rc;
 }
 
+/**
+ * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to updates the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl lists are 0s.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+	uint16_t i, lxri, xri_cnt, els_xri_cnt;
+	uint16_t nvmet_xri_cnt, tot_cnt;
+	LIST_HEAD(nvmet_sgl_list);
+	int rc;
+
+	/*
+	 * update on pci function's nvmet xri-sgl list
+	 */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+	nvmet_xri_cnt = 0;
+	tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+
+	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
+		/* els xri-sgl expanded */
+		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
+				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
+		/* allocate the additional nvmet sgls */
+		for (i = 0; i < xri_cnt; i++) {
+			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+					     GFP_KERNEL);
+			if (sglq_entry == NULL) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"6303 Failure to allocate an "
+						"NVMET sgl entry:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->buff_type = NVMET_BUFF_TYPE;
+			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
+							&sglq_entry->phys);
+			if (sglq_entry->virt == NULL) {
+				kfree(sglq_entry);
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"6304 Failure to allocate an "
+						"NVMET buf:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->sgl = sglq_entry->virt;
+			memset(sglq_entry->sgl, 0,
+			       phba->cfg_sg_dma_buf_size);
+			sglq_entry->state = SGL_FREED;
+			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
+		}
+		spin_lock_irq(&phba->hbalock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_splice_init(&nvmet_sgl_list,
+				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irq(&phba->hbalock);
+	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
+		/* nvmet xri-sgl shrunk */
+		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"6305 NVMET xri-sgl count decreased from "
+				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
+				nvmet_xri_cnt);
+		spin_lock_irq(&phba->hbalock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
+				 &nvmet_sgl_list);
+		/* release extra nvmet sgls from list */
+		for (i = 0; i < xri_cnt; i++) {
+			list_remove_head(&nvmet_sgl_list,
+					 sglq_entry, struct lpfc_sglq, list);
+			if (sglq_entry) {
+				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
+						    sglq_entry->phys);
+				kfree(sglq_entry);
+			}
+		}
+		list_splice_init(&nvmet_sgl_list,
+				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irq(&phba->hbalock);
+	} else
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"6306 NVMET xri-sgl count unchanged: %d\n",
+				nvmet_xri_cnt);
+	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
+
+	/* update xris to nvmet sgls on the list */
+	sglq_entry = NULL;
+	sglq_entry_next = NULL;
+	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"6307 Failed to allocate xri for "
+					"NVMET sgl\n");
+			rc = -ENOMEM;
+			goto out_free_mem;
+		}
+		sglq_entry->sli4_lxritag = lxri;
+		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+	}
+	return 0;
+
+out_free_mem:
+	lpfc_free_nvmet_sgl_list(phba);
+	return rc;
+}
+
 /**
  * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
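lpfc_sli4_nvmet_sgl_update() above calls lpfc_nvmet_buf_alloc() and lpfc_nvmet_buf_free(), which this patch set adds elsewhere (in lpfc_mem.c) rather than in the hunks shown. A hedged sketch of those wrappers, assuming they draw from the driver's scatter-gather DMA pool:

    /* Sketch only, assuming the helpers wrap lpfc_sg_dma_buf_pool. */
    void *
    lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int flags, dma_addr_t *handle)
    {
            /* each element is cfg_sg_dma_buf_size bytes */
            return pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL,
                                  handle);
    }

    void
    lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
    {
            pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
    }

Under that assumption each allocation is cfg_sg_dma_buf_size bytes, which is why the caller can memset() the returned sgl to that size.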
@@ -5228,11 +5359,12 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 	init_waitqueue_head(&phba->work_waitq);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"1403 Protocols supported %s %s\n",
+			"1403 Protocols supported %s %s %s\n",
 			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
 				"SCSI" : " "),
 			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
-				"NVME" : " "));
+				"NVME" : " "),
+			(phba->nvmet_support ? "NVMET" : " "));
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
 		/* Initialize the scsi buffer list used by driver for scsi IO */
@@ -5447,11 +5579,13 @@ static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *mboxq;
+	MAILBOX_t *mb;
 	int rc, i, max_buf_size;
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
 	int longs;
 	int fof_vectors = 0;
+	uint64_t wwn;
 
 	phba->sli4_hba.num_online_cpu = num_online_cpus();
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
@@ -5597,6 +5731,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* This abort list used by worker thread */
 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
 
 	/*
 	 * Initialize driver internal slow-path work queues
@@ -5673,7 +5808,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_bsmbx;
 	}
 
+	/* Check for NVMET being configured */
+	phba->nvmet_support = 0;
+	if (lpfc_enable_nvmet_cnt) {
+
+		/* First get WWN of HBA instance */
+		lpfc_read_nv(phba, mboxq);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"6016 Mailbox failed , mbxCmd x%x "
+					"READ_NV, mbxStatus x%x\n",
+					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+			rc = -EIO;
+			goto out_free_bsmbx;
+		}
+		mb = &mboxq->u.mb;
+		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
+		       sizeof(uint64_t));
+		wwn = cpu_to_be64(wwn);
+		phba->sli4_hba.wwnn.u.name = wwn;
+		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
+		       sizeof(uint64_t));
+		/* wwn is WWPN of HBA instance */
+		wwn = cpu_to_be64(wwn);
+		phba->sli4_hba.wwpn.u.name = wwn;
+
+		/* Check to see if it matches any module parameter */
+		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
+			if (wwn == lpfc_enable_nvmet[i]) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6017 NVME Target %016llx\n",
+						wwn);
+				phba->nvmet_support = 1; /* a match */
+			}
+		}
+	}
+
 	lpfc_nvme_mod_param_dep(phba);
 
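A note on the byte-order handling above: the mailbox payload's varRDnvp.nodename/portname fields carry the WWNs as big-endian byte arrays, so after the memcpy() into a host-order uint64_t, the cpu_to_be64() is what turns the value into the numeric WWN that can be compared directly against the lpfc_enable_nvmet[] entries supplied on the module command line.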
@@ -5869,6 +6040,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	/* Free the ELS sgl list */
 	lpfc_free_active_sgl(phba);
 	lpfc_free_els_sgl_list(phba);
+	lpfc_free_nvmet_sgl_list(phba);
 
 	/* Free the completion queue EQ event pool */
 	lpfc_sli4_cq_event_release_all(phba);
@@ -6089,6 +6261,33 @@ lpfc_free_els_sgl_list(struct lpfc_hba *phba)
 	lpfc_free_sgl_list(phba, &sglq_list);
 }
 
+/**
+ * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's nvmet sgl list and memory.
+ **/
+static void
+lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	LIST_HEAD(sglq_list);
+
+	/* Retrieve all nvmet sgls from driver list */
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Now free the sgl list */
+	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
+		list_del(&sglq_entry->list);
+		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
+		kfree(sglq_entry);
+	}
+}
+
 /**
  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
  * @phba: pointer to lpfc hba data structure.
@@ -6138,6 +6337,8 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 	/* Initialize and populate the sglq list per host/VF. */
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
 
 	/* els xri-sgl book keeping */
 	phba->sli4_hba.els_xri_cnt = 0;
@@ -6416,6 +6617,22 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	shost = lpfc_shost_from_vport(vport);
 	phba->pport = vport;
 
+	if (phba->nvmet_support) {
+		/* Only 1 vport (pport) will support NVME target */
+		if (phba->txrdy_payload_pool == NULL) {
+			phba->txrdy_payload_pool = pci_pool_create(
+				"txrdy_pool", phba->pcidev,
+				TXRDY_PAYLOAD_LEN, 16, 0);
+			if (phba->txrdy_payload_pool) {
+				phba->targetport = NULL;
+				phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+				lpfc_printf_log(phba, KERN_INFO,
+						LOG_INIT | LOG_NVME_DISC,
+						"6076 NVME Target Found\n");
+			}
+		}
+	}
+
 	lpfc_debugfs_initialize(vport);
 	/* Put reference to SCSI host to driver's device private data */
 	pci_set_drvdata(phba->pcidev, shost);
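The txrdy_payload_pool created above (TXRDY_PAYLOAD_LEN bytes per element, 16-byte aligned) needs a matching pci_pool_destroy() on teardown. That destroy path is outside the hunks shown; a sketch of what one would expect, assuming it sits in the driver's memory-free path:

    /* Sketch only: matching teardown for the pool created above,
     * assumed to live in the driver's mem-free routine. */
    if (phba->txrdy_payload_pool) {
            pci_pool_destroy(phba->txrdy_payload_pool);
            phba->txrdy_payload_pool = NULL;
    }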
@@ -7459,7 +7676,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 	phba->cfg_nvme_io_channel = io_channel;
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"2574 IRQs: %d, IO Channels: fcp %d nvme %d\n",
+			"2574 IO channels: irqs %d fcp %d nvme %d\n",
 			phba->io_channel_irqs, phba->cfg_fcp_io_channel,
 			phba->cfg_nvme_io_channel);
 
@@ -9164,8 +9381,9 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 	if (phba->cfg_fof)
 		vectors++;
 
-	rc = pci_alloc_irq_vectors(phba->pcidev, 2, vectors,
-				   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+	rc = pci_alloc_irq_vectors(phba->pcidev,
+				   (phba->nvmet_support) ? 1 : 2,
+				   vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 	if (rc < 0) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -9447,6 +9665,8 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 	int nvme_xri_cmpl = 1;
 	int fcp_xri_cmpl = 1;
 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+	int nvmet_xri_cmpl =
+		list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
 		fcp_xri_cmpl =
@@ -9455,7 +9675,8 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 		nvme_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 
-	while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl) {
+	while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
+	       !nvmet_xri_cmpl) {
 		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
 			if (!nvme_xri_cmpl)
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9488,6 +9709,9 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 
 		els_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+		nvmet_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
 	}
 }
 
@@ -9725,6 +9949,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
 	}
 
+	if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
+		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
+
 	/* Make sure that sge_supp_len can be handled by the driver */
 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
@@ -10376,13 +10603,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
  * @phba: pointer to lpfc hba data structure.
  *
- * returns the number of ELS/CT
+ * returns the number of ELS/CT + NVMET IOCBs to reserve
  **/
 int
 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
 {
 	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
 
+	if (phba->nvmet_support)
+		max_xri += LPFC_NVMET_BUF_POST;
 	return max_xri;
 }
 
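To make the reservation concrete: LPFC_NVMET_BUF_POST is defined as 254 earlier in this patch, so a port whose ELS/CT IOCB count came back as, say, 256 (an illustrative figure, not taken from the driver) would reserve 256 + 254 = 510 IOCBs once nvmet_support is set.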
@@ -10755,6 +10984,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 	/* Remove FC host and then SCSI host with the physical port */
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
+	/* todo: tgt: remove targetport */
 
 	/* Perform ndlp cleanup on the physical port. The nvme localport
 	 * is destroyed after to ensure all rports are io-disabled.