[SCSI] lpfc 8.1.10 : Add support for dev_loss_tmo_callbk and fast_io_fail_tmo_callbk

Add support for the new dev_loss_tmo callback.
  A welcome side effect is that it removes the driver's parallel nodev
  timer code.
Add support for the new fast_io_fail callback.

Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Author: James Smart
Date: 2006-08-18 17:47:08 -04:00
Committed by: James Bottomley
Parent: 0f29b966d6
Commit: c01f320879
8 changed files with 210 additions and 174 deletions
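The hunk that registers these handlers with the FC transport (presumably in lpfc_attr.c, where the driver's fc_function_template is defined) is among the 8 changed files but is not part of the excerpt below. A rough sketch of that wiring, in which the template name and the elided surrounding fields are assumptions rather than the literal hunk; terminate_rport_io is the fc_function_template hook the transport invokes when fast_io_fail_tmo fires:

#include <scsi/scsi_transport_fc.h>

/* Implemented in lpfc_hbadisc.c (see the diff below). */
void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
void lpfc_terminate_rport_io(struct fc_rport *rport);

/* Sketch only: the surrounding template contents are assumptions,
 * not the literal lpfc_attr.c change. */
struct fc_function_template lpfc_transport_functions = {
	/* ... existing rport and host attribute handlers elided ... */

	/* Run by the FC transport when dev_loss_tmo expires on an rport;
	 * this replaces the driver's private nodev timer. */
	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,

	/* Run when fast_io_fail_tmo expires, so outstanding I/O to a
	 * blocked rport can be failed before dev_loss_tmo runs out. */
	.terminate_rport_io = lpfc_terminate_rport_io,
};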

drivers/scsi/lpfc/lpfc_hbadisc.c

@@ -56,28 +56,63 @@ static uint8_t lpfcAlpaArray[] = {
static void lpfc_disc_timeout_handler(struct lpfc_hba *);
static void
lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
uint8_t *name = (uint8_t *)&ndlp->nlp_portname;
int warn_on = 0;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist * ndlp;
struct lpfc_hba *phba;
spin_lock_irq(phba->host->host_lock);
if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
spin_unlock_irq(phba->host->host_lock);
rdata = rport->dd_data;
ndlp = rdata->pnode;
if (!ndlp) {
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
printk(KERN_ERR "Cannot find remote node"
" to terminate I/O Data x%x\n",
rport->port_id);
return;
}
/*
* If a discovery event readded nodev_timer after timer
* firing and before processing the timer, cancel the
* nlp_tmofunc.
*/
spin_unlock_irq(phba->host->host_lock);
del_timer_sync(&ndlp->nlp_tmofunc);
spin_lock_irq(phba->host->host_lock);
phba = ndlp->nlp_phba;
ndlp->nlp_flag &= ~NLP_NODEV_TMO;
spin_lock_irq(phba->host->host_lock);
if (ndlp->nlp_sid != NLP_NO_SID) {
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
}
spin_unlock_irq(phba->host->host_lock);
return;
}
/*
* This function will be called when dev_loss_tmo fires.
*/
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
struct lpfc_nodelist * ndlp;
uint8_t *name;
int warn_on = 0;
struct lpfc_hba *phba;
rdata = rport->dd_data;
ndlp = rdata->pnode;
if (!ndlp) {
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
printk(KERN_ERR "Cannot find remote node"
" for rport in dev_loss_tmo_callbk x%x\n",
rport->port_id);
return;
}
name = (uint8_t *)&ndlp->nlp_portname;
phba = ndlp->nlp_phba;
spin_lock_irq(phba->host->host_lock);
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
@@ -85,11 +120,14 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
}
if (phba->fc_flag & FC_UNLOADING)
warn_on = 0;
spin_unlock_irq(phba->host->host_lock);
if (warn_on) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0203 Nodev timeout on "
"%d:0203 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
phba->brd_no,
@@ -99,7 +137,7 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0204 Nodev timeout on "
"%d:0204 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
phba->brd_no,
@@ -109,7 +147,12 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
ndlp->rport = NULL;
rdata->pnode = NULL;
if (!(phba->fc_flag & FC_UNLOADING))
lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
return;
}
@@ -127,11 +170,6 @@ lpfc_work_list_done(struct lpfc_hba * phba)
spin_unlock_irq(phba->host->host_lock);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_NODEV_TMO:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
lpfc_process_nodev_timeout(phba, ndlp);
free_evt = 0;
break;
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
lpfc_els_retry_delay_handler(ndlp);
@@ -377,16 +415,6 @@ lpfc_linkdown(struct lpfc_hba * phba)
rc = lpfc_disc_state_machine(phba, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
/* Check config parameter use-adisc or FCP-2 */
if ((rc != NLP_STE_FREED_NODE) &&
(phba->cfg_use_adisc == 0) &&
!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
/* We know we will have to relogin, so
* unreglogin the rpi right now to fail
* any outstanding I/Os quickly.
*/
lpfc_unreg_rpi(phba, ndlp);
}
}
}
@@ -1104,8 +1132,11 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
struct fc_rport *rport = ndlp->rport;
struct lpfc_rport_data *rdata = rport->dd_data;
ndlp->rport = NULL;
rdata->pnode = NULL;
if (rport->scsi_target_id == -1) {
ndlp->rport = NULL;
rdata->pnode = NULL;
}
fc_remote_port_delete(rport);
return;
@@ -1233,17 +1264,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
phba->fc_unmap_cnt++;
phba->nport_event_cnt++;
/* stop nodev tmo if running */
if (nlp->nlp_flag & NLP_NODEV_TMO) {
nlp->nlp_flag &= ~NLP_NODEV_TMO;
spin_unlock_irq(phba->host->host_lock);
del_timer_sync(&nlp->nlp_tmofunc);
spin_lock_irq(phba->host->host_lock);
if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
list_del_init(&nlp->nodev_timeout_evt.
evt_listp);
}
nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
nlp->nlp_type |= NLP_FC_NODE;
break;
@@ -1254,17 +1274,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
phba->fc_map_cnt++;
phba->nport_event_cnt++;
/* stop nodev tmo if running */
if (nlp->nlp_flag & NLP_NODEV_TMO) {
nlp->nlp_flag &= ~NLP_NODEV_TMO;
spin_unlock_irq(phba->host->host_lock);
del_timer_sync(&nlp->nlp_tmofunc);
spin_lock_irq(phba->host->host_lock);
if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
list_del_init(&nlp->nodev_timeout_evt.
evt_listp);
}
nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
break;
case NLP_NPR_LIST:
@@ -1273,11 +1282,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
phba->fc_npr_cnt++;
if (!(nlp->nlp_flag & NLP_NODEV_TMO))
mod_timer(&nlp->nlp_tmofunc,
jiffies + HZ * phba->cfg_nodev_tmo);
nlp->nlp_flag |= NLP_NODEV_TMO;
nlp->nlp_flag &= ~NLP_RCV_PLOGI;
break;
case NLP_JUST_DQ:
@@ -1307,7 +1311,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
* already. If we have, and it's a scsi entity, be
* sure to unblock any attached scsi devices
*/
if (!nlp->rport)
if ((!nlp->rport) || (nlp->rport->port_state ==
FC_PORTSTATE_BLOCKED))
lpfc_register_remote_port(phba, nlp);
/*
@@ -1581,15 +1586,12 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
lpfc_els_abort(phba,ndlp,0);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
spin_unlock_irq(phba->host->host_lock);
del_timer_sync(&ndlp->nlp_tmofunc);
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
if (!list_empty(&ndlp->els_retry_evt.evt_listp))
list_del_init(&ndlp->els_retry_evt.evt_listp);
@@ -1606,16 +1608,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
int
lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
if (ndlp->nlp_flag & NLP_NODEV_TMO) {
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NODEV_TMO;
spin_unlock_irq(phba->host->host_lock);
del_timer_sync(&ndlp->nlp_tmofunc);
if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
}
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
lpfc_cancel_retry_delay_tmo(phba, ndlp);
@@ -2430,34 +2422,6 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
return;
}
static void
lpfc_nodev_timeout(unsigned long ptr)
{
struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp;
unsigned long iflag;
struct lpfc_work_evt *evtp;
ndlp = (struct lpfc_nodelist *)ptr;
phba = ndlp->nlp_phba;
evtp = &ndlp->nodev_timeout_evt;
spin_lock_irqsave(phba->host->host_lock, iflag);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(phba->host->host_lock, iflag);
return;
}
evtp->evt_arg1 = ndlp;
evtp->evt = LPFC_EVT_NODEV_TMO;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
wake_up(phba->work_wait);
spin_unlock_irqrestore(phba->host->host_lock, iflag);
return;
}
/*
* This routine handles processing a NameServer REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -2581,11 +2545,7 @@ lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
uint32_t did)
{
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
init_timer(&ndlp->nlp_tmofunc);
ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
init_timer(&ndlp->nlp_delayfunc);
ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
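
Why the private nodev timer can go away: after fc_remote_port_delete() the FC transport owns the rport timers, running fast_io_fail_tmo and dev_loss_tmo itself and calling back into the LLD through the hooks above. A minimal sketch of that flow, using hypothetical example_* types rather than the real lpfc discovery code:

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

/* Hypothetical node bookkeeping, standing in for lpfc's nodelist. */
struct example_node {
	struct Scsi_Host *shost;
	u8		  nodename[8];
	u8		  portname[8];
	u32		  did;
	struct fc_rport	 *rport;
};

/* Port discovered: hand it to the FC transport, which now owns the
 * dev_loss_tmo timer for it. */
static void example_port_online(struct example_node *node)
{
	struct fc_rport_identifiers ids = {
		.node_name = wwn_to_u64(node->nodename),
		.port_name = wwn_to_u64(node->portname),
		.port_id   = node->did,
		.roles	   = FC_RPORT_ROLE_FCP_TARGET,
	};

	node->rport = fc_remote_port_add(node->shost, 0, &ids);
}

/* Port lost: the transport blocks the rport and starts its timers.
 * If fast_io_fail_tmo fires first it calls .terminate_rport_io; if the
 * port never returns, .dev_loss_tmo_callbk runs. No driver-side
 * nodev timer is needed. */
static void example_port_offline(struct example_node *node)
{
	fc_remote_port_delete(node->rport);
}

The lpfc_nodev_timeout()/LPFC_EVT_NODEV_TMO machinery removed in the diff above duplicated exactly this timeout, which is why it can be deleted once the transport callbacks are in place.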