qla_mid.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
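
/*
 * Locking summary for vp_id allocation/deallocation: ha->vp_idx_map and
 * ha->num_vhosts are modified under the ha->vport_lock mutex, ha->vp_list is
 * protected by the ha->vport_slock spinlock, and the target-mode VP lookup
 * map (qlt_update_vp_map) is updated under ha->hardware_lock.  In these two
 * paths the spinlocks are taken only while vport_lock is already held.
 */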
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u32 i, bailout;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	bailout = 0;
	for (i = 0; i < 500; i++) {
		spin_lock_irqsave(&ha->vport_slock, flags);
		if (atomic_read(&vha->vref_count) == 0) {
			list_del(&vha->list);
			qlt_update_vp_map(vha, RESET_VP_IDX);
			bailout = 1;
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (bailout)
			break;
		else
			msleep(20);
	}
	if (!bailout) {
		ql_log(ql_log_info, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_del(&vha->list);
		qlt_update_vp_map(vha, RESET_VP_IDX);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);
}
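
/*
 * The wait loop above polls vref_count up to 500 times with a 20 ms sleep
 * between attempts (roughly a 10 second budget).  If references never drain,
 * the vport is unlinked from vp_list anyway and a timeout is logged rather
 * than blocking teardown indefinitely.
 */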
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_ACTIVE(vha))
			qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
			    FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
		/* delete sessions and flush sa_indexes */
		qla2x00_wait_for_sess_deletion(vha);
	}

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	if (!vha->hw->flags.edif_enabled)
		qla2x00_wait_for_sess_deletion(vha);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha, *tvp;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
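
/*
 * Note on the traversal above: vport_slock cannot be held across
 * qla2x00_async_event(), so each vport is pinned with vref_count while the
 * lock is dropped, then released (with a wake_up on vref_waitq) once the
 * event has been delivered.  qla24xx_deallocate_vp_id() waits for this count
 * to reach zero before unlinking the vport.
 */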
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: This control_vp can fail if ISP reset is already
	 * issued, this is expected, as the vp would be already
	 * logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}

static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
		if (atomic_read(&vha->loop_state) == LOOP_READY) {
			qla24xx_process_purex_list(&vha->purex_list);
			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp, *tvp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the maximum number of supported NPIV vports */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;
	ha->dpc_active = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}
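
/*
 * Two teardown paths exist above: when ql2xmqsupport or ql2xnvmeenable is
 * set, the per-vha qpairs are removed via qla2xxx_delete_qpair(); otherwise
 * the legacy per-ha request/response queue maps are walked and every queue
 * still marked in the qid bitmaps is deleted individually.
 */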
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (uint16_t *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha = qpair->vha;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSI-X handshake mode for adapters not capable of disabling it */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    ha->flags.disable_msix_handshake ?
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}
static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Send a control command for the given virtual port
 * @vha:	adapter block pointer
 * @cmd:	control command to be sent for the virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	/* ref: INIT */
	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla_ctrlvp_sp_done);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	return rval;
}
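
/*
 * qla24xx_control_vp() above is synchronous: the SRB is issued on the base
 * vha, the caller sleeps on an on-stack completion that qla_ctrlvp_sp_done()
 * signals, and the command status is read back from sp->rc before the INIT
 * reference is dropped with kref_put().
 */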