efc_cmds.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
  4. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  5. */
  6. #include "efclib.h"
  7. #include "../libefc_sli/sli4.h"
  8. #include "efc_cmds.h"
  9. #include "efc_sm.h"
/*
 * Tear down an nport: release its service-parameters DMA buffer and its
 * VPI, then notify the registered callbacks with @evt.
 * @data is unused here; it is kept to match the common handler shape of
 * the callers, which pass through their mailbox/bounce buffers.
 */
static void
efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Clear the nport attached flag */
	nport->attached = false;

	/* Free the service parameters buffer */
	if (nport->dma.virt) {
		dma_free_coherent(&efc->pci->dev, nport->dma.size,
				  nport->dma.virt, nport->dma.phys);
		memset(&nport->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);

	efc_nport_cb(efc, evt, nport);
}
  26. static int
  27. efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
  28. {
  29. struct efc *efc = nport->efc;
  30. struct sli4_mbox_command_header *hdr =
  31. (struct sli4_mbox_command_header *)mqe;
  32. if (status || le16_to_cpu(hdr->status)) {
  33. efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
  34. nport->indicator, status, le16_to_cpu(hdr->status));
  35. return -EIO;
  36. }
  37. return 0;
  38. }
  39. static int
  40. efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
  41. {
  42. struct efc_nport *nport = arg;
  43. int evt = EFC_EVT_NPORT_FREE_OK;
  44. int rc;
  45. rc = efc_nport_get_mbox_status(nport, mqe, status);
  46. if (rc)
  47. evt = EFC_EVT_NPORT_FREE_FAIL;
  48. efc_nport_free_resources(nport, evt, mqe);
  49. return rc;
  50. }
/*
 * Issue an UNREG_VPI mailbox command to release the nport's VPI
 * context. Completion is handled in efc_nport_free_unreg_vpi_cb().
 * On a format or submission failure the nport's resources are freed
 * directly with an EFC_EVT_NPORT_FREE_FAIL event.
 */
static void
efc_nport_free_unreg_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	/* Build the UNREG_VPI command in the local bounce buffer */
	rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
			       SLI4_UNREG_TYPE_PORT);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_free_unreg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
	}
}
/*
 * Deliver an nport event to the registered callbacks and update state.
 * Note the attached flag is set only AFTER the callback has run.
 * If a free request arrived while an attach was in flight, service it
 * now by unregistering the VPI.
 * @data is currently unused.
 */
static void
efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Now inform the registered callbacks */
	efc_nport_cb(efc, evt, nport);

	/* Set the nport attached flag */
	if (evt == EFC_EVT_NPORT_ATTACH_OK)
		nport->attached = true;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending)
		efc_nport_free_unreg_vpi(nport);
}
  84. static int
  85. efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
  86. {
  87. struct efc_nport *nport = arg;
  88. if (efc_nport_get_mbox_status(nport, mqe, status)) {
  89. efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
  90. return -EIO;
  91. }
  92. efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
  93. return 0;
  94. }
/*
 * Continue nport allocation by issuing INIT_VPI. If a free request was
 * queued while allocation was in progress, abandon the allocation and
 * free the nport instead. Completion is handled in
 * efc_nport_alloc_init_vpi_cb().
 */
static void
efc_nport_alloc_init_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending) {
		/*
		 * NOTE(review): data is uninitialized at this point, but
		 * efc_nport_free_resources() ignores its data argument.
		 */
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
		return;
	}

	rc = sli_cmd_init_vpi(efc->sli, data,
			      nport->indicator, nport->domain->indicator);
	if (rc) {
		efc_log_err(efc, "INIT_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_alloc_init_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "INIT_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}
/*
 * READ_SPARM64 completion for nport allocation.
 * On success, copy the default WWPN/WWNN out of the service-parameters
 * DMA buffer, release that buffer, and continue the allocation by
 * issuing INIT_VPI. On failure, tear the nport down with
 * EFC_EVT_NPORT_ALLOC_FAIL.
 */
static int
efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;
	u8 *payload = NULL;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
		return -EIO;
	}

	payload = nport->dma.virt;

	/* Extract the port's default names from the service parameters */
	memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
	       sizeof(nport->sli_wwpn));

	memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
	       sizeof(nport->sli_wwnn));

	/* The service-parameters buffer is no longer needed */
	dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
			  nport->dma.phys);
	memset(&nport->dma, 0, sizeof(struct efc_dma));

	/* Next step in the allocation sequence */
	efc_nport_alloc_init_vpi(nport);
	return 0;
}
/*
 * Start nport allocation when no WWPN was supplied: allocate a DMA
 * buffer for the port's service parameters and issue READ_SPARM64 to
 * fetch the default WWPN/WWNN. Completion continues in
 * efc_nport_alloc_read_sparm64_cb(); any failure tears the nport down
 * with an EFC_EVT_NPORT_ALLOC_FAIL event (which also frees the DMA
 * buffer if it was allocated).
 */
static void
efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
{
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* Allocate memory for the service parameters */
	nport->dma.size = EFC_SPARAM_DMA_SZ;
	nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					     nport->dma.size, &nport->dma.phys,
					     GFP_KERNEL);
	if (!nport->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		/*
		 * NOTE(review): data is uninitialized here, but
		 * efc_nport_free_resources() ignores its data argument.
		 */
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = sli_cmd_read_sparm64(efc->sli, data,
				  &nport->dma, nport->indicator);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_alloc_read_sparm64_cb, nport);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}
/*
 * Allocate SLI resources for an nport.
 *
 * Reserves a VPI for the port and, when a domain is present, starts the
 * asynchronous allocation sequence: READ_SPARM64 first if no WWPN was
 * supplied (to fetch the defaults), otherwise INIT_VPI directly. With
 * no domain (physical port), a WWPN must be supplied; the VPI is only
 * reserved here and initialization happens later.
 *
 * Returns 0 on success (completion is asynchronous), -EIO on failure.
 */
int
efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
		    struct efc_domain *domain, u8 *wwpn)
{
	u32 index;

	nport->indicator = U32_MAX;
	nport->free_req_pending = false;

	if (wwpn)
		memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));

	/*
	 * Allocate a VPI object for the port and store it in the
	 * indicator field of the port object.
	 */
	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
			       &nport->indicator, &index)) {
		efc_log_err(efc, "VPI allocation failure\n");
		return -EIO;
	}

	if (domain) {
		/*
		 * If the WWPN is NULL, fetch the default
		 * WWPN and WWNN before initializing the VPI
		 */
		if (!wwpn)
			efc_nport_alloc_read_sparm64(efc, nport);
		else
			efc_nport_alloc_init_vpi(nport);
	} else if (!wwpn) {
		/* domain NULL and wwpn NULL: a physical port needs a WWN */
		efc_log_err(efc, "need WWN for physical port\n");
		sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
		return -EIO;
	}

	return 0;
}
  204. static int
  205. efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
  206. void *arg)
  207. {
  208. struct efc_nport *nport = arg;
  209. nport->attaching = false;
  210. if (efc_nport_get_mbox_status(nport, mqe, status)) {
  211. efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
  212. return -EIO;
  213. }
  214. efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
  215. return 0;
  216. }
/*
 * Attach an nport to the fabric with the given FC address by
 * registering the previously allocated VPI (REG_VPI). Completion is
 * handled in efc_nport_attach_reg_vpi_cb(); failures release the nport
 * resources with an EFC_EVT_NPORT_ATTACH_FAIL event.
 *
 * NOTE(review): nport->domain is dereferenced without a NULL check —
 * callers must only attach nports that already belong to a domain.
 */
int
efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!nport) {
		efc_log_err(efc, "bad param(s) nport=%p\n", nport);
		return -EIO;
	}

	nport->fc_id = fc_id;

	/* register previously-allocated VPI with the device */
	rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
			     nport->sli_wwpn, nport->indicator,
			     nport->domain->indicator, false);
	if (rc) {
		efc_log_err(efc, "REG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
		return rc;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_nport_attach_reg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "REG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
	} else {
		/* Remember an attach is in flight so a free can be deferred */
		nport->attaching = true;
	}

	return rc;
}
  246. int
  247. efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
  248. {
  249. if (!nport) {
  250. efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
  251. return -EIO;
  252. }
  253. /* Issue the UNREG_VPI command to free the assigned VPI context */
  254. if (nport->attached)
  255. efc_nport_free_unreg_vpi(nport);
  256. else if (nport->attaching)
  257. nport->free_req_pending = true;
  258. else
  259. efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL);
  260. return 0;
  261. }
  262. static int
  263. efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
  264. {
  265. struct efc *efc = domain->efc;
  266. struct sli4_mbox_command_header *hdr =
  267. (struct sli4_mbox_command_header *)mqe;
  268. if (status || le16_to_cpu(hdr->status)) {
  269. efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
  270. domain->indicator, status,
  271. le16_to_cpu(hdr->status));
  272. return -EIO;
  273. }
  274. return 0;
  275. }
/*
 * Tear down a domain: release its service-parameters DMA buffer and its
 * VFI, then notify the registered callbacks with @evt.
 * @data is unused here; it is kept to match the common handler shape of
 * the callers, which pass through their mailbox/bounce buffers.
 */
static void
efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
{
	struct efc *efc = domain->efc;

	/* Free the service parameters buffer */
	if (domain->dma.virt) {
		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);

	efc_domain_cb(efc, evt, domain);
}
  291. static void
  292. efc_domain_send_nport_evt(struct efc_domain *domain,
  293. int port_evt, int domain_evt, void *data)
  294. {
  295. struct efc *efc = domain->efc;
  296. /* Send alloc/attach ok to the physical nport */
  297. efc_nport_send_evt(domain->nport, port_evt, NULL);
  298. /* Now inform the registered callbacks */
  299. efc_domain_cb(efc, domain_evt, domain);
  300. }
  301. static int
  302. efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
  303. void *arg)
  304. {
  305. struct efc_domain *domain = arg;
  306. if (efc_domain_get_mbox_status(domain, mqe, status)) {
  307. efc_domain_free_resources(domain,
  308. EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
  309. return -EIO;
  310. }
  311. efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
  312. EFC_HW_DOMAIN_ALLOC_OK, mqe);
  313. return 0;
  314. }
/*
 * Continue domain allocation by issuing READ_SPARM64 into the domain's
 * DMA buffer (indicator 0 — presumably the physical port; confirm
 * against sli_cmd_read_sparm64()). Completion is handled in
 * efc_domain_alloc_read_sparm64_cb(); any failure releases the domain
 * with an EFC_HW_DOMAIN_ALLOC_FAIL event.
 */
static void
efc_domain_alloc_read_sparm64(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_alloc_read_sparm64_cb, domain);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
	}
}
  336. static int
  337. efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
  338. void *arg)
  339. {
  340. struct efc_domain *domain = arg;
  341. if (efc_domain_get_mbox_status(domain, mqe, status)) {
  342. efc_domain_free_resources(domain,
  343. EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
  344. return -EIO;
  345. }
  346. efc_domain_alloc_read_sparm64(domain);
  347. return 0;
  348. }
  349. static void
  350. efc_domain_alloc_init_vfi(struct efc_domain *domain)
  351. {
  352. struct efc *efc = domain->efc;
  353. struct efc_nport *nport = domain->nport;
  354. u8 data[SLI4_BMBX_SIZE];
  355. int rc;
  356. /*
  357. * For FC, the HW alread registered an FCFI.
  358. * Copy FCF information into the domain and jump to INIT_VFI.
  359. */
  360. domain->fcf_indicator = efc->fcfi;
  361. rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
  362. domain->fcf_indicator, nport->indicator);
  363. if (rc) {
  364. efc_log_err(efc, "INIT_VFI format failure\n");
  365. efc_domain_free_resources(domain,
  366. EFC_HW_DOMAIN_ALLOC_FAIL, data);
  367. return;
  368. }
  369. efc_log_err(efc, "%s issue mbox\n", __func__);
  370. rc = efc->tt.issue_mbox_rqst(efc->base, data,
  371. efc_domain_alloc_init_vfi_cb, domain);
  372. if (rc) {
  373. efc_log_err(efc, "INIT_VFI command failure\n");
  374. efc_domain_free_resources(domain,
  375. EFC_HW_DOMAIN_ALLOC_FAIL, data);
  376. }
  377. }
/*
 * Allocate SLI resources for a domain.
 *
 * Allocates the service-parameters DMA buffer and a VFI, then starts
 * the asynchronous INIT_VFI sequence. Returns 0 when the sequence was
 * started (completion is asynchronous), -EIO on failure; resources
 * acquired here are released on the error paths.
 */
int
efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
{
	u32 index;

	if (!domain || !domain->nport) {
		efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
			    domain, domain ? domain->nport : NULL);
		return -EIO;
	}

	/* allocate memory for the service parameters */
	domain->dma.size = EFC_SPARAM_DMA_SZ;
	domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					      domain->dma.size,
					      &domain->dma.phys, GFP_KERNEL);
	if (!domain->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		return -EIO;
	}

	domain->fcf = fcf;
	domain->fcf_indicator = U32_MAX;
	domain->indicator = U32_MAX;

	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
			       &index)) {
		efc_log_err(efc, "VFI allocation failure\n");
		/* Undo the DMA allocation above before bailing out */
		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));

		return -EIO;
	}

	efc_domain_alloc_init_vfi(domain);
	return 0;
}
  411. static int
  412. efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
  413. void *arg)
  414. {
  415. struct efc_domain *domain = arg;
  416. if (efc_domain_get_mbox_status(domain, mqe, status)) {
  417. efc_domain_free_resources(domain,
  418. EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
  419. return -EIO;
  420. }
  421. efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
  422. EFC_HW_DOMAIN_ATTACH_OK, mqe);
  423. return 0;
  424. }
/*
 * Attach a domain with the given FC address by registering the VFI
 * (REG_VFI). Completion continues in efc_domain_attach_reg_vfi_cb();
 * failures release the domain resources with an
 * EFC_HW_DOMAIN_ATTACH_FAIL event.
 *
 * NOTE(review): domain->nport is dereferenced without a NULL check —
 * callers must only attach fully allocated domains.
 */
int
efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!domain) {
		efc_log_err(efc, "bad param(s) domain=%p\n", domain);
		return -EIO;
	}

	domain->nport->fc_id = fc_id;

	rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
			     domain->fcf_indicator, domain->dma,
			     domain->nport->indicator, domain->nport->sli_wwpn,
			     domain->nport->fc_id);
	if (rc) {
		efc_log_err(efc, "REG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_domain_attach_reg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "REG_VFI command failure\n");
		goto cleanup;
	}

	return rc;

cleanup:
	/* Any failure above releases the domain's DMA buffer and VFI */
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);

	return rc;
}
  454. static int
  455. efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
  456. {
  457. struct efc_domain *domain = arg;
  458. int evt = EFC_HW_DOMAIN_FREE_OK;
  459. int rc;
  460. rc = efc_domain_get_mbox_status(domain, mqe, status);
  461. if (rc) {
  462. evt = EFC_HW_DOMAIN_FREE_FAIL;
  463. rc = -EIO;
  464. }
  465. efc_domain_free_resources(domain, evt, mqe);
  466. return rc;
  467. }
/*
 * Issue an UNREG_VFI mailbox command to release the domain's VFI
 * context. Completion is handled in efc_domain_free_unreg_vfi_cb().
 * On a format or submission failure the domain's resources are freed
 * directly with an EFC_HW_DOMAIN_FREE_FAIL event.
 */
static void
efc_domain_free_unreg_vfi(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	/* Build the UNREG_VFI command in the local bounce buffer */
	rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
			       SLI4_UNREG_TYPE_DOMAIN);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_free_unreg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI command failure\n");
		goto cleanup;
	}

	return;

cleanup:
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
}
  490. int
  491. efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
  492. {
  493. if (!domain) {
  494. efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
  495. return -EIO;
  496. }
  497. efc_domain_free_unreg_vfi(domain);
  498. return 0;
  499. }
  500. int
  501. efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
  502. struct efc_nport *nport)
  503. {
  504. /* Check for invalid indicator */
  505. if (rnode->indicator != U32_MAX) {
  506. efc_log_err(efc,
  507. "RPI allocation failure addr=%#x rpi=%#x\n",
  508. fc_addr, rnode->indicator);
  509. return -EIO;
  510. }
  511. /* NULL SLI port indicates an unallocated remote node */
  512. rnode->nport = NULL;
  513. if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
  514. &rnode->indicator, &rnode->index)) {
  515. efc_log_err(efc, "RPI allocation failure addr=%#x\n",
  516. fc_addr);
  517. return -EIO;
  518. }
  519. rnode->fc_id = fc_addr;
  520. rnode->nport = nport;
  521. return 0;
  522. }
  523. static int
  524. efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
  525. {
  526. struct efc_remote_node *rnode = arg;
  527. struct sli4_mbox_command_header *hdr =
  528. (struct sli4_mbox_command_header *)mqe;
  529. int evt = 0;
  530. if (status || le16_to_cpu(hdr->status)) {
  531. efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
  532. le16_to_cpu(hdr->status));
  533. rnode->attached = false;
  534. evt = EFC_EVT_NODE_ATTACH_FAIL;
  535. } else {
  536. rnode->attached = true;
  537. evt = EFC_EVT_NODE_ATTACH_OK;
  538. }
  539. efc_remote_node_cb(efc, evt, rnode);
  540. return 0;
  541. }
/*
 * Attach a remote node by registering its RPI (REG_RPI), updating the
 * node with the remote port's service parameters. Completion is
 * handled in efc_cmd_node_attach_cb().
 *
 * Returns the mailbox submission status, or -EIO for invalid
 * parameters or a REG_RPI format failure.
 */
int
efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
		    struct efc_dma *sparms)
{
	int rc = -EIO;
	u8 buf[SLI4_BMBX_SIZE];

	if (!rnode || !sparms) {
		efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
			    rnode, sparms);
		return -EIO;
	}

	/* An index of U32_MAX means the node's RPI was never allocated */
	if (rnode->index == U32_MAX) {
		efc_log_err(efc, "bad parameter rnode->index invalid\n");
		return -EIO;
	}

	/* Update a remote node object with the remote port's service params */
	if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
			     rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
		rc = efc->tt.issue_mbox_rqst(efc->base, buf,
					     efc_cmd_node_attach_cb, rnode);

	return rc;
}
  568. int
  569. efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
  570. {
  571. int rc = 0;
  572. if (!rnode) {
  573. efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
  574. return -EIO;
  575. }
  576. if (rnode->nport) {
  577. if (rnode->attached) {
  578. efc_log_err(efc, "rnode is still attached\n");
  579. return -EIO;
  580. }
  581. if (rnode->indicator != U32_MAX) {
  582. if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
  583. rnode->indicator)) {
  584. efc_log_err(efc,
  585. "RPI free fail RPI %d addr=%#x\n",
  586. rnode->indicator, rnode->fc_id);
  587. rc = -EIO;
  588. } else {
  589. rnode->indicator = U32_MAX;
  590. rnode->index = U32_MAX;
  591. }
  592. }
  593. }
  594. return rc;
  595. }
/*
 * UNREG_RPI completion for node detach.
 *
 * A non-zero MQE status is tolerated in exactly one case: the node is
 * attached AND the status is SLI4_MBX_STATUS_RPI_NOT_REG (0x1400),
 * i.e. the RPI was already unregistered. In every other failure case
 * the node stays attached and EFC_EVT_NODE_FREE_FAIL is reported.
 */
static int
efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_remote_node *rnode = arg;
	struct sli4_mbox_command_header *hdr =
				(struct sli4_mbox_command_header *)mqe;
	int evt = EFC_EVT_NODE_FREE_FAIL;
	int rc = 0;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(hdr->status));

		/*
		 * In certain cases, a non-zero MQE status is OK (all must be
		 * true):
		 * - node is attached
		 * - status is 0x1400
		 */
		if (!rnode->attached ||
		    (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
			rc = -EIO;
	}

	if (!rc) {
		rnode->attached = false;
		evt = EFC_EVT_NODE_FREE_OK;
	}

	efc_remote_node_cb(efc, evt, rnode);

	return rc;
}
/*
 * Detach a remote node by issuing UNREG_RPI; completion is handled in
 * efc_cmd_node_free_cb(). Returns 0 when the command was submitted,
 * -EIO for a NULL node, an unattached node, a format failure, or a
 * submission failure. A node with no nport returns -EIO without
 * logging.
 */
int
efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = -EIO;

	if (!rnode) {
		efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
		return -EIO;
	}

	if (rnode->nport) {
		/* Only an attached node has an RPI to unregister */
		if (!rnode->attached)
			return -EIO;

		rc = -EIO;

		if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
				       SLI4_RSRC_RPI, U32_MAX))
			rc = efc->tt.issue_mbox_rqst(efc->base, buf,
						     efc_cmd_node_free_cb, rnode);

		if (rc != 0) {
			efc_log_err(efc, "UNREG_RPI failed\n");
			rc = -EIO;
		}
	}

	return rc;
}