  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
  4. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  5. */
  6. #include "efc.h"
  7. int
  8. efc_remote_node_cb(void *arg, int event, void *data)
  9. {
  10. struct efc *efc = arg;
  11. struct efc_remote_node *rnode = data;
  12. struct efc_node *node = rnode->node;
  13. unsigned long flags = 0;
  14. spin_lock_irqsave(&efc->lock, flags);
  15. efc_node_post_event(node, event, NULL);
  16. spin_unlock_irqrestore(&efc->lock, flags);
  17. return 0;
  18. }
/* Look up a node by FC_ID in the nport's xarray; returns NULL if absent. */
struct efc_node *
efc_node_find(struct efc_nport *nport, u32 port_id)
{
	/* Find an FC node structure given the FC port ID */
	return xa_load(&nport->lookup, port_id);
}
/* kref release callback (installed as node->release): return the node's
 * service-parameters DMA buffer and the node object to their pools.
 */
static void
_efc_node_free(struct kref *arg)
{
	struct efc_node *node = container_of(arg, struct efc_node, ref);
	struct efc *efc = node->efc;
	struct efc_dma *dma;

	dma = &node->sparm_dma_buf;
	dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
	/* clear so stale virt/phys pointers cannot be reused */
	memset(dma, 0, sizeof(struct efc_dma));
	mempool_free(node, efc->node_pool);
}
  36. struct efc_node *efc_node_alloc(struct efc_nport *nport,
  37. u32 port_id, bool init, bool targ)
  38. {
  39. int rc;
  40. struct efc_node *node = NULL;
  41. struct efc *efc = nport->efc;
  42. struct efc_dma *dma;
  43. if (nport->shutting_down) {
  44. efc_log_debug(efc, "node allocation when shutting down %06x",
  45. port_id);
  46. return NULL;
  47. }
  48. node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
  49. if (!node) {
  50. efc_log_err(efc, "node allocation failed %06x", port_id);
  51. return NULL;
  52. }
  53. memset(node, 0, sizeof(*node));
  54. dma = &node->sparm_dma_buf;
  55. dma->size = NODE_SPARAMS_SIZE;
  56. dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys);
  57. if (!dma->virt) {
  58. efc_log_err(efc, "node dma alloc failed\n");
  59. goto dma_fail;
  60. }
  61. node->rnode.indicator = U32_MAX;
  62. node->nport = nport;
  63. node->efc = efc;
  64. node->init = init;
  65. node->targ = targ;
  66. spin_lock_init(&node->pend_frames_lock);
  67. INIT_LIST_HEAD(&node->pend_frames);
  68. spin_lock_init(&node->els_ios_lock);
  69. INIT_LIST_HEAD(&node->els_ios_list);
  70. node->els_io_enabled = true;
  71. rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
  72. if (rc) {
  73. efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc);
  74. goto hw_alloc_fail;
  75. }
  76. node->rnode.node = node;
  77. node->sm.app = node;
  78. node->evtdepth = 0;
  79. efc_node_update_display_name(node);
  80. rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
  81. if (rc) {
  82. efc_log_err(efc, "Node lookup store failed: %d\n", rc);
  83. goto xa_fail;
  84. }
  85. /* initialize refcount */
  86. kref_init(&node->ref);
  87. node->release = _efc_node_free;
  88. kref_get(&nport->ref);
  89. return node;
  90. xa_fail:
  91. efc_node_free_resources(efc, &node->rnode);
  92. hw_alloc_fail:
  93. dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
  94. dma_fail:
  95. mempool_free(node, efc->node_pool);
  96. return NULL;
  97. }
  98. void
  99. efc_node_free(struct efc_node *node)
  100. {
  101. struct efc_nport *nport;
  102. struct efc *efc;
  103. int rc = 0;
  104. struct efc_node *ns = NULL;
  105. nport = node->nport;
  106. efc = node->efc;
  107. node_printf(node, "Free'd\n");
  108. if (node->refound) {
  109. /*
  110. * Save the name server node. We will send fake RSCN event at
  111. * the end to handle ignored RSCN event during node deletion
  112. */
  113. ns = efc_node_find(node->nport, FC_FID_DIR_SERV);
  114. }
  115. if (!node->nport) {
  116. efc_log_err(efc, "Node already Freed\n");
  117. return;
  118. }
  119. /* Free HW resources */
  120. rc = efc_node_free_resources(efc, &node->rnode);
  121. if (rc < 0)
  122. efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc);
  123. /* if the gidpt_delay_timer is still running, then delete it */
  124. if (timer_pending(&node->gidpt_delay_timer))
  125. del_timer(&node->gidpt_delay_timer);
  126. xa_erase(&nport->lookup, node->rnode.fc_id);
  127. /*
  128. * If the node_list is empty,
  129. * then post a ALL_CHILD_NODES_FREE event to the nport,
  130. * after the lock is released.
  131. * The nport may be free'd as a result of the event.
  132. */
  133. if (xa_empty(&nport->lookup))
  134. efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE,
  135. NULL);
  136. node->nport = NULL;
  137. node->sm.current_state = NULL;
  138. kref_put(&nport->ref, nport->release);
  139. kref_put(&node->ref, node->release);
  140. if (ns) {
  141. /* sending fake RSCN event to name server node */
  142. efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL);
  143. }
  144. }
  145. static void
  146. efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length)
  147. {
  148. if (!dma || !buffer || !buffer_length)
  149. return;
  150. if (buffer_length > dma->size)
  151. buffer_length = dma->size;
  152. memcpy(dma->virt, buffer, buffer_length);
  153. dma->len = buffer_length;
  154. }
/**
 * efc_node_attach() - Attach the node to the HW using its saved
 * service parameters.
 * @node: node to attach
 *
 * Rebuilds the node's EUI name strings, copies the service parameters
 * (skipping the first 4 bytes of the saved payload) into the node's DMA
 * buffer, and issues the HW node-attach command.
 *
 * Return: result of efc_cmd_node_attach(); negative on failure, -EIO if
 * the domain is not attached.
 */
int
efc_node_attach(struct efc_node *node)
{
	int rc = 0;
	struct efc_nport *nport = node->nport;
	struct efc_domain *domain = nport->domain;
	struct efc *efc = node->efc;

	/* nodes cannot attach until the domain is attached */
	if (!domain->attached) {
		efc_log_err(efc, "Warning: unattached domain\n");
		return -EIO;
	}
	/* Update node->wwpn/wwnn */
	efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
				efc_node_get_wwpn(node));
	efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
				efc_node_get_wwnn(node));

	/* copy saved payload minus its first 4 bytes into the DMA buffer */
	efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
			sizeof(node->service_params) - 4);

	/* take lock to protect node->rnode.attached */
	rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
	if (rc < 0)
		efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc);

	return rc;
}
  179. void
  180. efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length)
  181. {
  182. switch (fc_id) {
  183. case FC_FID_FLOGI:
  184. snprintf(buffer, buffer_length, "fabric");
  185. break;
  186. case FC_FID_FCTRL:
  187. snprintf(buffer, buffer_length, "fabctl");
  188. break;
  189. case FC_FID_DIR_SERV:
  190. snprintf(buffer, buffer_length, "nserve");
  191. break;
  192. default:
  193. if (fc_id == FC_FID_DOM_MGR) {
  194. snprintf(buffer, buffer_length, "dctl%02x",
  195. (fc_id & 0x0000ff));
  196. } else {
  197. snprintf(buffer, buffer_length, "%06x", fc_id);
  198. }
  199. break;
  200. }
  201. }
  202. void
  203. efc_node_update_display_name(struct efc_node *node)
  204. {
  205. u32 port_id = node->rnode.fc_id;
  206. struct efc_nport *nport = node->nport;
  207. char portid_display[16];
  208. efc_node_fcid_display(port_id, portid_display, sizeof(portid_display));
  209. snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
  210. nport->display_name, portid_display);
  211. }
  212. void
  213. efc_node_send_ls_io_cleanup(struct efc_node *node)
  214. {
  215. if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
  216. efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
  217. node->display_name, node->ls_acc_oxid);
  218. node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
  219. node->ls_acc_io = NULL;
  220. }
  221. }
/* Shutdown path for EFC_NODE_SHUTDOWN_IMPLICIT_LOGO: a PLOGI arrived for
 * an already-logged-in node, so re-attach it (keeping the same HW node
 * resources) instead of freeing it.
 */
static void efc_node_handle_implicit_logo(struct efc_node *node)
{
	int rc;

	/*
	 * currently, only case for implicit logo is PLOGI
	 * recvd. Thus, node's ELS IO pending list won't be
	 * empty (PLOGI will be on it)
	 */
	WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
	node_printf(node, "Reason: implicit logout, re-authenticate\n");

	/* Re-attach node with the same HW node resources */
	node->req_free = false;
	rc = efc_node_attach(node);
	/* transition before posting failure so the wait state handles it */
	efc_node_transition(node, __efc_d_wait_node_attach, NULL);
	node->els_io_enabled = true;

	if (rc < 0)
		efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
}
/* Shutdown path for EFC_NODE_SHUTDOWN_EXPLICIT_LOGO: decide whether the
 * node must be kept alive (pending frames, or re-authentication needed)
 * or allowed to complete shutdown.
 */
static void efc_node_handle_explicit_logo(struct efc_node *node)
{
	s8 pend_frames_empty;
	unsigned long flags = 0;

	/* cleanup any pending LS_ACC ELSs */
	efc_node_send_ls_io_cleanup(node);

	/* snapshot the pending-frames state under its lock */
	spin_lock_irqsave(&node->pend_frames_lock, flags);
	pend_frames_empty = list_empty(&node->pend_frames);
	spin_unlock_irqrestore(&node->pend_frames_lock, flags);

	/*
	 * there are two scenarios where we want to keep
	 * this node alive:
	 * 1. there are pending frames that need to be
	 * processed or
	 * 2. we're an initiator and the remote node is
	 * a target and we need to re-authenticate
	 */
	node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty);
	node_printf(node, "nport.ini=%d node.tgt=%d\n",
		    node->nport->enable_ini, node->targ);
	if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
		u8 send_plogi = false;

		if (node->nport->enable_ini && node->targ) {
			/*
			 * we're an initiator and
			 * node shutting down is a target;
			 * we'll need to re-authenticate in
			 * initial state
			 */
			send_plogi = true;
		}

		/*
		 * transition to __efc_d_init
		 * (will retain HW node resources)
		 */
		node->els_io_enabled = true;
		node->req_free = false;

		/*
		 * either pending frames exist or we are re-authenticating
		 * with PLOGI (or both); in either case, return to initial
		 * state
		 */
		efc_node_init_device(node, send_plogi);
	}
	/* else: let node shutdown occur */
}
  286. static void
  287. efc_node_purge_pending(struct efc_node *node)
  288. {
  289. struct efc *efc = node->efc;
  290. struct efc_hw_sequence *frame, *next;
  291. unsigned long flags = 0;
  292. spin_lock_irqsave(&node->pend_frames_lock, flags);
  293. list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
  294. list_del(&frame->list_entry);
  295. efc->tt.hw_seq_free(efc, frame);
  296. }
  297. spin_unlock_irqrestore(&node->pend_frames_lock, flags);
  298. }
/**
 * __efc_node_shutdown() - Node SM state: begin node shutdown.
 * @ctx: node state-machine context
 * @evt: state-machine event
 * @arg: event-specific argument
 *
 * On entry, dispatches on node->shutdown_reason: implicit logo re-attaches
 * the node, explicit logo may keep it alive, and the default path purges
 * pending frames and lets the node be freed.
 */
void
__efc_node_shutdown(struct efc_sm_ctx *ctx,
		    enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		efc_node_hold_frames(node);
		WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
		/* by default, we will be freeing node after we unwind */
		node->req_free = true;

		switch (node->shutdown_reason) {
		case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO:
			/* Node shutdown b/c of PLOGI received when node
			 * already logged in. We have PLOGI service
			 * parameters, so submit node attach; we won't be
			 * freeing this node
			 */
			efc_node_handle_implicit_logo(node);
			break;

		case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO:
			efc_node_handle_explicit_logo(node);
			break;

		case EFC_NODE_SHUTDOWN_DEFAULT:
		default: {
			/*
			 * shutdown due to link down,
			 * node going away (xport event) or
			 * nport shutdown, purge pending and
			 * proceed to cleanup node
			 */

			/* cleanup any pending LS_ACC ELSs */
			efc_node_send_ls_io_cleanup(node);

			node_printf(node,
				    "Shutdown reason: default, purge pending\n");
			efc_node_purge_pending(node);
			break;
		}
		}

		break;
	}
	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	default:
		__efc_node_common(__func__, ctx, evt, arg);
	}
}
/* Check whether all ELS activity on the node has quiesced (no outstanding
 * requests or completions, and an empty ELS I/O list). If quiesced,
 * advance the shutdown state machine — either directly to I/O shutdown
 * when the HW node is already detached, or to wait for HW node free —
 * and return true. Return false if ELS activity is still outstanding.
 */
static bool
efc_node_check_els_quiesced(struct efc_node *node)
{
	/* check to see if ELS requests, completions are quiesced */
	if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
	    efc_els_io_list_empty(node, &node->els_ios_list)) {
		if (!node->attached) {
			/* hw node detach already completed, proceed */
			node_printf(node, "HW node not attached\n");
			efc_node_transition(node,
					    __efc_node_wait_ios_shutdown,
					    NULL);
		} else {
			/*
			 * hw node detach hasn't completed,
			 * transition and wait
			 */
			node_printf(node, "HW node still attached\n");
			efc_node_transition(node, __efc_node_wait_node_free,
					    NULL);
		}
		return true;
	}
	return false;
}
  374. void
  375. efc_node_initiate_cleanup(struct efc_node *node)
  376. {
  377. /*
  378. * if ELS's have already been quiesced, will move to next state
  379. * if ELS's have not been quiesced, abort them
  380. */
  381. if (!efc_node_check_els_quiesced(node)) {
  382. efc_node_hold_frames(node);
  383. efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
  384. }
  385. }
/**
 * __efc_node_wait_els_shutdown() - Node SM state: wait for all ELS I/Os
 * to complete during shutdown.
 * @ctx: node state-machine context
 * @evt: state-machine event
 * @arg: event-specific argument
 *
 * Decrements the request/completion counters as ELS events arrive and
 * re-checks quiescence after each relevant event.
 */
void
__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	bool check_quiesce = false;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();
	/* Node state machine: Wait for all ELSs to complete */
	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		if (efc_els_io_list_empty(node, &node->els_ios_list)) {
			node_printf(node, "All ELS IOs complete\n");
			check_quiesce = true;
		}
		break;
	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK:
	case EFC_EVT_SRRS_ELS_REQ_FAIL:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_ELS_REQ_ABORTED:
		/* an outstanding ELS request finished; counter must be > 0 */
		if (WARN_ON(!node->els_req_cnt))
			break;
		node->els_req_cnt--;
		check_quiesce = true;
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/* an outstanding ELS completion finished */
		if (WARN_ON(!node->els_cmpl_cnt))
			break;
		node->els_cmpl_cnt--;
		check_quiesce = true;
		break;

	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/* all ELS IO's complete */
		node_printf(node, "All ELS IOs complete\n");
		WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
		check_quiesce = true;
		break;

	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		check_quiesce = true;
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		fallthrough;

	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		break;

	default:
		__efc_node_common(__func__, ctx, evt, arg);
	}

	if (check_quiesce)
		efc_node_check_els_quiesced(node);
}
/**
 * __efc_node_wait_node_free() - Node SM state: wait for the HW node-free
 * completion during shutdown.
 * @ctx: node state-machine context
 * @evt: state-machine event
 * @arg: event-specific argument
 *
 * On EFC_EVT_NODE_FREE_OK the node is no longer HW-attached and the SM
 * advances to waiting for I/O shutdown.
 */
void
__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_FREE_OK:
		/* node is officially no longer attached */
		node->attached = false;
		efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
		break;

	case EFC_EVT_ALL_CHILD_NODES_FREE:
	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		/* As IOs and ELS IO's complete we expect to get these events */
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		fallthrough;

	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		break;
	default:
		__efc_node_common(__func__, ctx, evt, arg);
	}
}
/**
 * __efc_node_wait_ios_shutdown() - Node SM state: wait for active I/Os to
 * drain, then move to the final shutdown state.
 * @ctx: node state-machine context
 * @evt: state-machine event
 * @arg: event-specific argument
 */
void
__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);

		/* first check to see if no ELS IOs are outstanding */
		if (efc_els_io_list_empty(node, &node->els_ios_list))
			/* If there are any active IOS, Free them. */
			efc_node_transition(node, __efc_node_shutdown, NULL);
		break;

	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		if (efc_els_io_list_empty(node, &node->els_ios_list))
			efc_node_transition(node, __efc_node_shutdown, NULL);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IO IO's complete */
		if (WARN_ON(!node->els_req_cnt))
			break;
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		fallthrough;

	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
			      efc_sm_event_name(evt));
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	default:
		__efc_node_common(__func__, ctx, evt, arg);
	}
}
/**
 * __efc_node_common() - Default event handler shared by all node states.
 * @funcname: name of the state that delegated the event (for logging)
 * @ctx: node state-machine context
 * @evt: state-machine event
 * @arg: event-specific argument (struct efc_node_cb for frame events)
 *
 * Maintains node->attached and the ELS request/completion counters, and
 * rejects unsolicited ELS/ABTS frames that no state handled.
 */
void
__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = NULL;
	struct efc *efc = NULL;
	struct efc_node_cb *cbdata = arg;

	node = ctx->app;
	efc = node->efc;

	switch (evt) {
	case EFC_EVT_ENTER:
	case EFC_EVT_REENTER:
	case EFC_EVT_EXIT:
	case EFC_EVT_NPORT_TOPOLOGY_NOTIFY:
	case EFC_EVT_NODE_MISSING:
	case EFC_EVT_FCP_CMD_RCVD:
		/* no default action for these events */
		break;

	case EFC_EVT_NODE_REFOUND:
		node->refound = true;
		break;

	/*
	 * node->attached must be set appropriately
	 * for all node attach/detach events
	 */
	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		break;

	case EFC_EVT_NODE_FREE_OK:
	case EFC_EVT_NODE_ATTACH_FAIL:
		node->attached = false;
		break;

	/*
	 * handle any ELS completions that
	 * other states either didn't care about
	 * or forgot about
	 */
	case EFC_EVT_SRRS_ELS_CMPL_OK:
	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		if (WARN_ON(!node->els_cmpl_cnt))
			break;
		node->els_cmpl_cnt--;
		break;

	/*
	 * handle any ELS request completions that
	 * other states either didn't care about
	 * or forgot about
	 */
	case EFC_EVT_SRRS_ELS_REQ_OK:
	case EFC_EVT_SRRS_ELS_REQ_FAIL:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_ELS_REQ_ABORTED:
		if (WARN_ON(!node->els_req_cnt))
			break;
		node->els_req_cnt--;
		break;

	case EFC_EVT_ELS_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		/*
		 * Unsupported ELS was received,
		 * send LS_RJT, command not supported
		 */
		efc_log_debug(efc,
			      "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
			      node->display_name, funcname,
			      ((u8 *)cbdata->payload->dma.virt)[0]);

		efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
				ELS_RJT_UNSUP, ELS_EXPL_NONE, 0);
		break;
	}

	case EFC_EVT_PLOGI_RCVD:
	case EFC_EVT_FLOGI_RCVD:
	case EFC_EVT_LOGO_RCVD:
	case EFC_EVT_PRLI_RCVD:
	case EFC_EVT_PRLO_RCVD:
	case EFC_EVT_PDISC_RCVD:
	case EFC_EVT_FDISC_RCVD:
	case EFC_EVT_ADISC_RCVD:
	case EFC_EVT_RSCN_RCVD:
	case EFC_EVT_SCR_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		/* sm: / send ELS_RJT */
		efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n",
			      node->display_name, funcname,
			      efc_sm_event_name(evt));
		/* if we didn't catch this in a state, send generic LS_RJT */
		efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
				ELS_RJT_UNAB, ELS_EXPL_NONE, 0);
		break;
	}
	case EFC_EVT_ABTS_RCVD: {
		efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n",
			      node->display_name, funcname,
			      efc_sm_event_name(evt));

		/* sm: / send BA_ACC */
		efc_send_bls_acc(node, cbdata->header->dma.virt);
		break;
	}

	default:
		/* unhandled event; log it for debugging */
		efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
			      node->display_name, funcname,
			      efc_sm_event_name(evt));
	}
}
/* Save an ELS payload (presumably PLOGI/FLOGI service parameters — used
 * later by efc_node_attach()) into node->service_params.
 */
void
efc_node_save_sparms(struct efc_node *node, void *payload)
{
	memcpy(node->service_params, payload, sizeof(node->service_params));
}
/**
 * efc_node_post_event() - Post an event to the node state machine.
 * @node: node receiving the event
 * @evt: event to post
 * @arg: event-specific argument
 *
 * Tracks event nesting depth so that pending frames are dispatched only
 * from the outermost call (avoiding recursion) and the node is freed only
 * after the call stack has fully unwound.
 */
void
efc_node_post_event(struct efc_node *node,
		    enum efc_sm_event evt, void *arg)
{
	bool free_node = false;

	node->evtdepth++;

	efc_sm_post_event(&node->sm, evt, arg);

	/* If our event call depth is one and
	 * we're not holding frames
	 * then we can dispatch any pending frames.
	 * We don't want to allow the efc_process_node_pending()
	 * call to recurse.
	 */
	if (!node->hold_frames && node->evtdepth == 1)
		efc_process_node_pending(node);

	node->evtdepth--;

	/*
	 * Free the node object if so requested,
	 * and we're at an event call depth of zero
	 */
	if (node->evtdepth == 0 && node->req_free)
		free_node = true;

	if (free_node)
		efc_node_free(node);
}
/* Transition the node SM to @state: re-entering the current state posts
 * EFC_EVT_REENTER; otherwise EXIT is posted to the old state, the state
 * pointer is swapped, and ENTER is posted to the new one.
 */
void
efc_node_transition(struct efc_node *node,
		    void (*state)(struct efc_sm_ctx *,
				  enum efc_sm_event, void *), void *data)
{
	struct efc_sm_ctx *ctx = &node->sm;

	if (ctx->current_state == state) {
		efc_node_post_event(node, EFC_EVT_REENTER, data);
	} else {
		efc_node_post_event(node, EFC_EVT_EXIT, data);
		ctx->current_state = state;
		efc_node_post_event(node, EFC_EVT_ENTER, data);
	}
}
/* Format a 64-bit name as an "eui.XXXXXXXXXXXXXXXX" string into @buf;
 * the buffer is zeroed first so trailing bytes are NUL.
 */
void
efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name)
{
	memset(buf, 0, buf_len);

	snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name);
}
  688. u64
  689. efc_node_get_wwpn(struct efc_node *node)
  690. {
  691. struct fc_els_flogi *sp =
  692. (struct fc_els_flogi *)node->service_params;
  693. return be64_to_cpu(sp->fl_wwpn);
  694. }
  695. u64
  696. efc_node_get_wwnn(struct efc_node *node)
  697. {
  698. struct fc_els_flogi *sp =
  699. (struct fc_els_flogi *)node->service_params;
  700. return be64_to_cpu(sp->fl_wwnn);
  701. }
/* NOTE(review): stub — always returns 0 and ignores all arguments;
 * presumably a placeholder for ELS request validation. Confirm against
 * the upstream libefc implementation before relying on it.
 */
int
efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
		       u8 cmd, void (*efc_node_common_func)(const char *,
		       struct efc_sm_ctx *, enum efc_sm_event, void *),
		       const char *funcname)
{
	return 0;
}
/* NOTE(review): stub — always returns 0 and ignores all arguments;
 * presumably a placeholder for name-server request validation. Confirm
 * against the upstream libefc implementation before relying on it.
 */
int
efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
		      u16 cmd, void (*efc_node_common_func)(const char *,
		      struct efc_sm_ctx *, enum efc_sm_event, void *),
		      const char *funcname)
{
	return 0;
}
  718. int
  719. efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
  720. {
  721. int empty;
  722. unsigned long flags = 0;
  723. spin_lock_irqsave(&node->els_ios_lock, flags);
  724. empty = list_empty(list);
  725. spin_unlock_irqrestore(&node->els_ios_lock, flags);
  726. return empty;
  727. }
/**
 * efc_node_pause() - Pause the node state machine.
 * @node: node to pause
 * @state: state to resume into on EFC_EVT_RESUME
 *
 * Saves @state in node->nodedb_state and parks the SM in
 * __efc_node_paused until a resume event arrives.
 */
void
efc_node_pause(struct efc_node *node,
	       void (*state)(struct efc_sm_ctx *,
			     enum efc_sm_event, void *))
{
	node->nodedb_state = state;
	efc_node_transition(node, __efc_node_paused, NULL);
}
/**
 * __efc_node_paused() - Node SM state: paused.
 * @ctx: node state-machine context
 * @evt: state-machine event
 * @arg: event-specific argument
 */
void
__efc_node_paused(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * This state is entered when a state is "paused". When resumed, the
	 * node is transitioned to a previously saved state
	 * (node->nodedb_state)
	 */
	switch (evt) {
	case EFC_EVT_ENTER:
		node_printf(node, "Paused\n");
		break;

	case EFC_EVT_RESUME: {
		void (*pf)(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg);

		/* consume the saved state before transitioning into it */
		pf = node->nodedb_state;

		node->nodedb_state = NULL;
		efc_node_transition(node, pf, NULL);
		break;
	}

	case EFC_EVT_DOMAIN_ATTACH_OK:
		break;

	case EFC_EVT_SHUTDOWN:
		/* defer: node will be freed when the SM unwinds */
		node->req_free = true;
		break;

	default:
		__efc_node_common(__func__, ctx, evt, arg);
	}
}
/**
 * efc_node_recv_els_frame() - Dispatch a received ELS frame to the node SM.
 * @node: destination node
 * @seq: received HW sequence (header + payload)
 *
 * Maps the ELS command byte to a specific SM event; unrecognized commands
 * are posted as the generic EFC_EVT_ELS_RCVD.
 */
void
efc_node_recv_els_frame(struct efc_node *node,
			struct efc_hw_sequence *seq)
{
	u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp);
	struct {
		u32 cmd;
		enum efc_sm_event evt;
		u32 payload_size;
	} els_cmd_list[] = {
		{ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)},
		{ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)},
		{ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)},
		{ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size},
		{ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size},
		{ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
		{ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
		{ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)},
		{ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
		{ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD},
	};
	struct efc_node_cb cbdata;
	u8 *buf = seq->payload->dma.virt;
	enum efc_sm_event evt = EFC_EVT_ELS_RCVD;
	u32 i;

	memset(&cbdata, 0, sizeof(cbdata));
	cbdata.header = seq->header;
	cbdata.payload = seq->payload;

	/* find a matching event for the ELS command */
	for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
		if (els_cmd_list[i].cmd == buf[0]) {
			evt = els_cmd_list[i].evt;
			break;
		}
	}

	efc_node_post_event(node, evt, &cbdata);
}
  805. void
  806. efc_node_recv_ct_frame(struct efc_node *node,
  807. struct efc_hw_sequence *seq)
  808. {
  809. struct fc_ct_hdr *iu = seq->payload->dma.virt;
  810. struct fc_frame_header *hdr = seq->header->dma.virt;
  811. struct efc *efc = node->efc;
  812. u16 gscmd = be16_to_cpu(iu->ct_cmd);
  813. efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n",
  814. node->display_name, gscmd);
  815. efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
  816. FC_FS_RJT, FC_FS_RJT_UNSUP, 0);
  817. }
  818. void
  819. efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
  820. {
  821. struct efc_node_cb cbdata;
  822. memset(&cbdata, 0, sizeof(cbdata));
  823. cbdata.header = seq->header;
  824. cbdata.payload = seq->payload;
  825. efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
  826. }
/**
 * efc_process_node_pending() - Dispatch frames queued on the node.
 * @node: node whose pending frames are processed
 *
 * Dequeues and dispatches pending frames one at a time, re-checking
 * node->hold_frames between frames since any dispatched frame may move
 * the SM into a state that holds frames.
 */
void
efc_process_node_pending(struct efc_node *node)
{
	struct efc *efc = node->efc;
	struct efc_hw_sequence *seq = NULL;
	u32 pend_frames_processed = 0;
	unsigned long flags = 0;

	for (;;) {
		/* need to check for hold frames condition after each frame
		 * processed because any given frame could cause a transition
		 * to a state that holds frames
		 */
		if (node->hold_frames)
			break;

		seq = NULL;
		/* Get next frame/sequence */
		spin_lock_irqsave(&node->pend_frames_lock, flags);

		if (!list_empty(&node->pend_frames)) {
			seq = list_first_entry(&node->pend_frames,
					       struct efc_hw_sequence, list_entry);
			list_del(&seq->list_entry);
		}
		spin_unlock_irqrestore(&node->pend_frames_lock, flags);

		if (!seq) {
			/* list drained; latch and reset the counter */
			pend_frames_processed = node->pend_frames_processed;
			node->pend_frames_processed = 0;
			break;
		}
		node->pend_frames_processed++;

		/* now dispatch frame(s) to dispatch function */
		efc_node_dispatch_frame(node, seq);
		efc->tt.hw_seq_free(efc, seq);
	}

	if (pend_frames_processed != 0)
		efc_log_debug(efc, "%u node frames held and processed\n",
			      pend_frames_processed);
}
  864. void
  865. efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
  866. {
  867. unsigned long flags = 0;
  868. enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK;
  869. struct efc *efc = node->efc;
  870. if (status)
  871. evt = EFC_EVT_NODE_SESS_REG_FAIL;
  872. spin_lock_irqsave(&efc->lock, flags);
  873. /* Notify the node to resume */
  874. efc_node_post_event(node, evt, NULL);
  875. spin_unlock_irqrestore(&efc->lock, flags);
  876. }
  877. void
  878. efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
  879. {
  880. unsigned long flags = 0;
  881. spin_lock_irqsave(&efc->lock, flags);
  882. /* Notify the node to resume */
  883. efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
  884. spin_unlock_irqrestore(&efc->lock, flags);
  885. }
  886. void
  887. efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
  888. {
  889. unsigned long flags = 0;
  890. spin_lock_irqsave(&efc->lock, flags);
  891. /* Notify the node to resume */
  892. efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
  893. spin_unlock_irqrestore(&efc->lock, flags);
  894. }
  895. void
  896. efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
  897. {
  898. unsigned long flags = 0;
  899. spin_lock_irqsave(&efc->lock, flags);
  900. efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
  901. spin_unlock_irqrestore(&efc->lock, flags);
  902. }
  903. void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
  904. {
  905. struct efc *efc = node->efc;
  906. unsigned long flags = 0;
  907. spin_lock_irqsave(&efc->lock, flags);
  908. efc_node_post_event(node, evt, arg);
  909. spin_unlock_irqrestore(&efc->lock, flags);
  910. }
  911. void efc_node_post_shutdown(struct efc_node *node, void *arg)
  912. {
  913. unsigned long flags = 0;
  914. struct efc *efc = node->efc;
  915. spin_lock_irqsave(&efc->lock, flags);
  916. efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
  917. spin_unlock_irqrestore(&efc->lock, flags);
  918. }