efc_fabric.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
  4. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  5. */
  6. /*
  7. * This file implements remote node state machines for:
  8. * - Fabric logins.
  9. * - Fabric controller events.
  10. * - Name/directory services interaction.
  11. * - Point-to-point logins.
  12. */
  13. /*
  14. * fabric_sm Node State Machine: Fabric States
  15. * ns_sm Node State Machine: Name/Directory Services States
  16. * p2p_sm Node State Machine: Point-to-Point Node States
  17. */
  18. #include "efc.h"
  19. static void
  20. efc_fabric_initiate_shutdown(struct efc_node *node)
  21. {
  22. struct efc *efc = node->efc;
  23. node->els_io_enabled = false;
  24. if (node->attached) {
  25. int rc;
  26. /* issue hw node free; don't care if succeeds right away
  27. * or sometime later, will check node->attached later in
  28. * shutdown process
  29. */
  30. rc = efc_cmd_node_detach(efc, &node->rnode);
  31. if (rc < 0) {
  32. node_printf(node, "Failed freeing HW node, rc=%d\n",
  33. rc);
  34. }
  35. }
  36. /*
  37. * node has either been detached or is in the process of being detached,
  38. * call common node's initiate cleanup function
  39. */
  40. efc_node_initiate_cleanup(node);
  41. }
  42. static void
  43. __efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
  44. enum efc_sm_event evt, void *arg)
  45. {
  46. struct efc_node *node = NULL;
  47. node = ctx->app;
  48. switch (evt) {
  49. case EFC_EVT_DOMAIN_ATTACH_OK:
  50. break;
  51. case EFC_EVT_SHUTDOWN:
  52. node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
  53. efc_fabric_initiate_shutdown(node);
  54. break;
  55. default:
  56. /* call default event handler common to all nodes */
  57. __efc_node_common(funcname, ctx, evt, arg);
  58. }
  59. }
/**
 * __efc_fabric_init() - Initial fabric node state: send FLOGI and wait
 * for the response.
 * @ctx: node state machine context
 * @evt: state machine event
 * @arg: event-specific payload
 */
void
__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
		  void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_REENTER:
		efc_log_debug(efc, ">>> reenter !!\n");
		/* REENTER is handled exactly like ENTER: (re)send FLOGI */
		fallthrough;
	case EFC_EVT_ENTER:
		/* sm: / send FLOGI */
		efc_send_flogi(node);
		efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  81. void
  82. efc_fabric_set_topology(struct efc_node *node,
  83. enum efc_nport_topology topology)
  84. {
  85. node->nport->topology = topology;
  86. }
  87. void
  88. efc_fabric_notify_topology(struct efc_node *node)
  89. {
  90. struct efc_node *tmp_node;
  91. unsigned long index;
  92. /*
  93. * now loop through the nodes in the nport
  94. * and send topology notification
  95. */
  96. xa_for_each(&node->nport->lookup, index, tmp_node) {
  97. if (tmp_node != node) {
  98. efc_node_post_event(tmp_node,
  99. EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
  100. &node->nport->topology);
  101. }
  102. }
  103. }
  104. static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
  105. {
  106. return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
  107. }
/**
 * __efc_fabric_flogi_wait_rsp() - Wait for the FLOGI response.
 * @ctx: node state machine context
 * @evt: state machine event
 * @arg: struct efc_node_cb * for ELS events
 *
 * On success the response service parameters decide fabric versus
 * point-to-point topology; on failure the nport is shut down while the
 * link and domain stay up.
 */
void
__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;

		/* Save the FLOGI service parameters at domain scope. */
		memcpy(node->nport->domain->flogi_service_params,
		       cbdata->els_rsp.virt,
		       sizeof(struct fc_els_flogi));

		/* Check to see if the fabric is an F_PORT or and N_PORT */
		if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
			/* sm: if not nport / efc_domain_attach */
			/* ext_status has the fc_id, attach domain */
			efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
			efc_fabric_notify_topology(node);
			WARN_ON(node->nport->domain->attached);
			efc_domain_attach(node->nport->domain,
					  cbdata->ext_status);
			efc_node_transition(node,
					    __efc_fabric_wait_domain_attach,
					    NULL);
			break;
		}

		/* sm: if nport and p2p_winner / efc_domain_attach */
		efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
		if (efc_p2p_setup(node->nport)) {
			node_printf(node,
				    "p2p setup failed, shutting down node\n");
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
			break;
		}

		if (node->nport->p2p_winner) {
			efc_node_transition(node,
					    __efc_p2p_wait_domain_attach,
					    NULL);
			if (node->nport->domain->attached &&
			    !node->nport->domain->domain_notify_pend) {
				/*
				 * already attached,
				 * just send ATTACH_OK
				 */
				node_printf(node,
					    "p2p winner, domain already attached\n");
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		} else {
			/*
			 * peer is p2p winner;
			 * PLOGI will be received on the
			 * remote SID=1 node;
			 * this node has served its purpose
			 */
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
		}

		break;
	}

	case EFC_EVT_ELS_REQ_ABORTED:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		struct efc_nport *nport = node->nport;
		/*
		 * with these errors, we have no recovery,
		 * so shutdown the nport, leave the link
		 * up and the domain ready
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		node_printf(node,
			    "FLOGI failed evt=%s, shutting down nport [%s]\n",
			    efc_sm_event_name(evt), nport->display_name);
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  203. void
  204. __efc_vport_fabric_init(struct efc_sm_ctx *ctx,
  205. enum efc_sm_event evt, void *arg)
  206. {
  207. struct efc_node *node = ctx->app;
  208. efc_node_evt_set(ctx, evt, __func__);
  209. node_sm_trace();
  210. switch (evt) {
  211. case EFC_EVT_ENTER:
  212. /* sm: / send FDISC */
  213. efc_send_fdisc(node);
  214. efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
  215. break;
  216. default:
  217. __efc_fabric_common(__func__, ctx, evt, arg);
  218. }
  219. }
  220. void
  221. __efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
  222. enum efc_sm_event evt, void *arg)
  223. {
  224. struct efc_node_cb *cbdata = arg;
  225. struct efc_node *node = ctx->app;
  226. efc_node_evt_set(ctx, evt, __func__);
  227. node_sm_trace();
  228. switch (evt) {
  229. case EFC_EVT_SRRS_ELS_REQ_OK: {
  230. /* fc_id is in ext_status */
  231. if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
  232. __efc_fabric_common, __func__)) {
  233. return;
  234. }
  235. WARN_ON(!node->els_req_cnt);
  236. node->els_req_cnt--;
  237. /* sm: / efc_nport_attach */
  238. efc_nport_attach(node->nport, cbdata->ext_status);
  239. efc_node_transition(node, __efc_fabric_wait_domain_attach,
  240. NULL);
  241. break;
  242. }
  243. case EFC_EVT_SRRS_ELS_REQ_RJT:
  244. case EFC_EVT_SRRS_ELS_REQ_FAIL: {
  245. if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
  246. __efc_fabric_common, __func__)) {
  247. return;
  248. }
  249. WARN_ON(!node->els_req_cnt);
  250. node->els_req_cnt--;
  251. efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
  252. /* sm: / shutdown nport */
  253. efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
  254. break;
  255. }
  256. default:
  257. __efc_fabric_common(__func__, ctx, evt, arg);
  258. }
  259. }
  260. static int
  261. efc_start_ns_node(struct efc_nport *nport)
  262. {
  263. struct efc_node *ns;
  264. /* Instantiate a name services node */
  265. ns = efc_node_find(nport, FC_FID_DIR_SERV);
  266. if (!ns) {
  267. ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
  268. if (!ns)
  269. return -EIO;
  270. }
  271. /*
  272. * for found ns, should we be transitioning from here?
  273. * breaks transition only
  274. * 1. from within state machine or
  275. * 2. if after alloc
  276. */
  277. if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
  278. efc_node_pause(ns, __efc_ns_init);
  279. else
  280. efc_node_transition(ns, __efc_ns_init, NULL);
  281. return 0;
  282. }
  283. static int
  284. efc_start_fabctl_node(struct efc_nport *nport)
  285. {
  286. struct efc_node *fabctl;
  287. fabctl = efc_node_find(nport, FC_FID_FCTRL);
  288. if (!fabctl) {
  289. fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
  290. false, false);
  291. if (!fabctl)
  292. return -EIO;
  293. }
  294. /*
  295. * for found ns, should we be transitioning from here?
  296. * breaks transition only
  297. * 1. from within state machine or
  298. * 2. if after alloc
  299. */
  300. efc_node_transition(fabctl, __efc_fabctl_init, NULL);
  301. return 0;
  302. }
/**
 * __efc_fabric_wait_domain_attach() - Hold frames until the domain or
 * nport attach completes, then start the name-services node and,
 * if RSCN is enabled, the fabric-controller node.
 * @ctx: node state machine context
 * @evt: state machine event
 * @arg: event-specific payload
 */
void
__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
				enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK:
	case EFC_EVT_NPORT_ATTACH_OK: {
		int rc;

		/* NOTE(review): on failure we stay in this state - confirm
		 * that is the intended recovery behavior.
		 */
		rc = efc_start_ns_node(node->nport);
		if (rc)
			return;

		/* sm: if enable_ini / start fabctl node */
		/* Instantiate the fabric controller (sends SCR) */
		if (node->nport->enable_rscn) {
			rc = efc_start_fabctl_node(node->nport);
			if (rc)
				return;
		}
		efc_node_transition(node, __efc_fabric_idle, NULL);
		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  337. void
  338. __efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
  339. void *arg)
  340. {
  341. struct efc_node *node = ctx->app;
  342. efc_node_evt_set(ctx, evt, __func__);
  343. node_sm_trace();
  344. switch (evt) {
  345. case EFC_EVT_DOMAIN_ATTACH_OK:
  346. break;
  347. default:
  348. __efc_fabric_common(__func__, ctx, evt, arg);
  349. }
  350. }
  351. void
  352. __efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
  353. {
  354. struct efc_node *node = ctx->app;
  355. efc_node_evt_set(ctx, evt, __func__);
  356. node_sm_trace();
  357. switch (evt) {
  358. case EFC_EVT_ENTER:
  359. /* sm: / send PLOGI */
  360. efc_send_plogi(node);
  361. efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
  362. break;
  363. default:
  364. __efc_fabric_common(__func__, ctx, evt, arg);
  365. }
  366. }
/**
 * __efc_ns_plogi_wait_rsp() - Wait for the PLOGI response from the
 * directory server, then attach the node.
 * @ctx: node state machine context
 * @evt: state machine event
 * @arg: struct efc_node_cb * for ELS events
 */
void
__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		int rc;

		/* Save service parameters */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;

		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);

		/*
		 * Transition first so that the ATTACH_FAIL event posted
		 * below is delivered to __efc_ns_wait_node_attach.
		 */
		efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);

		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
/**
 * __efc_ns_wait_node_attach() - Wait for the HW node attach to complete,
 * then register FC-4 types with the name server via RFT_ID.
 * @ctx: node state machine context
 * @evt: state machine event
 * @arg: event-specific payload
 */
void
__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* Hold incoming frames until the attach completes. */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		/* sm: / send RFTID */
		efc_ns_send_rftid(node);
		efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_SHUTDOWN:
		/* Defer shutdown until the pending attach event arrives. */
		node_printf(node, "Shutdown event received\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node,
				    __efc_fabric_wait_attach_evt_shutdown,
				    NULL);
		break;

	/*
	 * if receive RSCN just ignore,
	 * we haven't sent GID_PT yet (ACC sent by fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  442. void
  443. __efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
  444. enum efc_sm_event evt, void *arg)
  445. {
  446. struct efc_node *node = ctx->app;
  447. efc_node_evt_set(ctx, evt, __func__);
  448. node_sm_trace();
  449. switch (evt) {
  450. case EFC_EVT_ENTER:
  451. efc_node_hold_frames(node);
  452. break;
  453. case EFC_EVT_EXIT:
  454. efc_node_accept_frames(node);
  455. break;
  456. /* wait for any of these attach events and then shutdown */
  457. case EFC_EVT_NODE_ATTACH_OK:
  458. node->attached = true;
  459. node_printf(node, "Attach evt=%s, proceed to shutdown\n",
  460. efc_sm_event_name(evt));
  461. efc_fabric_initiate_shutdown(node);
  462. break;
  463. case EFC_EVT_NODE_ATTACH_FAIL:
  464. node->attached = false;
  465. node_printf(node, "Attach evt=%s, proceed to shutdown\n",
  466. efc_sm_event_name(evt));
  467. efc_fabric_initiate_shutdown(node);
  468. break;
  469. /* ignore shutdown event as we're already in shutdown path */
  470. case EFC_EVT_SHUTDOWN:
  471. node_printf(node, "Shutdown event received\n");
  472. break;
  473. default:
  474. __efc_fabric_common(__func__, ctx, evt, arg);
  475. }
  476. }
  477. void
  478. __efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
  479. enum efc_sm_event evt, void *arg)
  480. {
  481. struct efc_node *node = ctx->app;
  482. efc_node_evt_set(ctx, evt, __func__);
  483. node_sm_trace();
  484. switch (evt) {
  485. case EFC_EVT_SRRS_ELS_REQ_OK:
  486. if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
  487. __efc_fabric_common, __func__)) {
  488. return;
  489. }
  490. WARN_ON(!node->els_req_cnt);
  491. node->els_req_cnt--;
  492. /* sm: / send RFFID */
  493. efc_ns_send_rffid(node);
  494. efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
  495. break;
  496. /*
  497. * if receive RSCN just ignore,
  498. * we haven't sent GID_PT yet (ACC sent by fabctl node)
  499. */
  500. case EFC_EVT_RSCN_RCVD:
  501. break;
  502. default:
  503. __efc_fabric_common(__func__, ctx, evt, arg);
  504. }
  505. }
  506. void
  507. __efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
  508. enum efc_sm_event evt, void *arg)
  509. {
  510. struct efc_node *node = ctx->app;
  511. efc_node_evt_set(ctx, evt, __func__);
  512. node_sm_trace();
  513. /*
  514. * Waits for an RFFID response event;
  515. * if rscn enabled, a GIDPT name services request is issued.
  516. */
  517. switch (evt) {
  518. case EFC_EVT_SRRS_ELS_REQ_OK: {
  519. if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
  520. __efc_fabric_common, __func__)) {
  521. return;
  522. }
  523. WARN_ON(!node->els_req_cnt);
  524. node->els_req_cnt--;
  525. if (node->nport->enable_rscn) {
  526. /* sm: if enable_rscn / send GIDPT */
  527. efc_ns_send_gidpt(node);
  528. efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
  529. NULL);
  530. } else {
  531. /* if 'T' only, we're done, go to idle */
  532. efc_node_transition(node, __efc_ns_idle, NULL);
  533. }
  534. break;
  535. }
  536. /*
  537. * if receive RSCN just ignore,
  538. * we haven't sent GID_PT yet (ACC sent by fabctl node)
  539. */
  540. case EFC_EVT_RSCN_RCVD:
  541. break;
  542. default:
  543. __efc_fabric_common(__func__, ctx, evt, arg);
  544. }
  545. }
/**
 * efc_process_gidpt_payload() - Process a GID_PT response from the
 * directory server.
 * @node: name-services node that issued the GID_PT
 * @data: response payload: CT header followed by port-ID entries
 * @gidpt_len: length of @data in bytes
 *
 * Nodes known to the nport but absent from the response are posted
 * EFC_EVT_NODE_MISSING; reported port IDs are refound or, in initiator
 * mode, newly allocated.
 *
 * Return: 0 on success, -EIO on CT rejection or allocation failure.
 */
static int
efc_process_gidpt_payload(struct efc_node *node,
			  void *data, u32 gidpt_len)
{
	u32 i, j;
	struct efc_node *newnode;
	struct efc_nport *nport = node->nport;
	struct efc *efc = node->efc;
	u32 port_id = 0, port_count, plist_count;
	struct efc_node *n;
	struct efc_node **active_nodes;
	int residual;
	struct {
		struct fc_ct_hdr hdr;
		struct fc_gid_pn_resp pn_rsp;
	} *rsp;
	struct fc_gid_pn_resp *gidpt;
	unsigned long index;

	rsp = data;
	gidpt = &rsp->pn_rsp;
	residual = be16_to_cpu(rsp->hdr.ct_mr_size);

	if (residual != 0)
		efc_log_debug(node->efc, "residual is %u words\n", residual);

	if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
		node_printf(node,
			    "GIDPT request failed: rsn x%x rsn_expl x%x\n",
			    rsp->hdr.ct_reason, rsp->hdr.ct_explan);
		return -EIO;
	}

	plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);

	/* Count the number of nodes */
	port_count = 0;
	xa_for_each(&nport->lookup, index, n) {
		port_count++;
	}

	/* Allocate a buffer for all nodes */
	active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
	if (!active_nodes) {
		node_printf(node, "efc_malloc failed\n");
		return -EIO;
	}

	/* Fill buffer with fc_id of active nodes */
	i = 0;
	xa_for_each(&nport->lookup, index, n) {
		port_id = n->rnode.fc_id;
		switch (port_id) {
		/* well-known addresses are never reported missing */
		case FC_FID_FLOGI:
		case FC_FID_FCTRL:
		case FC_FID_DIR_SERV:
			break;
		default:
			if (port_id != FC_FID_DOM_MGR)
				active_nodes[i++] = n;
			break;
		}
	}

	/* update the active nodes buffer */
	for (i = 0; i < plist_count; i++) {
		/*
		 * NOTE(review): hton24() stores port_id INTO fp_fid, but the
		 * comparison below treats port_id as the FID extracted FROM
		 * the response entry; this looks reversed (expected
		 * something like port_id = ntoh24(gidpt[i].fp_fid)) -
		 * confirm against this driver's hton24() definition.
		 */
		hton24(gidpt[i].fp_fid, port_id);

		for (j = 0; j < port_count; j++) {
			if (active_nodes[j] &&
			    port_id == active_nodes[j]->rnode.fc_id) {
				active_nodes[j] = NULL;
			}
		}

		if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
			break;
	}

	/* Those remaining in the active_nodes[] are now gone ! */
	for (i = 0; i < port_count; i++) {
		/*
		 * if we're an initiator and the remote node
		 * is a target, then post the node missing event.
		 * if we're target and we have enabled
		 * target RSCN, then post the node missing event.
		 */
		if (!active_nodes[i])
			continue;

		if ((node->nport->enable_ini && active_nodes[i]->targ) ||
		    (node->nport->enable_tgt && enable_target_rscn(efc))) {
			efc_node_post_event(active_nodes[i],
					    EFC_EVT_NODE_MISSING, NULL);
		} else {
			node_printf(node,
				    "GID_PT: skipping non-tgt port_id x%06x\n",
				    active_nodes[i]->rnode.fc_id);
		}
	}
	kfree(active_nodes);

	for (i = 0; i < plist_count; i++) {
		/* same NOTE(review) as the loop above re: hton24() */
		hton24(gidpt[i].fp_fid, port_id);

		/* Don't create node for ourselves */
		if (port_id == node->rnode.nport->fc_id) {
			if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
				break;
			continue;
		}

		newnode = efc_node_find(nport, port_id);
		if (!newnode) {
			if (!node->nport->enable_ini)
				continue;

			newnode = efc_node_alloc(nport, port_id, false, false);
			if (!newnode) {
				efc_log_err(efc, "efc_node_alloc() failed\n");
				return -EIO;
			}
			/*
			 * send PLOGI automatically
			 * if initiator
			 */
			efc_node_init_device(newnode, true);
		}

		if (node->nport->enable_ini && newnode->targ) {
			efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
					    NULL);
		}

		if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
			break;
	}

	return 0;
}
  667. void
  668. __efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
  669. enum efc_sm_event evt, void *arg)
  670. {
  671. struct efc_node_cb *cbdata = arg;
  672. struct efc_node *node = ctx->app;
  673. efc_node_evt_set(ctx, evt, __func__);
  674. node_sm_trace();
  675. /*
  676. * Wait for a GIDPT response from the name server. Process the FC_IDs
  677. * that are reported by creating new remote ports, as needed.
  678. */
  679. switch (evt) {
  680. case EFC_EVT_SRRS_ELS_REQ_OK: {
  681. if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
  682. __efc_fabric_common, __func__)) {
  683. return;
  684. }
  685. WARN_ON(!node->els_req_cnt);
  686. node->els_req_cnt--;
  687. /* sm: / process GIDPT payload */
  688. efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
  689. cbdata->els_rsp.len);
  690. efc_node_transition(node, __efc_ns_idle, NULL);
  691. break;
  692. }
  693. case EFC_EVT_SRRS_ELS_REQ_FAIL: {
  694. /* not much we can do; will retry with the next RSCN */
  695. node_printf(node, "GID_PT failed to complete\n");
  696. WARN_ON(!node->els_req_cnt);
  697. node->els_req_cnt--;
  698. efc_node_transition(node, __efc_ns_idle, NULL);
  699. break;
  700. }
  701. /* if receive RSCN here, queue up another discovery processing */
  702. case EFC_EVT_RSCN_RCVD: {
  703. node_printf(node, "RSCN received during GID_PT processing\n");
  704. node->rscn_pending = true;
  705. break;
  706. }
  707. default:
  708. __efc_fabric_common(__func__, ctx, evt, arg);
  709. }
  710. }
/**
 * __efc_ns_idle() - Name-services idle state: wait for RSCN events
 * (posted from the fabric controller) and restart the GID_PT query.
 * @ctx: node state machine context
 * @evt: state machine event
 * @arg: event-specific payload
 */
void
__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		if (!node->rscn_pending)
			break;

		/*
		 * An RSCN arrived while the previous GID_PT was still in
		 * flight; fall through and rediscover now.
		 */
		node_printf(node, "RSCN pending, restart discovery\n");
		node->rscn_pending = false;
		fallthrough;

	case EFC_EVT_RSCN_RCVD: {
		/* sm: / send GIDPT */
		/*
		 * If target RSCN processing is enabled,
		 * and this is target only (not initiator),
		 * and tgt_rscn_delay is non-zero,
		 * then we delay issuing the GID_PT
		 */
		if (efc->tgt_rscn_delay_msec != 0 &&
		    !node->nport->enable_ini && node->nport->enable_tgt &&
		    enable_target_rscn(efc)) {
			efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
		} else {
			efc_ns_send_gidpt(node);
			efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
					    NULL);
		}
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  752. static void
  753. gidpt_delay_timer_cb(struct timer_list *t)
  754. {
  755. struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
  756. del_timer(&node->gidpt_delay_timer);
  757. efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
  758. }
/**
 * __efc_ns_gidpt_delay() - Delay the next GID_PT (target-only RSCN
 * processing); the query is issued when the delay timer expires.
 * @ctx: node state machine context
 * @evt: state machine event
 * @arg: event-specific payload
 */
void
__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		u64 delay_msec, tmp;

		/*
		 * Compute the delay time.
		 * Set to tgt_rscn_delay, if the time since last GIDPT
		 * is less than tgt_rscn_period, then use tgt_rscn_period.
		 */
		delay_msec = efc->tgt_rscn_delay_msec;
		tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
		if (tmp < efc->tgt_rscn_period_msec)
			delay_msec = efc->tgt_rscn_period_msec;

		timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
			    0);
		mod_timer(&node->gidpt_delay_timer,
			  jiffies + msecs_to_jiffies(delay_msec));

		break;
	}

	case EFC_EVT_GIDPT_DELAY_EXPIRED:
		/* Remember when we last issued a GID_PT, then send it. */
		node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);

		efc_ns_send_gidpt(node);
		efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
		break;

	case EFC_EVT_RSCN_RCVD: {
		/* A GID_PT is already scheduled; nothing more to do. */
		efc_log_debug(efc,
			      "RSCN received while in GIDPT delay - no action\n");
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  799. void
  800. __efc_fabctl_init(struct efc_sm_ctx *ctx,
  801. enum efc_sm_event evt, void *arg)
  802. {
  803. struct efc_node *node = ctx->app;
  804. node_sm_trace();
  805. switch (evt) {
  806. case EFC_EVT_ENTER:
  807. /* no need to login to fabric controller, just send SCR */
  808. efc_send_scr(node);
  809. efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
  810. break;
  811. case EFC_EVT_NODE_ATTACH_OK:
  812. node->attached = true;
  813. break;
  814. default:
  815. __efc_fabric_common(__func__, ctx, evt, arg);
  816. }
  817. }
  818. void
  819. __efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
  820. enum efc_sm_event evt, void *arg)
  821. {
  822. struct efc_node *node = ctx->app;
  823. efc_node_evt_set(ctx, evt, __func__);
  824. node_sm_trace();
  825. /*
  826. * Fabric controller node state machine:
  827. * Wait for an SCR response from the fabric controller.
  828. */
  829. switch (evt) {
  830. case EFC_EVT_SRRS_ELS_REQ_OK:
  831. if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
  832. __efc_fabric_common, __func__)) {
  833. return;
  834. }
  835. WARN_ON(!node->els_req_cnt);
  836. node->els_req_cnt--;
  837. efc_node_transition(node, __efc_fabctl_ready, NULL);
  838. break;
  839. default:
  840. __efc_fabric_common(__func__, ctx, evt, arg);
  841. }
  842. }
  843. static void
  844. efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
  845. {
  846. struct efc *efc = node->efc;
  847. struct efc_nport *nport = node->nport;
  848. struct efc_node *ns;
  849. /* Forward this event to the name-services node */
  850. ns = efc_node_find(nport, FC_FID_DIR_SERV);
  851. if (ns)
  852. efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
  853. else
  854. efc_log_warn(efc, "can't find name server node\n");
  855. }
  856. void
  857. __efc_fabctl_ready(struct efc_sm_ctx *ctx,
  858. enum efc_sm_event evt, void *arg)
  859. {
  860. struct efc_node_cb *cbdata = arg;
  861. struct efc_node *node = ctx->app;
  862. efc_node_evt_set(ctx, evt, __func__);
  863. node_sm_trace();
  864. /*
  865. * Fabric controller node state machine: Ready.
  866. * In this state, the fabric controller sends a RSCN, which is received
  867. * by this node and is forwarded to the name services node object; and
  868. * the RSCN LS_ACC is sent.
  869. */
  870. switch (evt) {
  871. case EFC_EVT_RSCN_RCVD: {
  872. struct fc_frame_header *hdr = cbdata->header->dma.virt;
  873. /*
  874. * sm: / process RSCN (forward to name services node),
  875. * send LS_ACC
  876. */
  877. efc_process_rscn(node, cbdata);
  878. efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
  879. efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
  880. NULL);
  881. break;
  882. }
  883. default:
  884. __efc_fabric_common(__func__, ctx, evt, arg);
  885. }
  886. }
/*
 * Fabric controller node state machine: wait for the RSCN LS_ACC send
 * to complete, then return to the ready state. Incoming frames are held
 * while the LS_ACC is outstanding.
 */
void
__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* queue unsolicited frames until the LS_ACC completes */
		efc_node_hold_frames(node);
		break;
	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;
	case EFC_EVT_SRRS_ELS_CMPL_OK:
		/* LS_ACC sent; account for the completion and go ready */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		efc_node_transition(node, __efc_fabctl_ready, NULL);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  910. static uint64_t
  911. efc_get_wwpn(struct fc_els_flogi *sp)
  912. {
  913. return be64_to_cpu(sp->fl_wwnn);
  914. }
  915. static int
  916. efc_rnode_is_winner(struct efc_nport *nport)
  917. {
  918. struct fc_els_flogi *remote_sp;
  919. u64 remote_wwpn;
  920. u64 local_wwpn = nport->wwpn;
  921. u64 wwn_bump = 0;
  922. remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
  923. remote_wwpn = efc_get_wwpn(remote_sp);
  924. local_wwpn ^= wwn_bump;
  925. efc_log_debug(nport->efc, "r: %llx\n",
  926. be64_to_cpu(remote_sp->fl_wwpn));
  927. efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);
  928. if (remote_wwpn == local_wwpn) {
  929. efc_log_warn(nport->efc,
  930. "WWPN of remote node [%08x %08x] matches local WWPN\n",
  931. (u32)(local_wwpn >> 32ll),
  932. (u32)local_wwpn);
  933. return -1;
  934. }
  935. return (remote_wwpn > local_wwpn);
  936. }
/*
 * p2p node state machine (winner side): wait for the domain attach to
 * complete, then ensure a remote node exists from which the PLOGI will
 * be sent, and retire this transient FLOGI node.
 */
void
__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* queue unsolicited frames while the attach is pending */
		efc_node_hold_frames(node);
		break;
	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;
	case EFC_EVT_DOMAIN_ATTACH_OK: {
		struct efc_nport *nport = node->nport;
		struct efc_node *rnode;

		/*
		 * this transient node (SID=0 (recv'd FLOGI)
		 * or DID=fabric (sent FLOGI))
		 * is the p2p winner, will use a separate node
		 * to send PLOGI to peer
		 */
		WARN_ON(!node->nport->p2p_winner);

		rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
		if (rnode) {
			/*
			 * the "other" transient p2p node has
			 * already kicked off the
			 * new node from which PLOGI is sent
			 */
			node_printf(node,
				    "Node with fc_id x%x already exists\n",
				    rnode->rnode.fc_id);
		} else {
			/*
			 * create new node (SID=1, DID=2)
			 * from which to send PLOGI
			 */
			rnode = efc_node_alloc(nport,
					       nport->p2p_remote_port_id,
					       false, false);
			if (!rnode) {
				efc_log_err(efc, "node alloc failed\n");
				return;
			}

			efc_fabric_notify_topology(node);
			/* sm: / allocate p2p remote node */
			efc_node_transition(rnode, __efc_p2p_rnode_init,
					    NULL);
		}

		/*
		 * the transient node (SID=0 or DID=fabric)
		 * has served its purpose
		 */
		if (node->rnode.fc_id == 0) {
			/*
			 * if this is the SID=0 node,
			 * move to the init state in case peer
			 * has restarted FLOGI discovery and FLOGI is pending
			 */
			/* don't send PLOGI on efc_d_init entry */
			efc_node_init_device(node, false);
		} else {
			/*
			 * if this is the DID=fabric node
			 * (we initiated FLOGI), shut it down
			 */
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
		}
		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  1015. void
  1016. __efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
  1017. enum efc_sm_event evt, void *arg)
  1018. {
  1019. struct efc_node_cb *cbdata = arg;
  1020. struct efc_node *node = ctx->app;
  1021. efc_node_evt_set(ctx, evt, __func__);
  1022. node_sm_trace();
  1023. switch (evt) {
  1024. case EFC_EVT_ENTER:
  1025. /* sm: / send PLOGI */
  1026. efc_send_plogi(node);
  1027. efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
  1028. break;
  1029. case EFC_EVT_ABTS_RCVD:
  1030. /* sm: send BA_ACC */
  1031. efc_send_bls_acc(node, cbdata->header->dma.virt);
  1032. break;
  1033. default:
  1034. __efc_fabric_common(__func__, ctx, evt, arg);
  1035. }
  1036. }
/*
 * p2p node state machine: wait for the FLOGI LS_ACC send to complete.
 * On success, either start/continue the domain attach (p2p winner) or
 * return this node to init to await the peer's PLOGI on a new node.
 */
void
__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* queue unsolicited frames until the LS_ACC completes */
		efc_node_hold_frames(node);
		break;
	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;
	case EFC_EVT_SRRS_ELS_CMPL_OK:
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;

		/* sm: if p2p_winner / domain_attach */
		if (node->nport->p2p_winner) {
			efc_node_transition(node,
					    __efc_p2p_wait_domain_attach,
					    NULL);
			if (!node->nport->domain->attached) {
				node_printf(node, "Domain not attached\n");
				efc_domain_attach(node->nport->domain,
						  node->nport->p2p_port_id);
			} else {
				/* attach already done: synthesize the event
				 * so the wait state proceeds immediately
				 */
				node_printf(node, "Domain already attached\n");
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		} else {
			/* this node has served its purpose;
			 * we'll expect a PLOGI on a separate
			 * node (remote SID=0x1); return this node
			 * to init state in case peer
			 * restarts discovery -- it may already
			 * have (pending frames may exist).
			 */
			/* don't send PLOGI on efc_d_init entry */
			efc_node_init_device(node, false);
		}
		break;
	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/*
		 * LS_ACC failed, possibly due to link down;
		 * shutdown node and wait
		 * for FLOGI discovery to restart
		 */
		node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;
	case EFC_EVT_ABTS_RCVD: {
		/* sm: / send BA_ACC */
		efc_send_bls_acc(node, cbdata->header->dma.virt);
		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
/*
 * p2p remote node state machine: PLOGI has been sent; wait for its
 * response. A PRLI may arrive before the PLOGI completion (completions
 * and received frames are processed on different queues), in which case
 * the PRLI is parked until after node attach.
 */
void
__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		int rc;

		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		/* transition first; the failure event (if any) is then
		 * delivered to the new state
		 */
		efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		node_printf(node, "PLOGI failed, shutting down\n");
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;
	}
	case EFC_EVT_PLOGI_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		/* if we're in external loopback mode, just send LS_ACC */
		if (node->efc->external_loopback) {
			efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
		} else {
			/*
			 * if this isn't external loopback,
			 * pass to default handler
			 */
			__efc_fabric_common(__func__, ctx, evt, arg);
		}
		break;
	}
	case EFC_EVT_PRLI_RCVD:
		/* I, or I+T */
		/* sent PLOGI and before completion was seen, received the
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed)
		 * Save OXID so PRLI can be sent after the attach and continue
		 * to wait for PLOGI response
		 */
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PRLI);
		efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
				    NULL);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
/*
 * p2p remote node state machine: a PRLI was received while the PLOGI
 * response is still outstanding; keep waiting for the PLOGI completion
 * so the node can be attached and the deferred PRLI LS_ACC sent.
 */
void
__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
				    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and will
		 * just need to wait for the PLOGI response to do the node
		 * attach and then we can send the LS_ACC for the PRLI. If,
		 * during this time, we receive FCP_CMNDs (which is possible
		 * since we've already sent a PRLI and our peer may have
		 * accepted).
		 * At this time, we are not waiting on any other unsolicited
		 * frames to continue with the login process. Thus, it will not
		 * hurt to hold frames here.
		 */
		efc_node_hold_frames(node);
		break;
	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;
	case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */
		int rc;

		/* Completion from PLOGI sent */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		/* transition first; a failure event is then delivered to
		 * the new state
		 */
		efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* PLOGI failed, shutdown the node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
/*
 * p2p remote node state machine: wait for the HW node attach to
 * complete, then either send the deferred PRLI LS_ACC (if one was
 * parked via efc_send_ls_acc_after_attach()) or proceed to the
 * port-logged-in device state.
 */
void
__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* queue unsolicited frames until the attach completes */
		efc_node_hold_frames(node);
		break;
	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;
	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		switch (node->send_ls_acc) {
		case EFC_NODE_SEND_LS_ACC_PRLI: {
			/* a PRLI arrived pre-attach; answer it now and
			 * release our reference to the deferred IO
			 */
			efc_d_send_prli_rsp(node->ls_acc_io,
					    node->ls_acc_oxid);
			node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
		case EFC_NODE_SEND_LS_ACC_NONE:
		default:
			/* Normal case for I */
			/* sm: send_plogi_acc is not set / send PLOGI acc */
			efc_node_transition(node, __efc_d_port_logged_in,
					    NULL);
			break;
		}
		break;
	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;
	case EFC_EVT_SHUTDOWN:
		/* defer shutdown until the attach event arrives */
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node,
				    __efc_fabric_wait_attach_evt_shutdown,
				    NULL);
		break;
	case EFC_EVT_PRLI_RCVD:
		node_printf(node, "%s: PRLI received before node is attached\n",
			    efc_sm_event_name(evt));
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PRLI);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
  1295. int
  1296. efc_p2p_setup(struct efc_nport *nport)
  1297. {
  1298. struct efc *efc = nport->efc;
  1299. int rnode_winner;
  1300. rnode_winner = efc_rnode_is_winner(nport);
  1301. /* set nport flags to indicate p2p "winner" */
  1302. if (rnode_winner == 1) {
  1303. nport->p2p_remote_port_id = 0;
  1304. nport->p2p_port_id = 0;
  1305. nport->p2p_winner = false;
  1306. } else if (rnode_winner == 0) {
  1307. nport->p2p_remote_port_id = 2;
  1308. nport->p2p_port_id = 1;
  1309. nport->p2p_winner = true;
  1310. } else {
  1311. /* no winner; only okay if external loopback enabled */
  1312. if (nport->efc->external_loopback) {
  1313. /*
  1314. * External loopback mode enabled;
  1315. * local nport and remote node
  1316. * will be registered with an NPortID = 1;
  1317. */
  1318. efc_log_debug(efc,
  1319. "External loopback mode enabled\n");
  1320. nport->p2p_remote_port_id = 1;
  1321. nport->p2p_port_id = 1;
  1322. nport->p2p_winner = true;
  1323. } else {
  1324. efc_log_warn(efc,
  1325. "failed to determine p2p winner\n");
  1326. return rnode_winner;
  1327. }
  1328. }
  1329. return 0;
  1330. }