efct_hw.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */
#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_unsol.h"
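/*
 * Callback context structures: each pairs a completion callback with the
 * caller's argument so that asynchronous mailbox requests (link/host
 * statistics, firmware write, libefc mailbox requests) can be completed
 * from the mailbox completion path.
 */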
struct efct_hw_link_stat_cb_arg {
	void (*cb)(int status, u32 num_counters,
		struct efct_hw_link_stat_counts *counters, void *arg);
	void *arg;
};
struct efct_hw_host_stat_cb_arg {
	void (*cb)(int status, u32 num_counters,
		struct efct_hw_host_stat_counts *counters, void *arg);
	void *arg;
};
struct efct_hw_fw_wr_cb_arg {
	void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
	void *arg;
};
struct efct_mbox_rqst_ctx {
	int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
	void *arg;
};
static int
efct_hw_link_event_init(struct efct_hw *hw)
{
	hw->link.status = SLI4_LINK_STATUS_MAX;
	hw->link.topology = SLI4_LINK_TOPO_NONE;
	hw->link.medium = SLI4_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = U32_MAX;
	return 0;
}
static int
efct_hw_read_max_dump_size(struct efct_hw *hw)
{
	u8 buf[SLI4_BMBX_SIZE];
	struct efct *efct = hw->os;
	int rc = 0;
	struct sli4_rsp_cmn_set_dump_location *rsp;
	/* attempt to determine the dump size for function 0 only. */
	if (PCI_FUNC(efct->pci->devfn) != 0)
		return rc;
	if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
		return -EIO;
	rsp = (struct sli4_rsp_cmn_set_dump_location *)
		(buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	if (rc != 0) {
		efc_log_debug(hw->os, "set dump location cmd failed\n");
		return rc;
	}
	hw->dump_size =
		le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
	efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
	return rc;
}
static int
__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
{
	struct sli4_cmd_read_topology *read_topo =
		(struct sli4_cmd_read_topology *)mqe;
	u8 speed;
	struct efc_domain_record drec = {0};
	struct efct *efct = hw->os;
	if (status || le16_to_cpu(read_topo->hdr.status)) {
		efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
			le16_to_cpu(read_topo->hdr.status));
		return -EIO;
	}
	switch (le32_to_cpu(read_topo->dw2_attentype) &
		SLI4_READTOPO_ATTEN_TYPE) {
	case SLI4_READ_TOPOLOGY_LINK_UP:
		hw->link.status = SLI4_LINK_STATUS_UP;
		break;
	case SLI4_READ_TOPOLOGY_LINK_DOWN:
		hw->link.status = SLI4_LINK_STATUS_DOWN;
		break;
	case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
		hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
		break;
	default:
		hw->link.status = SLI4_LINK_STATUS_MAX;
		break;
	}
	switch (read_topo->topology) {
	case SLI4_READ_TOPO_NON_FC_AL:
		hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
		break;
	case SLI4_READ_TOPO_FC_AL:
		hw->link.topology = SLI4_LINK_TOPO_FC_AL;
		if (hw->link.status == SLI4_LINK_STATUS_UP)
			hw->link.loop_map = hw->loop_map.virt;
		hw->link.fc_id = read_topo->acquired_al_pa;
		break;
	default:
		hw->link.topology = SLI4_LINK_TOPO_MAX;
		break;
	}
	hw->link.medium = SLI4_LINK_MEDIUM_FC;
	speed = (le32_to_cpu(read_topo->currlink_state) &
		SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
	switch (speed) {
	case SLI4_READ_TOPOLOGY_SPEED_1G:
		hw->link.speed = 1 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_2G:
		hw->link.speed = 2 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_4G:
		hw->link.speed = 4 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_8G:
		hw->link.speed = 8 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_16G:
		hw->link.speed = 16 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_32G:
		hw->link.speed = 32 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_64G:
		hw->link.speed = 64 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_128G:
		hw->link.speed = 128 * 1000;
		break;
	}
	drec.speed = hw->link.speed;
	drec.fc_id = hw->link.fc_id;
	drec.is_nport = true;
	efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
	return 0;
}
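/*
 * SLI link event callback (registered via sli_callback(SLI4_CB_LINK)).
 * Caches the new link state in hw->link and notifies the libefc layer:
 * on link up it reports a domain record (issuing READ_TOPOLOGY first for
 * loop topologies), on link down it reports a lost domain.
 */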
static int
efct_hw_cb_link(void *ctx, void *e)
{
	struct efct_hw *hw = ctx;
	struct sli4_link_event *event = e;
	struct efc_domain *d = NULL;
	int rc = 0;
	struct efct *efct = hw->os;
	efct_hw_link_event_init(hw);
	switch (event->status) {
	case SLI4_LINK_STATUS_UP:
		hw->link = *event;
		efct->efcport->link_status = EFC_LINK_STATUS_UP;
		if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
			struct efc_domain_record drec = {0};
			efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
				event->speed);
			drec.speed = event->speed;
			drec.fc_id = event->fc_id;
			drec.is_nport = true;
			efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
				&drec);
		} else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
			u8 buf[SLI4_BMBX_SIZE];
			efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
				event->speed);
			if (!sli_cmd_read_topology(&hw->sli, buf,
						&hw->loop_map)) {
				rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
						__efct_read_topology_cb, NULL);
			}
			if (rc)
				efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
		} else {
			efc_log_info(hw->os, "%s(%#x), speed is %d\n",
				"Link Up, unsupported topology ",
				event->topology, event->speed);
		}
		break;
	case SLI4_LINK_STATUS_DOWN:
		efc_log_info(hw->os, "Link down\n");
		hw->link.status = event->status;
		efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
		d = efct->efcport->domain;
		if (d)
			efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
		break;
	default:
		efc_log_debug(hw->os, "unhandled link status %#x\n",
			event->status);
		break;
	}
	return 0;
}
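/*
 * One-time HW setup: clears the efct_hw structure, creates the mailbox
 * command/context pools, initializes the SLI-4 layer and derives the
 * queue, IO and SGL limits used later by efct_hw_init().
 */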
int
efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
{
	u32 i, max_sgl, cpus;
	if (hw->hw_setup_called)
		return 0;
	/*
	 * efct_hw_init() relies on NULL pointers indicating that a structure
	 * needs allocation. If a structure is non-NULL, efct_hw_init() won't
	 * free/realloc that memory
	 */
	memset(hw, 0, sizeof(struct efct_hw));
	hw->hw_setup_called = true;
	hw->os = os;
	mutex_init(&hw->bmbx_lock);
	spin_lock_init(&hw->cmd_lock);
	INIT_LIST_HEAD(&hw->cmd_head);
	INIT_LIST_HEAD(&hw->cmd_pending);
	hw->cmd_head_count = 0;
	/* Create mailbox command ctx pool */
	hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
		sizeof(struct efct_command_ctx));
	if (!hw->cmd_ctx_pool) {
		efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
		return -EIO;
	}
	/* Create mailbox request ctx pool for library callback */
	hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
		sizeof(struct efct_mbox_rqst_ctx));
	if (!hw->mbox_rqst_pool) {
		efc_log_err(hw->os, "failed to allocate mbox request pool\n");
		return -EIO;
	}
	spin_lock_init(&hw->io_lock);
	INIT_LIST_HEAD(&hw->io_inuse);
	INIT_LIST_HEAD(&hw->io_free);
	INIT_LIST_HEAD(&hw->io_wait_free);
	atomic_set(&hw->io_alloc_failed_count, 0);
	hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
	if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
		efc_log_err(hw->os, "SLI setup failed\n");
		return -EIO;
	}
	efct_hw_link_event_init(hw);
	sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
	/*
	 * Set all the queue sizes to the maximum allowed.
	 */
	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
		hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
	/*
	 * Adjust the size of the WQs so that the CQ is twice as big as
	 * the WQ to allow for 2 completions per IO. This allows us to
	 * handle multi-phase as well as aborts.
	 */
	hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
	/*
	 * The RQ assignment for RQ pair mode.
	 */
	hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
	hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
	cpus = num_possible_cpus();
	hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
	max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
	max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
	hw->config.n_sgl = max_sgl;
	(void)efct_hw_read_max_dump_size(hw);
	return 0;
}
static void
efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
{
	efc_log_info(hw->os,
		"REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
		j, hw->config.filter_def[j], i, id);
}
static inline void
efct_hw_init_free_io(struct efct_hw_io *io)
{
	/*
	 * Set io->done to NULL, to avoid any callbacks, should
	 * a completion be received for one of these IOs
	 */
	io->done = NULL;
	io->abort_done = NULL;
	io->status_saved = false;
	io->abort_in_progress = false;
	io->type = 0xFFFF;
	io->wq = NULL;
}
static bool efct_hw_iotype_is_originator(u16 io_type)
{
	switch (io_type) {
	case EFCT_HW_FC_CT:
	case EFCT_HW_ELS_REQ:
		return true;
	default:
		return false;
	}
}
static void
efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
{
	/* Restore the default */
	io->sgl = &io->def_sgl;
	io->sgl_count = io->def_sgl_count;
}
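/*
 * WQE completion handler for a HW IO (registered via the request tag).
 * Extracts the transferred length and extended status from the WCQE and,
 * if the IO failed while the exchange is still busy (XB set) on an
 * originator IO, issues an internal abort and latches the status until
 * the XRI_ABORTED CQE arrives; otherwise the saved/current status is
 * passed to the IO's done() callback.
 */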
static void
efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
{
	struct efct_hw_io *io = arg;
	struct efct_hw *hw = io->hw;
	struct sli4_fc_wcqe *wcqe = (void *)cqe;
	u32 len = 0;
	u32 ext = 0;
	/* clear xbusy flag if WCQE[XB] is clear */
	if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
		io->xbusy = false;
	/* get extended CQE status */
	switch (io->type) {
	case EFCT_HW_BLS_ACC:
	case EFCT_HW_BLS_RJT:
		break;
	case EFCT_HW_ELS_REQ:
		sli_fc_els_did(&hw->sli, cqe, &ext);
		len = sli_fc_response_length(&hw->sli, cqe);
		break;
	case EFCT_HW_ELS_RSP:
	case EFCT_HW_FC_CT_RSP:
		break;
	case EFCT_HW_FC_CT:
		len = sli_fc_response_length(&hw->sli, cqe);
		break;
	case EFCT_HW_IO_TARGET_WRITE:
		len = sli_fc_io_length(&hw->sli, cqe);
		break;
	case EFCT_HW_IO_TARGET_READ:
		len = sli_fc_io_length(&hw->sli, cqe);
		break;
	case EFCT_HW_IO_TARGET_RSP:
		break;
	case EFCT_HW_IO_DNRX_REQUEUE:
		/* release the count for re-posting the buffer */
		/* efct_hw_io_free(hw, io); */
		break;
	default:
		efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
			io->type, io->indicator);
		break;
	}
	if (status) {
		ext = sli_fc_ext_status(&hw->sli, cqe);
		/*
		 * If this is an originator IO and XB is set, then issue an
		 * abort for the IO from within the HW
		 */
		if (efct_hw_iotype_is_originator(io->type) &&
		    wcqe->flags & SLI4_WCQE_XB) {
			int rc;
			efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
				io->indicator, io->reqtag);
			/*
			 * Because targets may send a response when the IO
			 * completes using the same XRI, we must wait for the
			 * XRI_ABORTED CQE to issue the IO callback
			 */
			rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
			if (rc == 0) {
				/*
				 * latch status to return after abort is
				 * complete
				 */
				io->status_saved = true;
				io->saved_status = status;
				io->saved_ext = ext;
				io->saved_len = len;
				goto exit_efct_hw_wq_process_io;
			} else if (rc == -EINPROGRESS) {
				/*
				 * Already being aborted by someone else (ABTS
				 * perhaps). Just return original
				 * error.
				 */
				efc_log_debug(hw->os, "%s%#x tag=%#x\n",
					"abort in progress xri=",
					io->indicator, io->reqtag);
			} else {
				/* Failed to abort for some other reason, log
				 * error
				 */
				efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
					"Failed to abort xri=",
					io->indicator, io->reqtag, rc);
			}
		}
	}
	if (io->done) {
		efct_hw_done_t done = io->done;
		io->done = NULL;
		if (io->status_saved) {
			/* use latched status if exists */
			status = io->saved_status;
			len = io->saved_len;
			ext = io->saved_ext;
			io->status_saved = false;
		}
		/* Restore default SGL */
		efct_hw_io_restore_sgl(hw, io);
		done(io, len, status, ext, io->arg);
	}
exit_efct_hw_wq_process_io:
	return;
}
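/*
 * Allocate the pool of HW IO objects (hw->io), a WQE buffer per IO, the
 * shared XFER_RDY DMA area and a default SGL per IO, and bind each IO to
 * an XRI resource and a WQ request tag. On re-init (hw->io already set)
 * the existing IOs and SGLs are reused.
 */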
static int
efct_hw_setup_io(struct efct_hw *hw)
{
	u32 i = 0;
	struct efct_hw_io *io = NULL;
	uintptr_t xfer_virt = 0;
	uintptr_t xfer_phys = 0;
	u32 index;
	bool new_alloc = true;
	struct efc_dma *dma;
	struct efct *efct = hw->os;
	if (!hw->io) {
		hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
		if (!hw->io)
			return -ENOMEM;
		memset(hw->io, 0, hw->config.n_io * sizeof(io));
		for (i = 0; i < hw->config.n_io; i++) {
			hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
			if (!hw->io[i])
				goto error;
		}
		/* Create WQE buffs for IO */
		hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
			GFP_KERNEL);
		if (!hw->wqe_buffs) {
			kfree(hw->io);
			return -ENOMEM;
		}
	} else {
		/* re-use existing IOs, including SGLs */
		new_alloc = false;
	}
	if (new_alloc) {
		dma = &hw->xfer_rdy;
		dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
		dma->virt = dma_alloc_coherent(&efct->pci->dev,
			dma->size, &dma->phys, GFP_KERNEL);
		if (!dma->virt)
			return -ENOMEM;
	}
	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
	xfer_phys = hw->xfer_rdy.phys;
	/* Initialize the pool of HW IO objects */
	for (i = 0; i < hw->config.n_io; i++) {
		struct hw_wq_callback *wqcb;
		io = hw->io[i];
		/* initialize IO fields */
		io->hw = hw;
		/* Assign a WQE buff */
		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
		/* Allocate the request tag for this IO */
		wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
		if (!wqcb) {
			efc_log_err(hw->os, "can't allocate request tag\n");
			return -ENOSPC;
		}
		io->reqtag = wqcb->instance_index;
		/* Now for the fields that are initialized on each free */
		efct_hw_init_free_io(io);
		/* The XB flag isn't cleared on IO free, so init to zero */
		io->xbusy = 0;
		if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
				&io->indicator, &index)) {
			efc_log_err(hw->os,
				"sli_resource_alloc failed @ %d\n", i);
			return -ENOMEM;
		}
		if (new_alloc) {
			dma = &io->def_sgl;
			dma->size = hw->config.n_sgl *
				sizeof(struct sli4_sge);
			dma->virt = dma_alloc_coherent(&efct->pci->dev,
				dma->size, &dma->phys,
				GFP_KERNEL);
			if (!dma->virt) {
				efc_log_err(hw->os, "dma_alloc fail %d\n", i);
				memset(&io->def_sgl, 0,
					sizeof(struct efc_dma));
				return -ENOMEM;
			}
		}
		io->def_sgl_count = hw->config.n_sgl;
		io->sgl = &io->def_sgl;
		io->sgl_count = io->def_sgl_count;
		if (hw->xfer_rdy.size) {
			io->xfer_rdy.virt = (void *)xfer_virt;
			io->xfer_rdy.phys = xfer_phys;
			io->xfer_rdy.size = sizeof(struct fcp_txrdy);
			xfer_virt += sizeof(struct fcp_txrdy);
			xfer_phys += sizeof(struct fcp_txrdy);
		}
	}
	return 0;
error:
	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
		kfree(hw->io[i]);
		hw->io[i] = NULL;
	}
	kfree(hw->io);
	hw->io = NULL;
	return -ENOMEM;
}
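/*
 * Pre-register the per-IO SGLs with the SLI port using POST_SGL_PAGES.
 * SGLs are posted in batches of up to 256, a batch being cut short if
 * the XRI values stop being contiguous; successfully posted IOs are
 * added to the io_free list.
 */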
static int
efct_hw_init_prereg_io(struct efct_hw *hw)
{
	u32 i, idx = 0;
	struct efct_hw_io *io = NULL;
	u8 cmd[SLI4_BMBX_SIZE];
	int rc = 0;
	u32 n_rem;
	u32 n = 0;
	u32 sgls_per_request = 256;
	struct efc_dma **sgls = NULL;
	struct efc_dma req;
	struct efct *efct = hw->os;
	sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
	if (!sgls)
		return -ENOMEM;
	memset(&req, 0, sizeof(struct efc_dma));
	req.size = 32 + sgls_per_request * 16;
	req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
		GFP_KERNEL);
	if (!req.virt) {
		kfree(sgls);
		return -ENOMEM;
	}
	for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
		/* Copy addresses of SGLs into the local sgls[] array; break
		 * out if the xri values are not contiguous.
		 */
		u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
		for (n = 0; n < min; n++) {
			/* Check that we have contiguous xri values */
			if (n > 0) {
				if (hw->io[idx + n]->indicator !=
				    hw->io[idx + n - 1]->indicator + 1)
					break;
			}
			sgls[n] = hw->io[idx + n]->sgl;
		}
		if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
				hw->io[idx]->indicator, n, sgls, NULL, &req)) {
			rc = -EIO;
			break;
		}
		rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
		if (rc) {
			efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
			break;
		}
		/* Add to tail if successful */
		for (i = 0; i < n; i++, idx++) {
			io = hw->io[idx];
			io->state = EFCT_HW_IO_STATE_FREE;
			INIT_LIST_HEAD(&io->list_entry);
			list_add_tail(&io->list_entry, &hw->io_free);
		}
	}
	dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
	memset(&req, 0, sizeof(struct efc_dma));
	kfree(sgls);
	return rc;
}
static int
efct_hw_init_io(struct efct_hw *hw)
{
	u32 i, idx = 0;
	bool prereg = false;
	struct efct_hw_io *io = NULL;
	int rc = 0;
	prereg = hw->sli.params.sgl_pre_registered;
	if (prereg)
		return efct_hw_init_prereg_io(hw);
	for (i = 0; i < hw->config.n_io; i++, idx++) {
		io = hw->io[idx];
		io->state = EFCT_HW_IO_STATE_FREE;
		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &hw->io_free);
	}
	return rc;
}
static int
efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
{
	int rc = 0;
	u8 buf[SLI4_BMBX_SIZE];
	struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
	memset(&param, 0, sizeof(param));
	param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
	/* build the set_features command */
	sli_cmd_common_set_features(&hw->sli, buf,
		SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);
	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	if (rc)
		efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
			fdt_xfer_hint, rc);
	else
		efc_log_info(hw->os, "Set FDT transfer hint to %d\n",
			le32_to_cpu(param.fdt_xfer_hint));
	return rc;
}
static int
efct_hw_config_rq(struct efct_hw *hw)
{
	u32 min_rq_count, i, rc;
	struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
	u8 buf[SLI4_BMBX_SIZE];
	efc_log_info(hw->os, "using REG_FCFI standard\n");
	/*
	 * Set the filter match/mask values from hw's
	 * filter_def values
	 */
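	/*
	 * Each filter_def word is unpacked byte by byte here: bits 0-7 are
	 * the R_CTL mask, 8-15 the R_CTL match, 16-23 the TYPE mask and
	 * 24-31 the TYPE match. (Note that the MRQ path in
	 * efct_hw_config_mrq() consumes the same words with the TYPE fields
	 * in the low bytes instead.)
	 */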
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_cfg[i].rq_id = cpu_to_le16(0xffff);
		rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
		rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
		rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
		rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
	}
	/*
	 * Update the rq_id fields of the FCF configuration
	 * (don't update more than the number of rq_cfg
	 * elements)
	 */
	min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
		hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
	for (i = 0; i < min_rq_count; i++) {
		struct hw_rq *rq = hw->hw_rq[i];
		u32 j;
		for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
			u32 mask = (rq->filter_mask != 0) ?
				rq->filter_mask : 1;
			if (!(mask & (1U << j)))
				continue;
			rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
			efct_logfcfi(hw, j, i, rq->hdr->id);
		}
	}
	rc = -EIO;
	if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
		rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	if (rc != 0) {
		efc_log_err(hw->os, "FCFI registration failed\n");
		return rc;
	}
	hw->fcf_indicator =
		le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
	return rc;
}
static int
efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
{
	u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	struct hw_rq *rq;
	struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
	struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
	u32 rc, i;
	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
		goto issue_cmd;
	/* Set the filter match/mask values from hw's filter_def values */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = cpu_to_le16(0xffff);
		rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
		rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
		rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
		rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
	}
	rq = hw->hw_rq[0];
	rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
	rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
	mrq_bitmask = 0x2;
issue_cmd:
	efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
		hw->hw_rq_count, hw->config.rq_selection_policy, mode);
	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
		hw->config.rq_selection_policy, mrq_bitmask,
		hw->hw_mrq_count, rq_filter);
	if (rc) {
		efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
		return -EIO;
	}
	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
	if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
		efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
			rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
		return -EIO;
	}
	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
		hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
	return 0;
}
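/*
 * Insert a queue id -> queue array index mapping into one of the
 * cq/rq/wq hashes (open addressing with linear probing). The hashes are
 * populated in efct_hw_init() and sized by EFCT_HW_Q_HASH_SIZE.
 */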
static void
efct_hw_queue_hash_add(struct efct_queue_hash *hash,
		       u16 id, u16 index)
{
	u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
	/*
	 * Since the hash is always bigger than the number of queues, we
	 * never have to worry about an infinite loop.
	 */
	while (hash[hash_index].in_use)
		hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
	/* not used, claim the entry */
	hash[hash_index].id = id;
	hash[hash_index].in_use = true;
	hash[hash_index].index = index;
}
static int
efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
{
	int rc = 0;
	u8 buf[SLI4_BMBX_SIZE];
	struct sli4_rqst_cmn_set_features_health_check param;
	u32 health_check_flag = 0;
	memset(&param, 0, sizeof(param));
	if (enable)
		health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
	if (query)
		health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
	param.health_check_dword = cpu_to_le32(health_check_flag);
	/* build the set_features command */
	sli_cmd_common_set_features(&hw->sli, buf,
		SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);
	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	if (rc)
		efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
	else
		efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
	return rc;
}
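/*
 * Bring the hardware online: verify the command lists are empty, clean
 * up any stale IO lists left over from a reset, run sli_init(), register
 * the FCFI (single RQ or MRQ mode), create the WQ request tag pool, set
 * up and free-list the HW IOs, arm the EQs/CQs and populate the queue
 * hashes. On success hw->state is set to EFCT_HW_STATE_ACTIVE.
 */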
int
efct_hw_init(struct efct_hw *hw)
{
	int rc;
	u32 i = 0;
	int rem_count;
	unsigned long flags = 0;
	struct efct_hw_io *temp;
	struct efc_dma *dma;
	/*
	 * Make sure the command lists are empty. If this is start-of-day,
	 * they'll be empty since they were just initialized in efct_hw_setup.
	 * If we've just gone through a reset, the command and command pending
	 * lists should have been cleaned up as part of the reset
	 * (efct_hw_reset()).
	 */
	spin_lock_irqsave(&hw->cmd_lock, flags);
	if (!list_empty(&hw->cmd_head)) {
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
		efc_log_err(hw->os, "command found on cmd list\n");
		return -EIO;
	}
	if (!list_empty(&hw->cmd_pending)) {
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
		efc_log_err(hw->os, "command found on pending list\n");
		return -EIO;
	}
	spin_unlock_irqrestore(&hw->cmd_lock, flags);
	/* Free RQ buffers if previously allocated */
	efct_hw_rx_free(hw);
	/*
	 * The IO queues must be initialized here for the reset case. The
	 * efct_hw_init_io() function will re-add the IOs to the free list.
	 * The cmd_head list should be OK since we free all entries in
	 * efct_hw_command_cancel() that is called in the efct_hw_reset().
	 */
	/* If we are in this function due to a reset, there may be stale items
	 * on lists that need to be removed. Clean them up.
	 */
	rem_count = 0;
	while ((!list_empty(&hw->io_wait_free))) {
		rem_count++;
		temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
			list_entry);
		list_del_init(&temp->list_entry);
	}
	if (rem_count > 0)
		efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
			rem_count);
	rem_count = 0;
	while ((!list_empty(&hw->io_inuse))) {
		rem_count++;
		temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
			list_entry);
		list_del_init(&temp->list_entry);
	}
	if (rem_count > 0)
		efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
			rem_count);
	rem_count = 0;
	while ((!list_empty(&hw->io_free))) {
		rem_count++;
		temp = list_first_entry(&hw->io_free, struct efct_hw_io,
			list_entry);
		list_del_init(&temp->list_entry);
	}
	if (rem_count > 0)
		efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
			rem_count);
	/* If MRQ is not required, make sure we don't request the feature. */
	if (hw->config.n_rq == 1)
		hw->sli.features &= (~SLI4_REQFEAT_MRQP);
	if (sli_init(&hw->sli)) {
		efc_log_err(hw->os, "SLI failed to initialize\n");
		return -EIO;
	}
	if (hw->sliport_healthcheck) {
		rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
		if (rc != 0) {
			efc_log_err(hw->os, "Enable port Health check fail\n");
			return rc;
		}
	}
	/*
	 * Set FDT transfer hint, only works on Lancer
	 */
	if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
		/*
		 * Non-fatal error. In particular, we can disregard failure to
		 * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware
		 * that do not support EFCT_HW_FDT_XFER_HINT feature.
		 */
		efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
	}
	/* zero the hashes */
	memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
	efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
		EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
	memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
	efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
		EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
	memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
	efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
		EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
	rc = efct_hw_init_queues(hw);
	if (rc)
		return rc;
	rc = efct_hw_map_wq_cpu(hw);
	if (rc)
		return rc;
	/* Allocate and post RQ buffers */
	rc = efct_hw_rx_allocate(hw);
	if (rc) {
		efc_log_err(hw->os, "rx_allocate failed\n");
		return rc;
	}
	rc = efct_hw_rx_post(hw);
	if (rc) {
		efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
		return rc;
	}
	if (hw->config.n_eq == 1) {
		rc = efct_hw_config_rq(hw);
		if (rc) {
			efc_log_err(hw->os, "config rq failed %d\n", rc);
			return rc;
		}
	} else {
		rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
		if (rc != 0) {
			efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
			return rc;
		}
		rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
		if (rc != 0) {
			efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
			return rc;
		}
	}
	/*
	 * Allocate the WQ request tag pool, if not previously allocated
	 * (the request tag value is 16 bits, thus the pool allocation size
	 * of 64k)
	 */
	hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
	if (!hw->wq_reqtag_pool) {
		efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
		return -ENOMEM;
	}
	rc = efct_hw_setup_io(hw);
	if (rc) {
		efc_log_err(hw->os, "IO allocation failure\n");
		return rc;
	}
	rc = efct_hw_init_io(hw);
	if (rc) {
		efc_log_err(hw->os, "IO initialization failure\n");
		return rc;
	}
	dma = &hw->loop_map;
	dma->size = SLI4_MIN_LOOP_MAP_BYTES;
	dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
		GFP_KERNEL);
	if (!dma->virt)
		return -EIO;
	/*
	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ
	 * entries
	 */
	for (i = 0; i < hw->eq_count; i++)
		sli_queue_arm(&hw->sli, &hw->eq[i], true);
	/*
	 * Initialize RQ hash
	 */
	for (i = 0; i < hw->rq_count; i++)
		efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
	/*
	 * Initialize WQ hash
	 */
	for (i = 0; i < hw->wq_count; i++)
		efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
	/*
	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
	 */
	for (i = 0; i < hw->cq_count; i++) {
		efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
		sli_queue_arm(&hw->sli, &hw->cq[i], true);
	}
	/* Set RQ process limit */
	for (i = 0; i < hw->hw_rq_count; i++) {
		struct hw_rq *rq = hw->hw_rq[i];
		hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
	}
	/* record the fact that the queues are functional */
	hw->state = EFCT_HW_STATE_ACTIVE;
	/*
	 * Allocate a HW IO for send frame.
	 */
	hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
	if (!hw->hw_wq[0]->send_frame_io)
		efc_log_err(hw->os, "alloc for send_frame_io failed\n");
	/* Initialize send frame sequence id */
	atomic_set(&hw->send_frame_seq_id, 0);
	return 0;
}
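/*
 * Parse a comma-separated list of up to ARRAY_SIZE(hw->config.filter_def)
 * 32-bit values into hw->config.filter_def[]. Values go through
 * kstrtou32() with base 0, so both decimal and 0x-prefixed hex are
 * accepted; e.g. an (illustrative) string "0x01ff0000,0x02ff0000" sets
 * the first two filter words and leaves the rest zero.
 */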
int
efct_hw_parse_filter(struct efct_hw *hw, void *value)
{
	int rc = 0;
	char *p = NULL;
	char *token;
	u32 idx = 0;
	for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
		hw->config.filter_def[idx] = 0;
	p = kstrdup(value, GFP_KERNEL);
	if (!p || !*p) {
		efc_log_err(hw->os, "p is NULL\n");
		return -ENOMEM;
	}
	idx = 0;
	while ((token = strsep(&p, ",")) && *token) {
		if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
			efc_log_err(hw->os, "kstrtoint failed\n");
		if (!p || !*p)
			break;
		if (idx == ARRAY_SIZE(hw->config.filter_def))
			break;
	}
	kfree(p);
	return rc;
}
u64
efct_get_wwnn(struct efct_hw *hw)
{
	struct sli4 *sli = &hw->sli;
	u8 p[8];
	memcpy(p, sli->wwnn, sizeof(p));
	return get_unaligned_be64(p);
}
u64
efct_get_wwpn(struct efct_hw *hw)
{
	struct sli4 *sli = &hw->sli;
	u8 p[8];
	memcpy(p, sli->wwpn, sizeof(p));
	return get_unaligned_be64(p);
}
static struct efc_hw_rq_buffer *
efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
			u32 size)
{
	struct efct *efct = hw->os;
	struct efc_hw_rq_buffer *rq_buf = NULL;
	struct efc_hw_rq_buffer *prq;
	u32 i;
	if (!count)
		return NULL;
	rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL);
	if (!rq_buf)
		return NULL;
	memset(rq_buf, 0, sizeof(*rq_buf) * count);
	for (i = 0, prq = rq_buf; i < count; i++, prq++) {
		prq->rqindex = rqindex;
		prq->dma.size = size;
		prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
			prq->dma.size,
			&prq->dma.phys,
			GFP_KERNEL);
		if (!prq->dma.virt) {
			efc_log_err(hw->os, "DMA allocation failed\n");
			kfree(rq_buf);
			return NULL;
		}
	}
	return rq_buf;
}
static void
efct_hw_rx_buffer_free(struct efct_hw *hw,
		       struct efc_hw_rq_buffer *rq_buf,
		       u32 count)
{
	struct efct *efct = hw->os;
	u32 i;
	struct efc_hw_rq_buffer *prq;
	if (rq_buf) {
		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
			dma_free_coherent(&efct->pci->dev,
				prq->dma.size, prq->dma.virt,
				prq->dma.phys);
			memset(&prq->dma, 0, sizeof(struct efc_dma));
		}
		kfree(rq_buf);
	}
}
int
efct_hw_rx_allocate(struct efct_hw *hw)
{
	struct efct *efct = hw->os;
	u32 i;
	int rc = 0;
	u32 rqindex = 0;
	u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
	u32 payload_size = hw->config.rq_default_buffer_size;
	rqindex = 0;
	for (i = 0; i < hw->hw_rq_count; i++) {
		struct hw_rq *rq = hw->hw_rq[i];
		/* Allocate header buffers */
		rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
			rq->entry_count,
			hdr_size);
		if (!rq->hdr_buf) {
			efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
			rc = -EIO;
			break;
		}
		efc_log_debug(hw->os,
			"rq[%2d] rq_id %02d header %4d by %4d bytes\n",
			i, rq->hdr->id, rq->entry_count, hdr_size);
		rqindex++;
		/* Allocate payload buffers */
		rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
			rq->entry_count,
			payload_size);
		if (!rq->payload_buf) {
			efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
			rc = -EIO;
			break;
		}
		efc_log_debug(hw->os,
			"rq[%2d] rq_id %02d default %4d by %4d bytes\n",
			i, rq->data->id, rq->entry_count, payload_size);
		rqindex++;
	}
	return rc ? -EIO : 0;
}
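/*
 * Post receive buffers to the RQs. A struct efc_hw_sequence is set up
 * for each header/payload buffer pair and handed back through
 * efct_hw_sequence_free(), which in RQ pair mode posts both buffers of
 * the pair to the hardware together.
 */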
int
efct_hw_rx_post(struct efct_hw *hw)
{
	u32 i;
	u32 idx;
	u32 rq_idx;
	int rc = 0;
	if (!hw->seq_pool) {
		u32 count = 0;
		for (i = 0; i < hw->hw_rq_count; i++)
			count += hw->hw_rq[i]->entry_count;
		hw->seq_pool = kmalloc_array(count,
			sizeof(struct efc_hw_sequence), GFP_KERNEL);
		if (!hw->seq_pool)
			return -ENOMEM;
	}
	/*
	 * In RQ pair mode, we MUST post the header and payload buffer at the
	 * same time.
	 */
	for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
		struct hw_rq *rq = hw->hw_rq[rq_idx];
		for (i = 0; i < rq->entry_count - 1; i++) {
			struct efc_hw_sequence *seq;
			seq = hw->seq_pool + idx;
			idx++;
			seq->header = &rq->hdr_buf[i];
			seq->payload = &rq->payload_buf[i];
			rc = efct_hw_sequence_free(hw, seq);
			if (rc)
				break;
		}
		if (rc)
			break;
	}
	if (rc && hw->seq_pool) {
		kfree(hw->seq_pool);
		/* avoid reusing the freed pool if posting is retried */
		hw->seq_pool = NULL;
	}
	return rc;
}
void
efct_hw_rx_free(struct efct_hw *hw)
{
	u32 i;
	/* Free hw_rq buffers */
	for (i = 0; i < hw->hw_rq_count; i++) {
		struct hw_rq *rq = hw->hw_rq[i];
		if (rq) {
			efct_hw_rx_buffer_free(hw, rq->hdr_buf,
				rq->entry_count);
			rq->hdr_buf = NULL;
			efct_hw_rx_buffer_free(hw, rq->payload_buf,
				rq->entry_count);
			rq->payload_buf = NULL;
		}
	}
}
static int
efct_hw_cmd_submit_pending(struct efct_hw *hw)
{
	int rc = 0;
	/* Assumes lock held */
	/* Only submit MQE if there's room */
	while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
	       !list_empty(&hw->cmd_pending)) {
		struct efct_command_ctx *ctx;
		ctx = list_first_entry(&hw->cmd_pending,
			struct efct_command_ctx, list_entry);
		if (!ctx)
			break;
		list_del_init(&ctx->list_entry);
		list_add_tail(&ctx->list_entry, &hw->cmd_head);
		hw->cmd_head_count++;
		if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
			efc_log_debug(hw->os,
				"sli_queue_write failed: %d\n", rc);
			rc = -EIO;
			break;
		}
	}
	return rc;
}
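/*
 * Submit a mailbox command. With EFCT_CMD_POLL the command is written to
 * the bootstrap mailbox and this call blocks until it completes; with
 * EFCT_CMD_NOWAIT a command context is queued on cmd_pending and written
 * to the MQ as space allows, and the optional cb(hw, status, mqe, arg)
 * is invoked from the MQ completion path.
 */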
int
efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
{
	int rc = -EIO;
	unsigned long flags = 0;
	void *bmbx = NULL;
	/*
	 * If the chip is in an error state (UE'd) then reject this mailbox
	 * command.
	 */
	if (sli_fw_error_status(&hw->sli) > 0) {
		efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
		efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
			sli_reg_read_status(&hw->sli),
			sli_reg_read_err1(&hw->sli),
			sli_reg_read_err2(&hw->sli));
		return -EIO;
	}
	/*
	 * Send a mailbox command to the hardware, and either wait for
	 * a completion (EFCT_CMD_POLL) or get an optional asynchronous
	 * completion (EFCT_CMD_NOWAIT).
	 */
	if (opts == EFCT_CMD_POLL) {
		mutex_lock(&hw->bmbx_lock);
		bmbx = hw->sli.bmbx.virt;
		memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
		if (sli_bmbx_command(&hw->sli) == 0) {
			rc = 0;
			memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
		}
		mutex_unlock(&hw->bmbx_lock);
	} else if (opts == EFCT_CMD_NOWAIT) {
		struct efct_command_ctx *ctx = NULL;
		if (hw->state != EFCT_HW_STATE_ACTIVE) {
			efc_log_err(hw->os, "Can't send command, HW state=%d\n",
				hw->state);
			return -EIO;
		}
		ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
		if (!ctx)
			return -ENOSPC;
		memset(ctx, 0, sizeof(struct efct_command_ctx));
		if (cb) {
			ctx->cb = cb;
			ctx->arg = arg;
		}
		memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
		ctx->ctx = hw;
		spin_lock_irqsave(&hw->cmd_lock, flags);
		/* Add to pending list */
		INIT_LIST_HEAD(&ctx->list_entry);
		list_add_tail(&ctx->list_entry, &hw->cmd_pending);
		/* Submit as much of the pending list as we can */
		rc = efct_hw_cmd_submit_pending(hw);
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
	}
	return rc;
}
static int
efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
			size_t size)
{
	struct efct_command_ctx *ctx = NULL;
	unsigned long flags = 0;
	spin_lock_irqsave(&hw->cmd_lock, flags);
	if (!list_empty(&hw->cmd_head)) {
		ctx = list_first_entry(&hw->cmd_head,
			struct efct_command_ctx, list_entry);
		list_del_init(&ctx->list_entry);
	}
	if (!ctx) {
		efc_log_err(hw->os, "no command context\n");
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
		return -EIO;
	}
	hw->cmd_head_count--;
	/* Post any pending requests */
	efct_hw_cmd_submit_pending(hw);
	spin_unlock_irqrestore(&hw->cmd_lock, flags);
	if (ctx->cb) {
		memcpy(ctx->buf, mqe, size);
		ctx->cb(hw, status, ctx->buf, ctx->arg);
	}
	mempool_free(ctx, hw->cmd_ctx_pool);
	return 0;
}
static int
efct_hw_mq_process(struct efct_hw *hw,
		   int status, struct sli4_queue *mq)
{
	u8 mqe[SLI4_BMBX_SIZE];
	int rc;
	rc = sli_mq_read(&hw->sli, mq, mqe);
	if (!rc)
		rc = efct_hw_command_process(hw, status, mqe, mq->size);
	return rc;
}
static int
efct_hw_command_cancel(struct efct_hw *hw)
{
	unsigned long flags = 0;
	int rc = 0;
	spin_lock_irqsave(&hw->cmd_lock, flags);
	/*
	 * Manually clean up remaining commands. Note: since this calls
	 * efct_hw_command_process(), we'll also process the cmd_pending
	 * list, so no need to manually clean that out.
	 */
	while (!list_empty(&hw->cmd_head)) {
		u8 mqe[SLI4_BMBX_SIZE] = { 0 };
		struct efct_command_ctx *ctx;
		ctx = list_first_entry(&hw->cmd_head,
			struct efct_command_ctx, list_entry);
		efc_log_debug(hw->os, "hung command %08x\n",
			!ctx ? U32_MAX : *((u32 *)ctx->buf));
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
		rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
		spin_lock_irqsave(&hw->cmd_lock, flags);
	}
	spin_unlock_irqrestore(&hw->cmd_lock, flags);
	return rc;
}
static void
efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
{
	struct efct_mbox_rqst_ctx *ctx = arg;
	if (ctx) {
		if (ctx->callback)
			(*ctx->callback)(hw->os->efcport, status, mqe,
				ctx->arg);
		mempool_free(ctx, hw->mbox_rqst_pool);
	}
}
int
efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
{
	struct efct_mbox_rqst_ctx *ctx;
	struct efct *efct = base;
	struct efct_hw *hw = &efct->hw;
	int rc;
  1295. /*
  1296. * Allocate a callback context (which includes the mbox cmd buffer),
  1297. * we need this to be persistent as the mbox cmd submission may be
1298. * queued and executed later.
  1299. */
  1300. ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
  1301. if (!ctx)
  1302. return -EIO;
  1303. ctx->callback = cb;
  1304. ctx->arg = arg;
  1305. rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
  1306. if (rc) {
  1307. efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
  1308. mempool_free(ctx, hw->mbox_rqst_pool);
  1309. return -EIO;
  1310. }
  1311. return 0;
  1312. }
  1313. static inline struct efct_hw_io *
  1314. _efct_hw_io_alloc(struct efct_hw *hw)
  1315. {
  1316. struct efct_hw_io *io = NULL;
  1317. if (!list_empty(&hw->io_free)) {
  1318. io = list_first_entry(&hw->io_free, struct efct_hw_io,
  1319. list_entry);
  1320. list_del(&io->list_entry);
  1321. }
  1322. if (io) {
  1323. INIT_LIST_HEAD(&io->list_entry);
  1324. list_add_tail(&io->list_entry, &hw->io_inuse);
  1325. io->state = EFCT_HW_IO_STATE_INUSE;
  1326. io->abort_reqtag = U32_MAX;
  1327. io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
  1328. if (!io->wq) {
  1329. efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
  1330. raw_smp_processor_id());
  1331. io->wq = hw->hw_wq[0];
  1332. }
  1333. kref_init(&io->ref);
  1334. io->release = efct_hw_io_free_internal;
  1335. } else {
  1336. atomic_add(1, &hw->io_alloc_failed_count);
  1337. }
  1338. return io;
  1339. }
  1340. struct efct_hw_io *
  1341. efct_hw_io_alloc(struct efct_hw *hw)
  1342. {
  1343. struct efct_hw_io *io = NULL;
  1344. unsigned long flags = 0;
  1345. spin_lock_irqsave(&hw->io_lock, flags);
  1346. io = _efct_hw_io_alloc(hw);
  1347. spin_unlock_irqrestore(&hw->io_lock, flags);
  1348. return io;
  1349. }
  1350. static void
  1351. efct_hw_io_free_move_correct_list(struct efct_hw *hw,
  1352. struct efct_hw_io *io)
  1353. {
  1354. /*
  1355. * When an IO is freed, depending on the exchange busy flag,
  1356. * move it to the correct list.
  1357. */
  1358. if (io->xbusy) {
  1359. /*
  1360. * add to wait_free list and wait for XRI_ABORTED CQEs to clean
  1361. * up
  1362. */
  1363. INIT_LIST_HEAD(&io->list_entry);
  1364. list_add_tail(&io->list_entry, &hw->io_wait_free);
  1365. io->state = EFCT_HW_IO_STATE_WAIT_FREE;
  1366. } else {
  1367. /* IO not busy, add to free list */
  1368. INIT_LIST_HEAD(&io->list_entry);
  1369. list_add_tail(&io->list_entry, &hw->io_free);
  1370. io->state = EFCT_HW_IO_STATE_FREE;
  1371. }
  1372. }
  1373. static inline void
  1374. efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
  1375. {
  1376. /* initialize IO fields */
  1377. efct_hw_init_free_io(io);
  1378. /* Restore default SGL */
  1379. efct_hw_io_restore_sgl(hw, io);
  1380. }
  1381. void
  1382. efct_hw_io_free_internal(struct kref *arg)
  1383. {
  1384. unsigned long flags = 0;
  1385. struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
  1386. struct efct_hw *hw = io->hw;
  1387. /* perform common cleanup */
  1388. efct_hw_io_free_common(hw, io);
  1389. spin_lock_irqsave(&hw->io_lock, flags);
  1390. /* remove from in-use list */
  1391. if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
  1392. list_del_init(&io->list_entry);
  1393. efct_hw_io_free_move_correct_list(hw, io);
  1394. }
  1395. spin_unlock_irqrestore(&hw->io_lock, flags);
  1396. }
  1397. int
  1398. efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
  1399. {
  1400. return kref_put(&io->ref, io->release);
  1401. }
  1402. struct efct_hw_io *
  1403. efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
  1404. {
  1405. u32 ioindex;
  1406. ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
  1407. return hw->io[ioindex];
  1408. }
  1409. int
  1410. efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
  1411. enum efct_hw_io_type type)
  1412. {
  1413. struct sli4_sge *data = NULL;
  1414. u32 i = 0;
  1415. u32 skips = 0;
  1416. u32 sge_flags = 0;
  1417. if (!io) {
  1418. efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
  1419. return -EIO;
  1420. }
  1421. /* Clear / reset the scatter-gather list */
  1422. io->sgl = &io->def_sgl;
  1423. io->sgl_count = io->def_sgl_count;
  1424. io->first_data_sge = 0;
  1425. memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
  1426. io->n_sge = 0;
  1427. io->sge_offset = 0;
  1428. io->type = type;
  1429. data = io->sgl->virt;
  1430. /*
  1431. * Some IO types have underlying hardware requirements on the order
  1432. * of SGEs. Process all special entries here.
  1433. */
  1434. switch (type) {
  1435. case EFCT_HW_IO_TARGET_WRITE:
  1436. /* populate host resident XFER_RDY buffer */
  1437. sge_flags = le32_to_cpu(data->dw2_flags);
  1438. sge_flags &= (~SLI4_SGE_TYPE_MASK);
  1439. sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
  1440. data->buffer_address_high =
  1441. cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
  1442. data->buffer_address_low =
  1443. cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
  1444. data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
  1445. data->dw2_flags = cpu_to_le32(sge_flags);
  1446. data++;
  1447. skips = EFCT_TARGET_WRITE_SKIPS;
  1448. io->n_sge = 1;
  1449. break;
  1450. case EFCT_HW_IO_TARGET_READ:
  1451. /*
  1452. * For FCP_TSEND64, the first 2 entries are SKIP SGE's
  1453. */
  1454. skips = EFCT_TARGET_READ_SKIPS;
  1455. break;
  1456. case EFCT_HW_IO_TARGET_RSP:
  1457. /*
  1458. * No skips, etc. for FCP_TRSP64
  1459. */
  1460. break;
  1461. default:
  1462. efc_log_err(hw->os, "unsupported IO type %#x\n", type);
  1463. return -EIO;
  1464. }
  1465. /*
  1466. * Write skip entries
  1467. */
  1468. for (i = 0; i < skips; i++) {
  1469. sge_flags = le32_to_cpu(data->dw2_flags);
  1470. sge_flags &= (~SLI4_SGE_TYPE_MASK);
  1471. sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
  1472. data->dw2_flags = cpu_to_le32(sge_flags);
  1473. data++;
  1474. }
  1475. io->n_sge += skips;
  1476. /*
  1477. * Set last
  1478. */
  1479. sge_flags = le32_to_cpu(data->dw2_flags);
  1480. sge_flags |= SLI4_SGE_LAST;
  1481. data->dw2_flags = cpu_to_le32(sge_flags);
  1482. return 0;
  1483. }
  1484. int
  1485. efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
  1486. uintptr_t addr, u32 length)
  1487. {
  1488. struct sli4_sge *data = NULL;
  1489. u32 sge_flags = 0;
  1490. if (!io || !addr || !length) {
  1491. efc_log_err(hw->os,
  1492. "bad parameter hw=%p io=%p addr=%lx length=%u\n",
  1493. hw, io, addr, length);
  1494. return -EIO;
  1495. }
  1496. if (length > hw->sli.sge_supported_length) {
  1497. efc_log_err(hw->os,
  1498. "length of SGE %d bigger than allowed %d\n",
  1499. length, hw->sli.sge_supported_length);
  1500. return -EIO;
  1501. }
  1502. data = io->sgl->virt;
  1503. data += io->n_sge;
  1504. sge_flags = le32_to_cpu(data->dw2_flags);
  1505. sge_flags &= ~SLI4_SGE_TYPE_MASK;
  1506. sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
  1507. sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
  1508. sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
  1509. data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
  1510. data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
  1511. data->buffer_length = cpu_to_le32(length);
  1512. /*
  1513. * Always assume this is the last entry and mark as such.
  1514. * If this is not the first entry unset the "last SGE"
  1515. * indication for the previous entry
  1516. */
  1517. sge_flags |= SLI4_SGE_LAST;
  1518. data->dw2_flags = cpu_to_le32(sge_flags);
  1519. if (io->n_sge) {
  1520. sge_flags = le32_to_cpu(data[-1].dw2_flags);
  1521. sge_flags &= ~SLI4_SGE_LAST;
  1522. data[-1].dw2_flags = cpu_to_le32(sge_flags);
  1523. }
  1524. /* Set first_data_bde if not previously set */
  1525. if (io->first_data_sge == 0)
  1526. io->first_data_sge = io->n_sge;
  1527. io->sge_offset += length;
  1528. io->n_sge++;
  1529. return 0;
  1530. }
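/*
 * Usage sketch (illustrative only, not part of this driver): a typical
 * target-response path pairs the allocator and SGE helpers above with
 * efct_hw_io_send() further below, and drops the reference with
 * efct_hw_io_free() once the exchange completes. rsp_dma, rsp_len, iparam,
 * example_done and cb_arg are hypothetical names.
 *
 *	struct efct_hw_io *io = efct_hw_io_alloc(hw);
 *
 *	if (!io)
 *		return -ENOSPC;
 *
 *	efct_hw_io_init_sges(hw, io, EFCT_HW_IO_TARGET_RSP);
 *	efct_hw_io_add_sge(hw, io, rsp_dma.phys, rsp_len);
 *
 *	if (efct_hw_io_send(hw, EFCT_HW_IO_TARGET_RSP, io, &iparam,
 *			    example_done, cb_arg)) {
 *		efct_hw_io_free(hw, io);
 *		return -EIO;
 *	}
 */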
  1531. void
  1532. efct_hw_io_abort_all(struct efct_hw *hw)
  1533. {
  1534. struct efct_hw_io *io_to_abort = NULL;
  1535. struct efct_hw_io *next_io = NULL;
  1536. list_for_each_entry_safe(io_to_abort, next_io,
  1537. &hw->io_inuse, list_entry) {
  1538. efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
  1539. }
  1540. }
  1541. static void
  1542. efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
  1543. {
  1544. struct efct_hw_io *io = arg;
  1545. struct efct_hw *hw = io->hw;
  1546. u32 ext = 0;
  1547. u32 len = 0;
  1548. struct hw_wq_callback *wqcb;
  1549. /*
  1550. * For IOs that were aborted internally, we may need to issue the
1551. * callback here depending on whether an XRI_ABORTED CQE is expected or
1552. * not. If the status is Local Reject/No XRI, then
  1553. * issue the callback now.
  1554. */
  1555. ext = sli_fc_ext_status(&hw->sli, cqe);
  1556. if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
  1557. ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
  1558. efct_hw_done_t done = io->done;
  1559. io->done = NULL;
  1560. /*
1561. * Use the latched status, as this is always saved for an internal
1562. * abort. Note: we won't have both a done and an abort_done
1563. * function, so don't worry about clobbering the len, status and
1564. * ext fields.
  1565. */
  1566. status = io->saved_status;
  1567. len = io->saved_len;
  1568. ext = io->saved_ext;
  1569. io->status_saved = false;
  1570. done(io, len, status, ext, io->arg);
  1571. }
  1572. if (io->abort_done) {
  1573. efct_hw_done_t done = io->abort_done;
  1574. io->abort_done = NULL;
  1575. done(io, len, status, ext, io->abort_arg);
  1576. }
  1577. /* clear abort bit to indicate abort is complete */
  1578. io->abort_in_progress = false;
  1579. /* Free the WQ callback */
  1580. if (io->abort_reqtag == U32_MAX) {
  1581. efc_log_err(hw->os, "HW IO already freed\n");
  1582. return;
  1583. }
  1584. wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
  1585. efct_hw_reqtag_free(hw, wqcb);
  1586. /*
  1587. * Call efct_hw_io_free() because this releases the WQ reservation as
  1588. * well as doing the refcount put. Don't duplicate the code here.
  1589. */
  1590. (void)efct_hw_io_free(hw, io);
  1591. }
  1592. static void
  1593. efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
  1594. {
  1595. struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
  1596. memset(abort, 0, hw->sli.wqe_size);
  1597. abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
  1598. abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
  1599. /* Suppress ABTS retries */
  1600. abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
  1601. abort->t_tag = cpu_to_le32(wqe->id);
  1602. abort->command = SLI4_WQE_ABORT;
  1603. abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
  1604. abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
  1605. abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
  1606. }
  1607. int
  1608. efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
  1609. bool send_abts, void *cb, void *arg)
  1610. {
  1611. struct hw_wq_callback *wqcb;
  1612. unsigned long flags = 0;
  1613. if (!io_to_abort) {
  1614. efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
  1615. hw, io_to_abort);
  1616. return -EIO;
  1617. }
  1618. if (hw->state != EFCT_HW_STATE_ACTIVE) {
  1619. efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
  1620. hw->state);
  1621. return -EIO;
  1622. }
  1623. /* take a reference on IO being aborted */
  1624. if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
  1625. /* command no longer active */
  1626. efc_log_debug(hw->os,
  1627. "io not active xri=0x%x tag=0x%x\n",
  1628. io_to_abort->indicator, io_to_abort->reqtag);
  1629. return -ENOENT;
  1630. }
  1631. /* Must have a valid WQ reference */
  1632. if (!io_to_abort->wq) {
  1633. efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
  1634. io_to_abort->indicator);
  1635. /* efct_ref_get(): same function */
  1636. kref_put(&io_to_abort->ref, io_to_abort->release);
  1637. return -ENOENT;
  1638. }
  1639. /*
  1640. * Validation checks complete; now check to see if already being
1641. * aborted; if not, set the flag.
  1642. */
  1643. if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
  1644. /* efct_ref_get(): same function */
  1645. kref_put(&io_to_abort->ref, io_to_abort->release);
  1646. efc_log_debug(hw->os,
  1647. "io already being aborted xri=0x%x tag=0x%x\n",
  1648. io_to_abort->indicator, io_to_abort->reqtag);
  1649. return -EINPROGRESS;
  1650. }
  1651. /*
  1652. * If we got here, the possibilities are:
1653. * - host owned xri
1654. *     - io_to_abort->wq_index != U32_MAX
1655. *         - submit ABORT_WQE to same WQ
1656. * - port owned xri:
1657. *     - rxri: io_to_abort->wq_index == U32_MAX
1658. *         - submit ABORT_WQE to any WQ
1659. *     - non-rxri
1660. *         - io_to_abort->index != U32_MAX
1661. *             - submit ABORT_WQE to same WQ
1662. *         - io_to_abort->index == U32_MAX
1663. *             - submit ABORT_WQE to any WQ
  1664. */
  1665. io_to_abort->abort_done = cb;
  1666. io_to_abort->abort_arg = arg;
  1667. /* Allocate a request tag for the abort portion of this IO */
  1668. wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
1669. if (!wqcb) {
1670. efc_log_err(hw->os, "can't allocate request tag\n");
/* undo the abort flag and the IO reference taken above */
io_to_abort->abort_in_progress = false;
/* efct_ref_get(): same function */
kref_put(&io_to_abort->ref, io_to_abort->release);
1671. return -ENOSPC;
1672. }
  1673. io_to_abort->abort_reqtag = wqcb->instance_index;
  1674. io_to_abort->wqe.send_abts = send_abts;
  1675. io_to_abort->wqe.id = io_to_abort->indicator;
  1676. io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
  1677. /*
  1678. * If the wqe is on the pending list, then set this wqe to be
  1679. * aborted when the IO's wqe is removed from the list.
  1680. */
  1681. if (io_to_abort->wq) {
  1682. spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
  1683. if (io_to_abort->wqe.list_entry.next) {
  1684. io_to_abort->wqe.abort_wqe_submit_needed = true;
  1685. spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
  1686. flags);
  1687. return 0;
  1688. }
  1689. spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
  1690. }
  1691. efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
  1692. /* ABORT_WQE does not actually utilize an XRI on the Port,
  1693. * therefore, keep xbusy as-is to track the exchange's state,
  1694. * not the ABORT_WQE's state
  1695. */
  1696. if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
  1697. io_to_abort->abort_in_progress = false;
  1698. /* efct_ref_get(): same function */
  1699. kref_put(&io_to_abort->ref, io_to_abort->release);
  1700. return -EIO;
  1701. }
  1702. return 0;
  1703. }
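/*
 * Usage sketch (illustrative only): an upper layer that wants to reclaim an
 * outstanding exchange requests an abort and is notified through the
 * abort_done callback; the kref taken inside efct_hw_io_abort() keeps the
 * HW IO alive until the ABORT_WQE completes. example_abort_done() is a
 * hypothetical name, and its return value is not examined by this layer.
 *
 *	static int example_abort_done(struct efct_hw_io *io, u32 len,
 *				      int status, u32 ext, void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	rc = efct_hw_io_abort(hw, io, true, example_abort_done, arg);
 *	if (rc == -EINPROGRESS)
 *		rc = 0;
 */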
  1704. void
  1705. efct_hw_reqtag_pool_free(struct efct_hw *hw)
  1706. {
  1707. u32 i;
  1708. struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
  1709. struct hw_wq_callback *wqcb = NULL;
  1710. if (reqtag_pool) {
  1711. for (i = 0; i < U16_MAX; i++) {
  1712. wqcb = reqtag_pool->tags[i];
  1713. if (!wqcb)
  1714. continue;
  1715. kfree(wqcb);
  1716. }
  1717. kfree(reqtag_pool);
  1718. hw->wq_reqtag_pool = NULL;
  1719. }
  1720. }
  1721. struct reqtag_pool *
  1722. efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
  1723. {
  1724. u32 i = 0;
  1725. struct reqtag_pool *reqtag_pool;
  1726. struct hw_wq_callback *wqcb;
  1727. reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
  1728. if (!reqtag_pool)
  1729. return NULL;
  1730. INIT_LIST_HEAD(&reqtag_pool->freelist);
  1731. /* initialize reqtag pool lock */
  1732. spin_lock_init(&reqtag_pool->lock);
  1733. for (i = 0; i < U16_MAX; i++) {
  1734. wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
  1735. if (!wqcb)
  1736. break;
  1737. reqtag_pool->tags[i] = wqcb;
  1738. wqcb->instance_index = i;
  1739. wqcb->callback = NULL;
  1740. wqcb->arg = NULL;
  1741. INIT_LIST_HEAD(&wqcb->list_entry);
  1742. list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
  1743. }
  1744. return reqtag_pool;
  1745. }
  1746. struct hw_wq_callback *
  1747. efct_hw_reqtag_alloc(struct efct_hw *hw,
  1748. void (*callback)(void *arg, u8 *cqe, int status),
  1749. void *arg)
  1750. {
  1751. struct hw_wq_callback *wqcb = NULL;
  1752. struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
  1753. unsigned long flags = 0;
  1754. if (!callback)
  1755. return wqcb;
  1756. spin_lock_irqsave(&reqtag_pool->lock, flags);
  1757. if (!list_empty(&reqtag_pool->freelist)) {
  1758. wqcb = list_first_entry(&reqtag_pool->freelist,
  1759. struct hw_wq_callback, list_entry);
  1760. }
  1761. if (wqcb) {
  1762. list_del_init(&wqcb->list_entry);
  1763. spin_unlock_irqrestore(&reqtag_pool->lock, flags);
  1764. wqcb->callback = callback;
  1765. wqcb->arg = arg;
  1766. } else {
  1767. spin_unlock_irqrestore(&reqtag_pool->lock, flags);
  1768. }
  1769. return wqcb;
  1770. }
  1771. void
  1772. efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
  1773. {
  1774. unsigned long flags = 0;
  1775. struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
  1776. if (!wqcb->callback)
  1777. efc_log_err(hw->os, "WQCB is already freed\n");
  1778. spin_lock_irqsave(&reqtag_pool->lock, flags);
  1779. wqcb->callback = NULL;
  1780. wqcb->arg = NULL;
  1781. INIT_LIST_HEAD(&wqcb->list_entry);
  1782. list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
  1783. spin_unlock_irqrestore(&reqtag_pool->lock, flags);
  1784. }
  1785. struct hw_wq_callback *
  1786. efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
  1787. {
  1788. struct hw_wq_callback *wqcb;
  1789. wqcb = hw->wq_reqtag_pool->tags[instance_index];
  1790. if (!wqcb)
  1791. efc_log_err(hw->os, "wqcb for instance %d is null\n",
  1792. instance_index);
  1793. return wqcb;
  1794. }
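/*
 * Usage sketch (illustrative only): WQE submitters allocate a request tag,
 * place wqcb->instance_index in the WQE's request tag field, and let
 * efct_hw_wq_process() dispatch the completion back through the registered
 * callback; the tag is then returned with efct_hw_reqtag_free().
 * example_wqe_done() and my_arg are hypothetical names.
 *
 *	wqcb = efct_hw_reqtag_alloc(hw, example_wqe_done, my_arg);
 *	if (!wqcb)
 *		return -ENOSPC;
 *	reqtag = wqcb->instance_index;
 *
 * On completion, efct_hw_wq_process() resolves the tag and invokes
 * example_wqe_done(my_arg, cqe, status); efct_hw_wq_process_abort() above
 * shows the matching efct_hw_reqtag_free() on the teardown side.
 */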
  1795. int
  1796. efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
  1797. {
  1798. int index = -1;
  1799. int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
  1800. /*
1801. * Since the hash is always bigger than the maximum number of Qs, we
1802. * never have to worry about an infinite loop: we will always find
1803. * an unused entry.
  1804. */
  1805. do {
  1806. if (hash[i].in_use && hash[i].id == id)
  1807. index = hash[i].index;
  1808. else
  1809. i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
  1810. } while (index == -1 && hash[i].in_use);
  1811. return index;
  1812. }
  1813. int
  1814. efct_hw_process(struct efct_hw *hw, u32 vector,
  1815. u32 max_isr_time_msec)
  1816. {
  1817. struct hw_eq *eq;
  1818. /*
  1819. * The caller should disable interrupts if they wish to prevent us
  1820. * from processing during a shutdown. The following states are defined:
  1821. * EFCT_HW_STATE_UNINITIALIZED - No queues allocated
  1822. * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
  1823. * queues are cleared.
  1824. * EFCT_HW_STATE_ACTIVE - Chip and queues are operational
  1825. * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
  1826. * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
  1827. * completions.
  1828. */
  1829. if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
  1830. return 0;
  1831. /* Get pointer to struct hw_eq */
  1832. eq = hw->hw_eq[vector];
  1833. if (!eq)
  1834. return 0;
  1835. eq->use_count++;
  1836. return efct_hw_eq_process(hw, eq, max_isr_time_msec);
  1837. }
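/*
 * Usage sketch (illustrative only): a per-vector MSI-X handler simply calls
 * efct_hw_process() with its vector number and an ISR time budget. The
 * example_intr structure, its members and the 100 ms budget are hypothetical.
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_intr *intr = data;
 *
 *		efct_hw_process(intr->hw, intr->vector, 100);
 *		return IRQ_HANDLED;
 *	}
 */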
  1838. int
  1839. efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
  1840. u32 max_isr_time_msec)
  1841. {
  1842. u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
  1843. u32 tcheck_count;
  1844. u64 tstart;
  1845. u64 telapsed;
  1846. bool done = false;
  1847. tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
  1848. tstart = jiffies_to_msecs(jiffies);
  1849. while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
  1850. u16 cq_id = 0;
  1851. int rc;
  1852. rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
  1853. if (unlikely(rc)) {
  1854. if (rc == SLI4_EQE_STATUS_EQ_FULL) {
  1855. u32 i;
  1856. /*
  1857. * Received a sentinel EQE indicating the
  1858. * EQ is full. Process all CQs
  1859. */
  1860. for (i = 0; i < hw->cq_count; i++)
  1861. efct_hw_cq_process(hw, hw->hw_cq[i]);
  1862. continue;
  1863. } else {
  1864. return rc;
  1865. }
  1866. } else {
  1867. int index;
  1868. index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
  1869. if (likely(index >= 0))
  1870. efct_hw_cq_process(hw, hw->hw_cq[index]);
  1871. else
  1872. efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
  1873. }
  1874. if (eq->queue->n_posted > eq->queue->posted_limit)
  1875. sli_queue_arm(&hw->sli, eq->queue, false);
  1876. if (tcheck_count && (--tcheck_count == 0)) {
  1877. tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
  1878. telapsed = jiffies_to_msecs(jiffies) - tstart;
  1879. if (telapsed >= max_isr_time_msec)
  1880. done = true;
  1881. }
  1882. }
  1883. sli_queue_eq_arm(&hw->sli, eq->queue, true);
  1884. return 0;
  1885. }
  1886. static int
  1887. _efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
  1888. {
  1889. int queue_rc;
1890. /* Every so often, set the wqec bit to generate consumed completions */
  1891. if (wq->wqec_count)
  1892. wq->wqec_count--;
  1893. if (wq->wqec_count == 0) {
  1894. struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
  1895. genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
  1896. wq->wqec_count = wq->wqec_set_count;
  1897. }
  1898. /* Decrement WQ free count */
  1899. wq->free_count--;
  1900. queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
  1901. return (queue_rc < 0) ? -EIO : 0;
  1902. }
  1903. static void
  1904. hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
  1905. {
  1906. struct efct_hw_wqe *wqe;
  1907. unsigned long flags = 0;
  1908. spin_lock_irqsave(&wq->queue->lock, flags);
  1909. /* Update free count with value passed in */
  1910. wq->free_count += update_free_count;
  1911. while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
  1912. wqe = list_first_entry(&wq->pending_list,
  1913. struct efct_hw_wqe, list_entry);
  1914. list_del_init(&wqe->list_entry);
  1915. _efct_hw_wq_write(wq, wqe);
  1916. if (wqe->abort_wqe_submit_needed) {
  1917. wqe->abort_wqe_submit_needed = false;
  1918. efct_hw_fill_abort_wqe(wq->hw, wqe);
  1919. INIT_LIST_HEAD(&wqe->list_entry);
  1920. list_add_tail(&wqe->list_entry, &wq->pending_list);
  1921. wq->wq_pending_count++;
  1922. }
  1923. }
  1924. spin_unlock_irqrestore(&wq->queue->lock, flags);
  1925. }
  1926. void
  1927. efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
  1928. {
  1929. u8 cqe[sizeof(struct sli4_mcqe)];
  1930. u16 rid = U16_MAX;
  1931. /* completion type */
  1932. enum sli4_qentry ctype;
  1933. u32 n_processed = 0;
  1934. u32 tstart, telapsed;
  1935. tstart = jiffies_to_msecs(jiffies);
  1936. while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
  1937. int status;
  1938. status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
  1939. /*
  1940. * The sign of status is significant. If status is:
  1941. * == 0 : call completed correctly and
  1942. * the CQE indicated success
  1943. * > 0 : call completed correctly and
  1944. * the CQE indicated an error
  1945. * < 0 : call failed and no information is available about the
  1946. * CQE
  1947. */
  1948. if (status < 0) {
  1949. if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
  1950. /*
  1951. * Notification that an entry was consumed,
  1952. * but not completed
  1953. */
  1954. continue;
  1955. break;
  1956. }
  1957. switch (ctype) {
  1958. case SLI4_QENTRY_ASYNC:
  1959. sli_cqe_async(&hw->sli, cqe);
  1960. break;
  1961. case SLI4_QENTRY_MQ:
  1962. /*
  1963. * Process MQ entry. Note there is no way to determine
  1964. * the MQ_ID from the completion entry.
  1965. */
  1966. efct_hw_mq_process(hw, status, hw->mq);
  1967. break;
  1968. case SLI4_QENTRY_WQ:
  1969. efct_hw_wq_process(hw, cq, cqe, status, rid);
  1970. break;
  1971. case SLI4_QENTRY_WQ_RELEASE: {
  1972. u32 wq_id = rid;
  1973. int index;
  1974. struct hw_wq *wq = NULL;
  1975. index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
  1976. if (likely(index >= 0)) {
  1977. wq = hw->hw_wq[index];
  1978. } else {
  1979. efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
  1980. break;
  1981. }
  1982. /* Submit any HW IOs that are on the WQ pending list */
  1983. hw_wq_submit_pending(wq, wq->wqec_set_count);
  1984. break;
  1985. }
  1986. case SLI4_QENTRY_RQ:
  1987. efct_hw_rqpair_process_rq(hw, cq, cqe);
  1988. break;
  1989. case SLI4_QENTRY_XABT: {
  1990. efct_hw_xabt_process(hw, cq, cqe, rid);
  1991. break;
  1992. }
  1993. default:
  1994. efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
  1995. ctype, rid);
  1996. break;
  1997. }
  1998. n_processed++;
  1999. if (n_processed == cq->queue->proc_limit)
  2000. break;
  2001. if (cq->queue->n_posted >= cq->queue->posted_limit)
  2002. sli_queue_arm(&hw->sli, cq->queue, false);
  2003. }
  2004. sli_queue_arm(&hw->sli, cq->queue, true);
  2005. if (n_processed > cq->queue->max_num_processed)
  2006. cq->queue->max_num_processed = n_processed;
  2007. telapsed = jiffies_to_msecs(jiffies) - tstart;
  2008. if (telapsed > cq->queue->max_process_time)
  2009. cq->queue->max_process_time = telapsed;
  2010. }
  2011. void
  2012. efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
  2013. u8 *cqe, int status, u16 rid)
  2014. {
  2015. struct hw_wq_callback *wqcb;
  2016. if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
  2017. if (status)
  2018. efc_log_err(hw->os, "reque xri failed, status = %d\n",
  2019. status);
  2020. return;
  2021. }
  2022. wqcb = efct_hw_reqtag_get_instance(hw, rid);
  2023. if (!wqcb) {
  2024. efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
  2025. return;
  2026. }
  2027. if (!wqcb->callback) {
  2028. efc_log_err(hw->os, "wqcb callback is NULL\n");
  2029. return;
  2030. }
  2031. (*wqcb->callback)(wqcb->arg, cqe, status);
  2032. }
  2033. void
  2034. efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
  2035. u8 *cqe, u16 rid)
  2036. {
  2037. /* search IOs wait free list */
  2038. struct efct_hw_io *io = NULL;
  2039. unsigned long flags = 0;
  2040. io = efct_hw_io_lookup(hw, rid);
  2041. if (!io) {
  2042. /* IO lookup failure should never happen */
  2043. efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
  2044. return;
  2045. }
  2046. if (!io->xbusy)
  2047. efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
  2048. else
  2049. /* mark IO as no longer busy */
  2050. io->xbusy = false;
  2051. /*
  2052. * For IOs that were aborted internally, we need to issue any pending
  2053. * callback here.
  2054. */
  2055. if (io->done) {
  2056. efct_hw_done_t done = io->done;
  2057. void *arg = io->arg;
  2058. /*
  2059. * Use latched status as this is always saved for an internal
  2060. * abort
  2061. */
  2062. int status = io->saved_status;
  2063. u32 len = io->saved_len;
  2064. u32 ext = io->saved_ext;
  2065. io->done = NULL;
  2066. io->status_saved = false;
  2067. done(io, len, status, ext, arg);
  2068. }
  2069. spin_lock_irqsave(&hw->io_lock, flags);
  2070. if (io->state == EFCT_HW_IO_STATE_INUSE ||
  2071. io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
  2072. /* if on wait_free list, caller has already freed IO;
  2073. * remove from wait_free list and add to free list.
  2074. * if on in-use list, already marked as no longer busy;
  2075. * just leave there and wait for caller to free.
  2076. */
  2077. if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
  2078. io->state = EFCT_HW_IO_STATE_FREE;
  2079. list_del_init(&io->list_entry);
  2080. efct_hw_io_free_move_correct_list(hw, io);
  2081. }
  2082. }
  2083. spin_unlock_irqrestore(&hw->io_lock, flags);
  2084. }
  2085. static int
  2086. efct_hw_flush(struct efct_hw *hw)
  2087. {
  2088. u32 i = 0;
  2089. /* Process any remaining completions */
  2090. for (i = 0; i < hw->eq_count; i++)
  2091. efct_hw_process(hw, i, ~0);
  2092. return 0;
  2093. }
  2094. int
  2095. efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
  2096. {
  2097. int rc = 0;
  2098. unsigned long flags = 0;
  2099. spin_lock_irqsave(&wq->queue->lock, flags);
  2100. if (list_empty(&wq->pending_list)) {
  2101. if (wq->free_count > 0) {
  2102. rc = _efct_hw_wq_write(wq, wqe);
  2103. } else {
  2104. INIT_LIST_HEAD(&wqe->list_entry);
  2105. list_add_tail(&wqe->list_entry, &wq->pending_list);
  2106. wq->wq_pending_count++;
  2107. }
  2108. spin_unlock_irqrestore(&wq->queue->lock, flags);
  2109. return rc;
  2110. }
  2111. INIT_LIST_HEAD(&wqe->list_entry);
  2112. list_add_tail(&wqe->list_entry, &wq->pending_list);
  2113. wq->wq_pending_count++;
  2114. while (wq->free_count > 0) {
  2115. wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe,
  2116. list_entry);
  2117. if (!wqe)
  2118. break;
  2119. list_del_init(&wqe->list_entry);
  2120. rc = _efct_hw_wq_write(wq, wqe);
  2121. if (rc)
  2122. break;
  2123. if (wqe->abort_wqe_submit_needed) {
  2124. wqe->abort_wqe_submit_needed = false;
  2125. efct_hw_fill_abort_wqe(wq->hw, wqe);
  2126. INIT_LIST_HEAD(&wqe->list_entry);
  2127. list_add_tail(&wqe->list_entry, &wq->pending_list);
  2128. wq->wq_pending_count++;
  2129. }
  2130. }
  2131. spin_unlock_irqrestore(&wq->queue->lock, flags);
  2132. return rc;
  2133. }
  2134. int
  2135. efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
  2136. {
  2137. struct efct *efct = efc->base;
  2138. return efct_hw_bls_send(efct, type, bls, NULL, NULL);
  2139. }
  2140. int
  2141. efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
  2142. void *cb, void *arg)
  2143. {
  2144. struct efct_hw *hw = &efct->hw;
  2145. struct efct_hw_io *hio;
  2146. struct sli_bls_payload bls;
  2147. int rc;
  2148. if (hw->state != EFCT_HW_STATE_ACTIVE) {
  2149. efc_log_err(hw->os,
  2150. "cannot send BLS, HW state=%d\n", hw->state);
  2151. return -EIO;
  2152. }
  2153. hio = efct_hw_io_alloc(hw);
  2154. if (!hio) {
  2155. efc_log_err(hw->os, "HIO allocation failed\n");
  2156. return -EIO;
  2157. }
  2158. hio->done = cb;
  2159. hio->arg = arg;
  2160. bls_params->xri = hio->indicator;
  2161. bls_params->tag = hio->reqtag;
  2162. if (type == FC_RCTL_BA_ACC) {
  2163. hio->type = EFCT_HW_BLS_ACC;
  2164. bls.type = SLI4_SLI_BLS_ACC;
  2165. memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
  2166. } else {
  2167. hio->type = EFCT_HW_BLS_RJT;
  2168. bls.type = SLI4_SLI_BLS_RJT;
  2169. memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
  2170. }
  2171. bls.ox_id = cpu_to_le16(bls_params->ox_id);
  2172. bls.rx_id = cpu_to_le16(bls_params->rx_id);
  2173. if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
  2174. &bls, bls_params)) {
  2175. efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
  2176. return -EIO;
  2177. }
  2178. hio->xbusy = true;
  2179. /*
  2180. * Add IO to active io wqe list before submitting, in case the
  2181. * wcqe processing preempts this thread.
  2182. */
  2183. hio->wq->use_count++;
  2184. rc = efct_hw_wq_write(hio->wq, &hio->wqe);
  2185. if (rc >= 0) {
  2186. /* non-negative return is success */
  2187. rc = 0;
  2188. } else {
  2189. /* failed to write wqe, remove from active wqe list */
  2190. efc_log_err(hw->os,
  2191. "sli_queue_write failed: %d\n", rc);
  2192. hio->xbusy = false;
  2193. }
  2194. return rc;
  2195. }
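/*
 * Usage sketch (illustrative only): responding to an ABTS with a BA_ACC.
 * The caller fills bls_params->payload with the BA_ACC contents and the
 * OX_ID/RX_ID taken from the received ABTS; ox_id, rx_id, example_cb and
 * cb_arg are hypothetical names.
 *
 *	struct sli_bls_params bls_params = {};
 *
 *	bls_params.ox_id = ox_id;
 *	bls_params.rx_id = rx_id;
 *	rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, &bls_params,
 *			      example_cb, cb_arg);
 */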
  2196. static int
  2197. efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
  2198. u32 ext_status, void *arg)
  2199. {
  2200. struct efc_disc_io *io = arg;
  2201. efc_disc_io_complete(io, length, status, ext_status);
  2202. return 0;
  2203. }
  2204. static inline void
  2205. efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
  2206. {
  2207. u8 *cmd = io->req.virt;
  2208. params->cmd = *cmd;
  2209. params->s_id = io->s_id;
  2210. params->d_id = io->d_id;
  2211. params->ox_id = io->iparam.els.ox_id;
  2212. params->rpi = io->rpi;
  2213. params->vpi = io->vpi;
  2214. params->rpi_registered = io->rpi_registered;
  2215. params->xmit_len = io->xmit_len;
  2216. params->rsp_len = io->rsp_len;
  2217. params->timeout = io->iparam.els.timeout;
  2218. }
  2219. static inline void
  2220. efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
  2221. {
  2222. params->r_ctl = io->iparam.ct.r_ctl;
  2223. params->type = io->iparam.ct.type;
  2224. params->df_ctl = io->iparam.ct.df_ctl;
  2225. params->d_id = io->d_id;
  2226. params->ox_id = io->iparam.ct.ox_id;
  2227. params->rpi = io->rpi;
  2228. params->vpi = io->vpi;
  2229. params->rpi_registered = io->rpi_registered;
  2230. params->xmit_len = io->xmit_len;
  2231. params->rsp_len = io->rsp_len;
  2232. params->timeout = io->iparam.ct.timeout;
  2233. }
  2234. /**
  2235. * efct_els_hw_srrs_send() - Send a single request and response cmd.
  2236. * @efc: efc library structure
  2237. * @io: Discovery IO used to hold els and ct cmd context.
  2238. *
  2239. * This routine supports communication sequences consisting of a single
  2240. * request and single response between two endpoints. Examples include:
  2241. * - Sending an ELS request.
  2242. * - Sending an ELS response - To send an ELS response, the caller must provide
  2243. * the OX_ID from the received request.
  2244. * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
  2245. * the caller must provide the R_CTL, TYPE, and DF_CTL
  2246. * values to place in the FC frame header.
  2247. *
  2248. * Return: Status of the request.
  2249. */
  2250. int
  2251. efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
  2252. {
  2253. struct efct *efct = efc->base;
  2254. struct efct_hw_io *hio;
  2255. struct efct_hw *hw = &efct->hw;
  2256. struct efc_dma *send = &io->req;
  2257. struct efc_dma *receive = &io->rsp;
  2258. struct sli4_sge *sge = NULL;
  2259. int rc = 0;
  2260. u32 len = io->xmit_len;
  2261. u32 sge0_flags;
  2262. u32 sge1_flags;
  2263. hio = efct_hw_io_alloc(hw);
  2264. if (!hio) {
  2265. pr_err("HIO alloc failed\n");
  2266. return -EIO;
  2267. }
2268. if (hw->state != EFCT_HW_STATE_ACTIVE) {
2269. efc_log_debug(hw->os,
2270. "cannot send SRRS, HW state=%d\n", hw->state);
/* release the HW IO allocated above; no WQE was ever posted */
efct_hw_io_free(hw, hio);
2271. return -EIO;
2272. }
  2273. hio->done = efct_els_ssrs_send_cb;
  2274. hio->arg = io;
  2275. sge = hio->sgl->virt;
  2276. /* clear both SGE */
  2277. memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
  2278. sge0_flags = le32_to_cpu(sge[0].dw2_flags);
  2279. sge1_flags = le32_to_cpu(sge[1].dw2_flags);
  2280. if (send->size) {
  2281. sge[0].buffer_address_high =
  2282. cpu_to_le32(upper_32_bits(send->phys));
  2283. sge[0].buffer_address_low =
  2284. cpu_to_le32(lower_32_bits(send->phys));
  2285. sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
  2286. sge[0].buffer_length = cpu_to_le32(len);
  2287. }
  2288. if (io->io_type == EFC_DISC_IO_ELS_REQ ||
  2289. io->io_type == EFC_DISC_IO_CT_REQ) {
  2290. sge[1].buffer_address_high =
  2291. cpu_to_le32(upper_32_bits(receive->phys));
  2292. sge[1].buffer_address_low =
  2293. cpu_to_le32(lower_32_bits(receive->phys));
  2294. sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
  2295. sge1_flags |= SLI4_SGE_LAST;
  2296. sge[1].buffer_length = cpu_to_le32(receive->size);
  2297. } else {
  2298. sge0_flags |= SLI4_SGE_LAST;
  2299. }
  2300. sge[0].dw2_flags = cpu_to_le32(sge0_flags);
  2301. sge[1].dw2_flags = cpu_to_le32(sge1_flags);
  2302. switch (io->io_type) {
  2303. case EFC_DISC_IO_ELS_REQ: {
  2304. struct sli_els_params els_params;
  2305. hio->type = EFCT_HW_ELS_REQ;
  2306. efct_fill_els_params(io, &els_params);
  2307. els_params.xri = hio->indicator;
  2308. els_params.tag = hio->reqtag;
  2309. if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
  2310. &els_params)) {
  2311. efc_log_err(hw->os, "REQ WQE error\n");
  2312. rc = -EIO;
  2313. }
  2314. break;
  2315. }
  2316. case EFC_DISC_IO_ELS_RESP: {
  2317. struct sli_els_params els_params;
  2318. hio->type = EFCT_HW_ELS_RSP;
  2319. efct_fill_els_params(io, &els_params);
  2320. els_params.xri = hio->indicator;
  2321. els_params.tag = hio->reqtag;
  2322. if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
  2323. &els_params)){
  2324. efc_log_err(hw->os, "RSP WQE error\n");
  2325. rc = -EIO;
  2326. }
  2327. break;
  2328. }
  2329. case EFC_DISC_IO_CT_REQ: {
  2330. struct sli_ct_params ct_params;
  2331. hio->type = EFCT_HW_FC_CT;
  2332. efct_fill_ct_params(io, &ct_params);
  2333. ct_params.xri = hio->indicator;
  2334. ct_params.tag = hio->reqtag;
  2335. if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
  2336. &ct_params)){
  2337. efc_log_err(hw->os, "GEN WQE error\n");
  2338. rc = -EIO;
  2339. }
  2340. break;
  2341. }
  2342. case EFC_DISC_IO_CT_RESP: {
  2343. struct sli_ct_params ct_params;
  2344. hio->type = EFCT_HW_FC_CT_RSP;
  2345. efct_fill_ct_params(io, &ct_params);
  2346. ct_params.xri = hio->indicator;
  2347. ct_params.tag = hio->reqtag;
  2348. if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
  2349. &ct_params)){
  2350. efc_log_err(hw->os, "XMIT SEQ WQE error\n");
  2351. rc = -EIO;
  2352. }
  2353. break;
  2354. }
  2355. default:
  2356. efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
  2357. rc = -EIO;
  2358. }
  2359. if (rc == 0) {
  2360. hio->xbusy = true;
  2361. /*
  2362. * Add IO to active io wqe list before submitting, in case the
  2363. * wcqe processing preempts this thread.
  2364. */
  2365. hio->wq->use_count++;
  2366. rc = efct_hw_wq_write(hio->wq, &hio->wqe);
  2367. if (rc >= 0) {
  2368. /* non-negative return is success */
  2369. rc = 0;
  2370. } else {
  2371. /* failed to write wqe, remove from active wqe list */
  2372. efc_log_err(hw->os,
  2373. "sli_queue_write failed: %d\n", rc);
  2374. hio->xbusy = false;
  2375. }
  2376. }
  2377. return rc;
  2378. }
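/*
 * Usage sketch (illustrative only): issuing an ELS request through the
 * single-request/single-response path. The discovery IO's req/rsp DMA
 * buffers, addressing fields and timeout are assumed to have been set up by
 * the libefc ELS layer beforehand; els_io, req_len and rsp_len are
 * hypothetical names.
 *
 *	els_io->io_type = EFC_DISC_IO_ELS_REQ;
 *	els_io->xmit_len = req_len;
 *	els_io->rsp_len = rsp_len;
 *	rc = efct_els_hw_srrs_send(efc, els_io);
 */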
  2379. int
  2380. efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
  2381. struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
  2382. void *cb, void *arg)
  2383. {
  2384. int rc = 0;
  2385. bool send_wqe = true;
  2386. if (!io) {
  2387. pr_err("bad parm hw=%p io=%p\n", hw, io);
  2388. return -EIO;
  2389. }
  2390. if (hw->state != EFCT_HW_STATE_ACTIVE) {
  2391. efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
  2392. return -EIO;
  2393. }
  2394. /*
  2395. * Save state needed during later stages
  2396. */
  2397. io->type = type;
  2398. io->done = cb;
  2399. io->arg = arg;
  2400. /*
  2401. * Format the work queue entry used to send the IO
  2402. */
  2403. switch (type) {
  2404. case EFCT_HW_IO_TARGET_WRITE: {
  2405. u16 *flags = &iparam->fcp_tgt.flags;
  2406. struct fcp_txrdy *xfer = io->xfer_rdy.virt;
  2407. /*
  2408. * Fill in the XFER_RDY for IF_TYPE 0 devices
  2409. */
  2410. xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
  2411. xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
  2412. if (io->xbusy)
  2413. *flags |= SLI4_IO_CONTINUATION;
  2414. else
  2415. *flags &= ~SLI4_IO_CONTINUATION;
  2416. iparam->fcp_tgt.xri = io->indicator;
  2417. iparam->fcp_tgt.tag = io->reqtag;
  2418. if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
  2419. &io->def_sgl, io->first_data_sge,
  2420. SLI4_CQ_DEFAULT,
  2421. 0, 0, &iparam->fcp_tgt)) {
  2422. efc_log_err(hw->os, "TRECEIVE WQE error\n");
  2423. rc = -EIO;
  2424. }
  2425. break;
  2426. }
  2427. case EFCT_HW_IO_TARGET_READ: {
  2428. u16 *flags = &iparam->fcp_tgt.flags;
  2429. if (io->xbusy)
  2430. *flags |= SLI4_IO_CONTINUATION;
  2431. else
  2432. *flags &= ~SLI4_IO_CONTINUATION;
  2433. iparam->fcp_tgt.xri = io->indicator;
  2434. iparam->fcp_tgt.tag = io->reqtag;
  2435. if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
  2436. &io->def_sgl, io->first_data_sge,
  2437. SLI4_CQ_DEFAULT,
  2438. 0, 0, &iparam->fcp_tgt)) {
  2439. efc_log_err(hw->os, "TSEND WQE error\n");
  2440. rc = -EIO;
  2441. }
  2442. break;
  2443. }
  2444. case EFCT_HW_IO_TARGET_RSP: {
  2445. u16 *flags = &iparam->fcp_tgt.flags;
  2446. if (io->xbusy)
  2447. *flags |= SLI4_IO_CONTINUATION;
  2448. else
  2449. *flags &= ~SLI4_IO_CONTINUATION;
  2450. iparam->fcp_tgt.xri = io->indicator;
  2451. iparam->fcp_tgt.tag = io->reqtag;
  2452. if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
  2453. &io->def_sgl, SLI4_CQ_DEFAULT,
  2454. 0, &iparam->fcp_tgt)) {
  2455. efc_log_err(hw->os, "TRSP WQE error\n");
  2456. rc = -EIO;
  2457. }
  2458. break;
  2459. }
  2460. default:
  2461. efc_log_err(hw->os, "unsupported IO type %#x\n", type);
  2462. rc = -EIO;
  2463. }
  2464. if (send_wqe && rc == 0) {
  2465. io->xbusy = true;
  2466. /*
  2467. * Add IO to active io wqe list before submitting, in case the
  2468. * wcqe processing preempts this thread.
  2469. */
  2470. hw->tcmd_wq_submit[io->wq->instance]++;
  2471. io->wq->use_count++;
  2472. rc = efct_hw_wq_write(io->wq, &io->wqe);
  2473. if (rc >= 0) {
  2474. /* non-negative return is success */
  2475. rc = 0;
  2476. } else {
  2477. /* failed to write wqe, remove from active wqe list */
  2478. efc_log_err(hw->os,
  2479. "sli_queue_write failed: %d\n", rc);
  2480. io->xbusy = false;
  2481. }
  2482. }
  2483. return rc;
  2484. }
  2485. int
  2486. efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
  2487. u8 sof, u8 eof, struct efc_dma *payload,
  2488. struct efct_hw_send_frame_context *ctx,
  2489. void (*callback)(void *arg, u8 *cqe, int status),
  2490. void *arg)
  2491. {
  2492. int rc;
  2493. struct efct_hw_wqe *wqe;
  2494. u32 xri;
  2495. struct hw_wq *wq;
  2496. wqe = &ctx->wqe;
  2497. /* populate the callback object */
  2498. ctx->hw = hw;
  2499. /* Fetch and populate request tag */
  2500. ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
  2501. if (!ctx->wqcb) {
  2502. efc_log_err(hw->os, "can't allocate request tag\n");
  2503. return -ENOSPC;
  2504. }
  2505. wq = hw->hw_wq[0];
2506. /* Set XRI and RX_ID in the header based on which WQ and which
2507. * send_frame_io we are using
2508. */
  2509. xri = wq->send_frame_io->indicator;
  2510. /* Build the send frame WQE */
  2511. rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
  2512. sof, eof, (u32 *)hdr, payload, payload->len,
  2513. EFCT_HW_SEND_FRAME_TIMEOUT, xri,
  2514. ctx->wqcb->instance_index);
  2515. if (rc) {
  2516. efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
  2517. return -EIO;
  2518. }
  2519. /* Write to WQ */
  2520. rc = efct_hw_wq_write(wq, wqe);
  2521. if (rc) {
  2522. efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
  2523. return -EIO;
  2524. }
  2525. wq->use_count++;
  2526. return 0;
  2527. }
  2528. static int
  2529. efct_hw_cb_link_stat(struct efct_hw *hw, int status,
  2530. u8 *mqe, void *arg)
  2531. {
  2532. struct sli4_cmd_read_link_stats *mbox_rsp;
  2533. struct efct_hw_link_stat_cb_arg *cb_arg = arg;
  2534. struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
  2535. u32 num_counters, i;
  2536. u32 mbox_rsp_flags = 0;
  2537. mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
  2538. mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
  2539. num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
  2540. memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
  2541. EFCT_HW_LINK_STAT_MAX);
2542. /* Fill overflow counts, mask starts from SLI4_READ_LNKSTAT_W02OF */
  2543. for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
  2544. counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2)));
  2545. counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
  2546. le32_to_cpu(mbox_rsp->linkfail_errcnt);
  2547. counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
  2548. le32_to_cpu(mbox_rsp->losssync_errcnt);
  2549. counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
  2550. le32_to_cpu(mbox_rsp->losssignal_errcnt);
  2551. counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
  2552. le32_to_cpu(mbox_rsp->primseq_errcnt);
  2553. counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
  2554. le32_to_cpu(mbox_rsp->inval_txword_errcnt);
  2555. counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
  2556. le32_to_cpu(mbox_rsp->crc_errcnt);
  2557. counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
  2558. le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
  2559. counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
  2560. le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
  2561. counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
  2562. le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
  2563. counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
  2564. le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
  2565. counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
  2566. le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
  2567. counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
  2568. le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
  2569. counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
  2570. le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
  2571. counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
  2572. le32_to_cpu(mbox_rsp->rx_eofa_cnt);
  2573. counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
  2574. le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
  2575. counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
  2576. le32_to_cpu(mbox_rsp->rx_eofni_cnt);
  2577. counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
  2578. le32_to_cpu(mbox_rsp->rx_soff_cnt);
  2579. counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
  2580. le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
  2581. counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
  2582. le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
  2583. counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
  2584. le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
  2585. if (cb_arg) {
  2586. if (cb_arg->cb) {
  2587. if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
  2588. status = le16_to_cpu(mbox_rsp->hdr.status);
  2589. cb_arg->cb(status, num_counters, counts, cb_arg->arg);
  2590. }
  2591. kfree(cb_arg);
  2592. }
  2593. return 0;
  2594. }
  2595. int
  2596. efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
  2597. u8 clear_overflow_flags, u8 clear_all_counters,
  2598. void (*cb)(int status, u32 num_counters,
  2599. struct efct_hw_link_stat_counts *counters,
  2600. void *arg),
  2601. void *arg)
  2602. {
  2603. int rc = -EIO;
  2604. struct efct_hw_link_stat_cb_arg *cb_arg;
  2605. u8 mbxdata[SLI4_BMBX_SIZE];
  2606. cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
  2607. if (!cb_arg)
  2608. return -ENOMEM;
  2609. cb_arg->cb = cb;
  2610. cb_arg->arg = arg;
  2611. /* Send the HW command */
  2612. if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
  2613. clear_overflow_flags, clear_all_counters))
  2614. rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
  2615. efct_hw_cb_link_stat, cb_arg);
  2616. if (rc)
  2617. kfree(cb_arg);
  2618. return rc;
  2619. }
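/*
 * Usage sketch (illustrative only): reading the link error counters without
 * clearing them. The completion callback receives the decoded counter array;
 * example_link_stats_done() and its use of the CRC counter are hypothetical.
 *
 *	static void example_link_stats_done(int status, u32 num_counters,
 *				struct efct_hw_link_stat_counts *counters,
 *				void *arg)
 *	{
 *		if (!status)
 *			pr_info("crc errors: %u\n",
 *				counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter);
 *	}
 *
 *	rc = efct_hw_get_link_stats(hw, 0, 0, 0,
 *				    example_link_stats_done, NULL);
 */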
  2620. static int
  2621. efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
  2622. {
  2623. struct sli4_cmd_read_status *mbox_rsp =
  2624. (struct sli4_cmd_read_status *)mqe;
  2625. struct efct_hw_host_stat_cb_arg *cb_arg = arg;
  2626. struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
  2627. u32 num_counters = EFCT_HW_HOST_STAT_MAX;
  2628. memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
  2629. EFCT_HW_HOST_STAT_MAX);
  2630. counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
  2631. le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
  2632. counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
  2633. le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
  2634. counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
  2635. le32_to_cpu(mbox_rsp->trans_frame_cnt);
  2636. counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
  2637. le32_to_cpu(mbox_rsp->recv_frame_cnt);
  2638. counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
  2639. le32_to_cpu(mbox_rsp->trans_seq_cnt);
  2640. counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
  2641. le32_to_cpu(mbox_rsp->recv_seq_cnt);
  2642. counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
  2643. le32_to_cpu(mbox_rsp->tot_exchanges_orig);
  2644. counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
  2645. le32_to_cpu(mbox_rsp->tot_exchanges_resp);
  2646. counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
  2647. le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
  2648. counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
  2649. le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
  2650. counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
  2651. le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
  2652. counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
  2653. le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
  2654. counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
  2655. le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
  2656. counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
  2657. le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
  2658. if (cb_arg) {
  2659. if (cb_arg->cb) {
  2660. if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
  2661. status = le16_to_cpu(mbox_rsp->hdr.status);
  2662. cb_arg->cb(status, num_counters, counts, cb_arg->arg);
  2663. }
  2664. kfree(cb_arg);
  2665. }
  2666. return 0;
  2667. }
  2668. int
  2669. efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
  2670. void (*cb)(int status, u32 num_counters,
  2671. struct efct_hw_host_stat_counts *counters,
  2672. void *arg),
  2673. void *arg)
  2674. {
  2675. int rc = -EIO;
  2676. struct efct_hw_host_stat_cb_arg *cb_arg;
  2677. u8 mbxdata[SLI4_BMBX_SIZE];
  2678. cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
  2679. if (!cb_arg)
  2680. return -ENOMEM;
  2681. cb_arg->cb = cb;
  2682. cb_arg->arg = arg;
  2683. /* Send the HW command to get the host stats */
  2684. if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
  2685. rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
  2686. efct_hw_cb_host_stat, cb_arg);
  2687. if (rc) {
  2688. efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
  2689. kfree(cb_arg);
  2690. }
  2691. return rc;
  2692. }
  2693. struct efct_hw_async_call_ctx {
  2694. efct_hw_async_cb_t callback;
  2695. void *arg;
  2696. u8 cmd[SLI4_BMBX_SIZE];
  2697. };
  2698. static void
  2699. efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
  2700. {
  2701. struct efct_hw_async_call_ctx *ctx = arg;
  2702. if (ctx) {
  2703. if (ctx->callback)
  2704. (*ctx->callback)(hw, status, mqe, ctx->arg);
  2705. kfree(ctx);
  2706. }
  2707. }
  2708. int
  2709. efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
  2710. {
  2711. struct efct_hw_async_call_ctx *ctx;
  2712. int rc;
  2713. /*
  2714. * Allocate a callback context (which includes the mbox cmd buffer),
  2715. * we need this to be persistent as the mbox cmd submission may be
2716. * queued and executed later.
  2717. */
  2718. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  2719. if (!ctx)
  2720. return -ENOMEM;
  2721. ctx->callback = callback;
  2722. ctx->arg = arg;
  2723. /* Build and send a NOP mailbox command */
  2724. if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
  2725. efc_log_err(hw->os, "COMMON_NOP format failure\n");
  2726. kfree(ctx);
  2727. return -EIO;
  2728. }
  2729. rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
  2730. ctx);
  2731. if (rc) {
  2732. efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
  2733. kfree(ctx);
  2734. return -EIO;
  2735. }
  2736. return 0;
  2737. }
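/*
 * Usage sketch (illustrative only): efct_hw_async_call() is a convenient way
 * to defer work into mailbox-completion context, since the NOP it issues is
 * queued behind any mailbox commands already pending. example_async_done()
 * is a hypothetical name.
 *
 *	static void example_async_done(struct efct_hw *hw, int status, u8 *mqe,
 *				       void *arg)
 *	{
 *		struct completion *comp = arg;
 *
 *		complete(comp);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(comp);
 *
 *	rc = efct_hw_async_call(hw, example_async_done, &comp);
 *	if (!rc)
 *		wait_for_completion(&comp);
 */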
  2738. static int
  2739. efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
  2740. {
  2741. struct sli4_cmd_sli_config *mbox_rsp =
  2742. (struct sli4_cmd_sli_config *)mqe;
  2743. struct sli4_rsp_cmn_write_object *wr_obj_rsp;
  2744. struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
  2745. u32 bytes_written;
  2746. u16 mbox_status;
  2747. u32 change_status;
  2748. wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
  2749. &mbox_rsp->payload.embed;
  2750. bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
  2751. mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
  2752. change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
  2753. RSP_CHANGE_STATUS);
  2754. if (cb_arg) {
  2755. if (cb_arg->cb) {
  2756. if (!status && mbox_status)
  2757. status = mbox_status;
  2758. cb_arg->cb(status, bytes_written, change_status,
  2759. cb_arg->arg);
  2760. }
  2761. kfree(cb_arg);
  2762. }
  2763. return 0;
  2764. }
  2765. int
  2766. efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
  2767. u32 offset, int last,
  2768. void (*cb)(int status, u32 bytes_written,
  2769. u32 change_status, void *arg),
  2770. void *arg)
  2771. {
  2772. int rc = -EIO;
  2773. u8 mbxdata[SLI4_BMBX_SIZE];
  2774. struct efct_hw_fw_wr_cb_arg *cb_arg;
  2775. int noc = 0;
  2776. cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
  2777. if (!cb_arg)
  2778. return -ENOMEM;
  2779. cb_arg->cb = cb;
  2780. cb_arg->arg = arg;
  2781. /* Write a portion of a firmware image to the device */
  2782. if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
  2783. noc, last, size, offset, "/prg/",
  2784. dma))
  2785. rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
  2786. efct_hw_cb_fw_write, cb_arg);
  2787. if (rc != 0) {
  2788. efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
  2789. kfree(cb_arg);
  2790. }
  2791. return rc;
  2792. }
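/*
 * Usage sketch (illustrative only): a firmware image is pushed down in
 * DMA-sized chunks, with "last" set on the final piece so the device can
 * commit the object. dma, image_len and example_fw_done() are hypothetical;
 * copying each chunk into dma.virt and waiting for the previous callback
 * before reusing the buffer are omitted for brevity.
 *
 *	for (offset = 0; offset < image_len; offset += chunk) {
 *		chunk = min_t(u32, dma.size, image_len - offset);
 *		last = (offset + chunk >= image_len);
 *		rc = efct_hw_firmware_write(hw, &dma, chunk, offset, last,
 *					    example_fw_done, arg);
 *		if (rc)
 *			break;
 *	}
 */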
  2793. static int
  2794. efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
  2795. void *arg)
  2796. {
  2797. return 0;
  2798. }
  2799. int
  2800. efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
  2801. uintptr_t value,
  2802. void (*cb)(int status, uintptr_t value, void *arg),
  2803. void *arg)
  2804. {
  2805. int rc = -EIO;
  2806. u8 link[SLI4_BMBX_SIZE];
  2807. u32 speed = 0;
  2808. u8 reset_alpa = 0;
  2809. switch (ctrl) {
  2810. case EFCT_HW_PORT_INIT:
  2811. if (!sli_cmd_config_link(&hw->sli, link))
  2812. rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
  2813. efct_hw_cb_port_control, NULL);
  2814. if (rc != 0) {
  2815. efc_log_err(hw->os, "CONFIG_LINK failed\n");
  2816. break;
  2817. }
  2818. speed = hw->config.speed;
  2819. reset_alpa = (u8)(value & 0xff);
  2820. rc = -EIO;
  2821. if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
  2822. rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
  2823. efct_hw_cb_port_control, NULL);
  2824. /* Free buffer on error, since no callback is coming */
  2825. if (rc)
  2826. efc_log_err(hw->os, "INIT_LINK failed\n");
  2827. break;
  2828. case EFCT_HW_PORT_SHUTDOWN:
  2829. if (!sli_cmd_down_link(&hw->sli, link))
  2830. rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
  2831. efct_hw_cb_port_control, NULL);
  2832. /* Free buffer on error, since no callback is coming */
  2833. if (rc)
  2834. efc_log_err(hw->os, "DOWN_LINK failed\n");
  2835. break;
  2836. default:
  2837. efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
  2838. break;
  2839. }
  2840. return rc;
  2841. }
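/*
 * Usage sketch (illustrative only): bringing the link up after HW init.
 * For EFCT_HW_PORT_INIT the value argument carries the reset ALPA in its
 * low byte; zero keeps the default, and the completion callback is optional.
 *
 *	rc = efct_hw_port_control(hw, EFCT_HW_PORT_INIT, 0, NULL, NULL);
 *	if (rc)
 *		efc_log_err(hw->os, "link init failed: %d\n", rc);
 */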

void
efct_hw_teardown(struct efct_hw *hw)
{
	u32 i = 0;
	u32 destroy_queues;
	u32 free_memory;
	struct efc_dma *dma;
	struct efct *efct = hw->os;

	destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
	free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);

	/* Cancel Sliport Healthcheck */
	if (hw->sliport_healthcheck) {
		hw->sliport_healthcheck = 0;
		efct_hw_config_sli_port_health_check(hw, 0, 0);
	}

	if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
		hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;

		efct_hw_flush(hw);

		if (list_empty(&hw->cmd_head))
			efc_log_debug(hw->os,
				      "All commands completed on MQ queue\n");
		else
			efc_log_debug(hw->os,
				      "Some cmds still pending on MQ queue\n");

		/* Cancel any remaining commands */
		efct_hw_command_cancel(hw);
	} else {
		hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
	}

	dma_free_coherent(&efct->pci->dev,
			  hw->rnode_mem.size, hw->rnode_mem.virt,
			  hw->rnode_mem.phys);
	memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));

	if (hw->io) {
		for (i = 0; i < hw->config.n_io; i++) {
			if (hw->io[i] && hw->io[i]->sgl &&
			    hw->io[i]->sgl->virt) {
				dma_free_coherent(&efct->pci->dev,
						  hw->io[i]->sgl->size,
						  hw->io[i]->sgl->virt,
						  hw->io[i]->sgl->phys);
			}
			kfree(hw->io[i]);
			hw->io[i] = NULL;
		}
		kfree(hw->io);
		hw->io = NULL;
		kfree(hw->wqe_buffs);
		hw->wqe_buffs = NULL;
	}

	dma = &hw->xfer_rdy;
	dma_free_coherent(&efct->pci->dev,
			  dma->size, dma->virt, dma->phys);
	memset(dma, 0, sizeof(struct efc_dma));

	dma = &hw->loop_map;
	dma_free_coherent(&efct->pci->dev,
			  dma->size, dma->virt, dma->phys);
	memset(dma, 0, sizeof(struct efc_dma));

	for (i = 0; i < hw->wq_count; i++)
		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
			       free_memory);

	for (i = 0; i < hw->rq_count; i++)
		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
			       free_memory);

	for (i = 0; i < hw->mq_count; i++)
		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
			       free_memory);

	for (i = 0; i < hw->cq_count; i++)
		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
			       free_memory);

	for (i = 0; i < hw->eq_count; i++)
		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
			       free_memory);

	/* Free rq buffers */
	efct_hw_rx_free(hw);

	efct_hw_queue_teardown(hw);

	kfree(hw->wq_cpu_array);

	sli_teardown(&hw->sli);

	/* record the fact that the queues are non-functional */
	hw->state = EFCT_HW_STATE_UNINITIALIZED;

	/* free sequence free pool */
	kfree(hw->seq_pool);
	hw->seq_pool = NULL;

	/* free hw_wq_callback pool */
	efct_hw_reqtag_pool_free(hw);

	mempool_destroy(hw->cmd_ctx_pool);
	mempool_destroy(hw->mbox_rqst_pool);

	/* Mark HW setup as not having been called */
	hw->hw_setup_called = false;
}
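
/*
 * Perform the requested SLI reset.  A function-level reset resets only this
 * PCI function; a firmware reset is followed by a function-level reset
 * because the firmware is left in a non-running state.  An unknown reset
 * type restores the previous HW state and fails with -EINVAL.
 */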
static int
efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
		  enum efct_hw_state prev_state)
{
	int rc = 0;

	switch (reset) {
	case EFCT_HW_RESET_FUNCTION:
		efc_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			efc_log_err(hw->os, "sli_reset failed\n");
			rc = -EIO;
		}
		break;
	case EFCT_HW_RESET_FIRMWARE:
		efc_log_debug(hw->os, "issuing firmware reset\n");
		if (sli_fw_reset(&hw->sli)) {
			efc_log_err(hw->os, "sli_soft_reset failed\n");
			rc = -EIO;
		}
		/*
		 * Because the FW reset leaves the FW in a non-running state,
		 * follow that with a regular reset.
		 */
		efc_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			efc_log_err(hw->os, "sli_reset failed\n");
			rc = -EIO;
		}
		break;
	default:
		efc_log_err(hw->os, "unknown type - no reset performed\n");
		hw->state = prev_state;
		rc = -EINVAL;
		break;
	}

	return rc;
}

int
efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
{
	int rc = 0;
	enum efct_hw_state prev_state = hw->state;

	if (hw->state != EFCT_HW_STATE_ACTIVE)
		efc_log_debug(hw->os,
			      "HW state %d is not active\n", hw->state);

	hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;

	/*
	 * If the prev_state is already reset/teardown in progress,
	 * don't continue further
	 */
	if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
	    prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
		return efct_hw_sli_reset(hw, reset, prev_state);

	if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
		efct_hw_flush(hw);

		if (list_empty(&hw->cmd_head))
			efc_log_debug(hw->os,
				      "All commands completed on MQ queue\n");
		else
			efc_log_err(hw->os,
				    "Some commands still pending on MQ queue\n");
	}

	/* Reset the chip */
	rc = efct_hw_sli_reset(hw, reset, prev_state);
	if (rc == -EINVAL)
		return -EIO;

	return rc;
}
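
/*
 * Illustrative sketch only, not part of the original driver: a hypothetical
 * recovery step that issues a function-level reset.  efct_hw_reset() leaves
 * the HW quiesced; callers are expected to re-run the normal HW setup/init
 * sequence afterwards, which is omitted here.  efct_example_recover() is an
 * assumed name.
 */
static int efct_example_recover(struct efct_hw *hw)
{
	int rc;

	rc = efct_hw_reset(hw, EFCT_HW_RESET_FUNCTION);
	if (rc)
		efc_log_err(hw->os, "function level reset failed: %d\n", rc);

	return rc;
}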