pcie.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * NXP Wireless LAN device driver: PCIE specific handling
  4. *
  5. * Copyright 2011-2020 NXP
  6. */
  7. #include <linux/iopoll.h>
  8. #include <linux/firmware.h>
  9. #include "decl.h"
  10. #include "ioctl.h"
  11. #include "util.h"
  12. #include "fw.h"
  13. #include "main.h"
  14. #include "wmm.h"
  15. #include "11n.h"
  16. #include "pcie.h"
  17. #include "pcie_quirks.h"
  18. #define PCIE_VERSION "1.0"
  19. #define DRV_NAME "Marvell mwifiex PCIe"
  20. static struct mwifiex_if_ops pcie_ops;
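/* Per-chip register maps. Each table lists the scratch/doorbell register
* offsets the host uses to exchange the command buffer address and size,
* the firmware status word and the TX/RX/event ring read-write pointers
* with a particular chip generation (8766, 8897, 8997).
*/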
  21. static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
  22. .cmd_addr_lo = PCIE_SCRATCH_0_REG,
  23. .cmd_addr_hi = PCIE_SCRATCH_1_REG,
  24. .cmd_size = PCIE_SCRATCH_2_REG,
  25. .fw_status = PCIE_SCRATCH_3_REG,
  26. .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
  27. .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
  28. .tx_rdptr = PCIE_SCRATCH_6_REG,
  29. .tx_wrptr = PCIE_SCRATCH_7_REG,
  30. .rx_rdptr = PCIE_SCRATCH_8_REG,
  31. .rx_wrptr = PCIE_SCRATCH_9_REG,
  32. .evt_rdptr = PCIE_SCRATCH_10_REG,
  33. .evt_wrptr = PCIE_SCRATCH_11_REG,
  34. .drv_rdy = PCIE_SCRATCH_12_REG,
  35. .tx_start_ptr = 0,
  36. .tx_mask = MWIFIEX_TXBD_MASK,
  37. .tx_wrap_mask = 0,
  38. .rx_mask = MWIFIEX_RXBD_MASK,
  39. .rx_wrap_mask = 0,
  40. .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
  41. .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
  42. .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
  43. .ring_flag_sop = 0,
  44. .ring_flag_eop = 0,
  45. .ring_flag_xs_sop = 0,
  46. .ring_flag_xs_eop = 0,
  47. .ring_tx_start_ptr = 0,
  48. .pfu_enabled = 0,
  49. .sleep_cookie = 1,
  50. .msix_support = 0,
  51. };
  52. static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
  53. .cmd_addr_lo = PCIE_SCRATCH_0_REG,
  54. .cmd_addr_hi = PCIE_SCRATCH_1_REG,
  55. .cmd_size = PCIE_SCRATCH_2_REG,
  56. .fw_status = PCIE_SCRATCH_3_REG,
  57. .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
  58. .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
  59. .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
  60. .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
  61. .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
  62. .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
  63. .evt_rdptr = PCIE_SCRATCH_10_REG,
  64. .evt_wrptr = PCIE_SCRATCH_11_REG,
  65. .drv_rdy = PCIE_SCRATCH_12_REG,
  66. .tx_start_ptr = 16,
  67. .tx_mask = 0x03FF0000,
  68. .tx_wrap_mask = 0x07FF0000,
  69. .rx_mask = 0x000003FF,
  70. .rx_wrap_mask = 0x000007FF,
  71. .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
  72. .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
  73. .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
  74. .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
  75. .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
  76. .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
  77. .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
  78. .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
  79. .pfu_enabled = 1,
  80. .sleep_cookie = 0,
  81. .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
  82. .fw_dump_start = PCIE_SCRATCH_14_REG,
  83. .fw_dump_end = 0xcff,
  84. .fw_dump_host_ready = 0xee,
  85. .fw_dump_read_done = 0xfe,
  86. .msix_support = 0,
  87. };
  88. static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
  89. .cmd_addr_lo = PCIE_SCRATCH_0_REG,
  90. .cmd_addr_hi = PCIE_SCRATCH_1_REG,
  91. .cmd_size = PCIE_SCRATCH_2_REG,
  92. .fw_status = PCIE_SCRATCH_3_REG,
  93. .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
  94. .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
  95. .tx_rdptr = 0xC1A4,
  96. .tx_wrptr = 0xC174,
  97. .rx_rdptr = 0xC174,
  98. .rx_wrptr = 0xC1A4,
  99. .evt_rdptr = PCIE_SCRATCH_10_REG,
  100. .evt_wrptr = PCIE_SCRATCH_11_REG,
  101. .drv_rdy = PCIE_SCRATCH_12_REG,
  102. .tx_start_ptr = 16,
  103. .tx_mask = 0x0FFF0000,
  104. .tx_wrap_mask = 0x1FFF0000,
  105. .rx_mask = 0x00000FFF,
  106. .rx_wrap_mask = 0x00001FFF,
  107. .tx_rollover_ind = BIT(28),
  108. .rx_rollover_ind = BIT(12),
  109. .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
  110. .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
  111. .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
  112. .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
  113. .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
  114. .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
  115. .pfu_enabled = 1,
  116. .sleep_cookie = 0,
  117. .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
  118. .fw_dump_start = PCIE_SCRATCH_14_REG,
  119. .fw_dump_end = 0xcff,
  120. .fw_dump_host_ready = 0xcc,
  121. .fw_dump_read_done = 0xdd,
  122. .msix_support = 0,
  123. };
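/* Firmware memory dump segment tables used by the device-dump path. Each
* entry names a firmware memory region and carries a per-region flag byte
* (0xF0..0xF7 on 8897, 0xDD on 8997) used during the dump handshake; the
* mem_ptr/mem_size members start out NULL/0 and are filled in while the
* dump is collected.
*/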
  124. static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
  125. {"ITCM", NULL, 0, 0xF0},
  126. {"DTCM", NULL, 0, 0xF1},
  127. {"SQRAM", NULL, 0, 0xF2},
  128. {"IRAM", NULL, 0, 0xF3},
  129. {"APU", NULL, 0, 0xF4},
  130. {"CIU", NULL, 0, 0xF5},
  131. {"ICU", NULL, 0, 0xF6},
  132. {"MAC", NULL, 0, 0xF7},
  133. };
  134. static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
  135. {"DUMP", NULL, 0, 0xDD},
  136. };
  137. static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
  138. .reg = &mwifiex_reg_8766,
  139. .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
  140. .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
  141. .can_dump_fw = false,
  142. .can_ext_scan = true,
  143. };
  144. static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
  145. .reg = &mwifiex_reg_8897,
  146. .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
  147. .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
  148. .can_dump_fw = true,
  149. .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
  150. .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
  151. .can_ext_scan = true,
  152. };
  153. static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
  154. .reg = &mwifiex_reg_8997,
  155. .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
  156. .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
  157. .can_dump_fw = true,
  158. .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
  159. .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
  160. .can_ext_scan = true,
  161. };
  162. static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
  163. { .compatible = "pci11ab,2b42" },
  164. { .compatible = "pci1b4b,2b42" },
  165. { }
  166. };
  167. static int mwifiex_pcie_probe_of(struct device *dev)
  168. {
  169. if (!of_match_node(mwifiex_pcie_of_match_table, dev->of_node)) {
  170. dev_err(dev, "required compatible string missing\n");
  171. return -EINVAL;
  172. }
  173. return 0;
  174. }
  175. static void mwifiex_pcie_work(struct work_struct *work);
  176. static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter);
  177. static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter);
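/* Streaming-DMA helpers: mwifiex_map_pci_memory() maps skb->data for DMA and
* stores the resulting address/length in the skb's control buffer via
* mwifiex_store_mapping(); mwifiex_unmap_pci_memory() retrieves that mapping
* and undoes it. TX/RX/event skbs and command buffers handed to the firmware
* go through this pair.
*/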
  178. static int
  179. mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
  180. size_t size, int flags)
  181. {
  182. struct pcie_service_card *card = adapter->card;
  183. struct mwifiex_dma_mapping mapping;
  184. mapping.addr = dma_map_single(&card->dev->dev, skb->data, size, flags);
  185. if (dma_mapping_error(&card->dev->dev, mapping.addr)) {
  186. mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
  187. return -1;
  188. }
  189. mapping.len = size;
  190. mwifiex_store_mapping(skb, &mapping);
  191. return 0;
  192. }
  193. static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
  194. struct sk_buff *skb, int flags)
  195. {
  196. struct pcie_service_card *card = adapter->card;
  197. struct mwifiex_dma_mapping mapping;
  198. mwifiex_get_mapping(skb, &mapping);
  199. dma_unmap_single(&card->dev->dev, mapping.addr, mapping.len, flags);
  200. }
  201. /*
  202. * This function writes data into PCIE card register.
  203. */
  204. static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data)
  205. {
  206. struct pcie_service_card *card = adapter->card;
  207. iowrite32(data, card->pci_mmap1 + reg);
  208. return 0;
  209. }
  210. /* This function reads data from PCIE card register.
  211. */
  212. static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
  213. {
  214. struct pcie_service_card *card = adapter->card;
  215. *data = ioread32(card->pci_mmap1 + reg);
  216. if (*data == 0xffffffff)
  217. return 0xffffffff;
  218. return 0;
  219. }
  220. /* This function reads u8 data from PCIE card register. */
  221. static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
  222. int reg, u8 *data)
  223. {
  224. struct pcie_service_card *card = adapter->card;
  225. *data = ioread8(card->pci_mmap1 + reg);
  226. return 0;
  227. }
  228. /*
  229. * This function reads sleep cookie and checks if FW is ready
  230. */
  231. static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
  232. {
  233. u32 cookie_value;
  234. struct pcie_service_card *card = adapter->card;
  235. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  236. if (!reg->sleep_cookie)
  237. return true;
  238. if (card->sleep_cookie_vbase) {
  239. cookie_value = get_unaligned_le32(card->sleep_cookie_vbase);
  240. mwifiex_dbg(adapter, INFO,
  241. "info: ACCESS_HW: sleep cookie=0x%x\n",
  242. cookie_value);
  243. if (cookie_value == FW_AWAKE_COOKIE)
  244. return true;
  245. }
  246. return false;
  247. }
  248. #ifdef CONFIG_PM_SLEEP
  249. /*
  250. * Kernel needs to suspend all functions separately. Therefore all
  251. * registered functions must have drivers with suspend and resume
  252. * methods. Failing that the kernel simply removes the whole card.
  253. *
254. * If not already suspended, this function allocates and sends a host
  255. * sleep activate request to the firmware and turns off the traffic.
  256. */
  257. static int mwifiex_pcie_suspend(struct device *dev)
  258. {
  259. struct mwifiex_adapter *adapter;
  260. struct pcie_service_card *card = dev_get_drvdata(dev);
  261. /* Might still be loading firmware */
  262. wait_for_completion(&card->fw_done);
  263. adapter = card->adapter;
  264. if (!adapter) {
  265. dev_err(dev, "adapter is not valid\n");
  266. return 0;
  267. }
  268. mwifiex_enable_wake(adapter);
  269. /* Enable the Host Sleep */
  270. if (!mwifiex_enable_hs(adapter)) {
  271. mwifiex_dbg(adapter, ERROR,
  272. "cmd: failed to suspend\n");
  273. clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
  274. mwifiex_disable_wake(adapter);
  275. return -EFAULT;
  276. }
  277. flush_workqueue(adapter->workqueue);
  278. /* Indicate device suspended */
  279. set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
  280. clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
  281. return 0;
  282. }
  283. /*
  284. * Kernel needs to suspend all functions separately. Therefore all
  285. * registered functions must have drivers with suspend and resume
  286. * methods. Failing that the kernel simply removes the whole card.
  287. *
288. * If not already resumed, this function turns on the traffic and
  289. * sends a host sleep cancel request to the firmware.
  290. */
  291. static int mwifiex_pcie_resume(struct device *dev)
  292. {
  293. struct mwifiex_adapter *adapter;
  294. struct pcie_service_card *card = dev_get_drvdata(dev);
  295. if (!card->adapter) {
  296. dev_err(dev, "adapter structure is not valid\n");
  297. return 0;
  298. }
  299. adapter = card->adapter;
  300. if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
  301. mwifiex_dbg(adapter, WARN,
  302. "Device already resumed\n");
  303. return 0;
  304. }
  305. clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
  306. mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
  307. MWIFIEX_ASYNC_CMD);
  308. mwifiex_disable_wake(adapter);
  309. return 0;
  310. }
  311. #endif
  312. /*
  313. * This function probes an mwifiex device and registers it. It allocates
  314. * the card structure, enables PCIE function number and initiates the
  315. * device registration and initialization procedure by adding a logical
  316. * interface.
  317. */
  318. static int mwifiex_pcie_probe(struct pci_dev *pdev,
  319. const struct pci_device_id *ent)
  320. {
  321. struct pcie_service_card *card;
  322. int ret;
  323. pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
  324. pdev->vendor, pdev->device, pdev->revision);
  325. card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
  326. if (!card)
  327. return -ENOMEM;
  328. init_completion(&card->fw_done);
  329. card->dev = pdev;
  330. if (ent->driver_data) {
  331. struct mwifiex_pcie_device *data = (void *)ent->driver_data;
  332. card->pcie.reg = data->reg;
  333. card->pcie.blksz_fw_dl = data->blksz_fw_dl;
  334. card->pcie.tx_buf_size = data->tx_buf_size;
  335. card->pcie.can_dump_fw = data->can_dump_fw;
  336. card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl;
  337. card->pcie.num_mem_types = data->num_mem_types;
  338. card->pcie.can_ext_scan = data->can_ext_scan;
  339. INIT_WORK(&card->work, mwifiex_pcie_work);
  340. }
341. /* Device tree node parsing and platform-specific configuration */
  342. if (pdev->dev.of_node) {
  343. ret = mwifiex_pcie_probe_of(&pdev->dev);
  344. if (ret)
  345. return ret;
  346. }
  347. /* check quirks */
  348. mwifiex_initialize_quirks(card);
  349. if (mwifiex_add_card(card, &card->fw_done, &pcie_ops,
  350. MWIFIEX_PCIE, &pdev->dev)) {
  351. pr_err("%s failed\n", __func__);
  352. return -1;
  353. }
  354. return 0;
  355. }
  356. /*
  357. * This function removes the interface and frees up the card structure.
  358. */
  359. static void mwifiex_pcie_remove(struct pci_dev *pdev)
  360. {
  361. struct pcie_service_card *card;
  362. struct mwifiex_adapter *adapter;
  363. struct mwifiex_private *priv;
  364. const struct mwifiex_pcie_card_reg *reg;
  365. u32 fw_status;
  366. card = pci_get_drvdata(pdev);
  367. wait_for_completion(&card->fw_done);
  368. adapter = card->adapter;
  369. if (!adapter || !adapter->priv_num)
  370. return;
  371. reg = card->pcie.reg;
  372. if (reg)
  373. mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
  374. else
  375. fw_status = -1;
  376. if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) {
  377. mwifiex_deauthenticate_all(adapter);
  378. priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
  379. mwifiex_disable_auto_ds(priv);
  380. mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
  381. }
  382. mwifiex_remove_card(adapter);
  383. }
  384. static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
  385. {
  386. mwifiex_pcie_remove(pdev);
  387. return;
  388. }
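/* devcoredump hook wired up through the driver's .coredump callback below.
* It schedules the card's work item to collect a device dump unless a dump
* request is already pending (test_and_set_bit() prevents double scheduling).
*/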
  389. static void mwifiex_pcie_coredump(struct device *dev)
  390. {
  391. struct pci_dev *pdev;
  392. struct pcie_service_card *card;
  393. pdev = container_of(dev, struct pci_dev, dev);
  394. card = pci_get_drvdata(pdev);
  395. if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
  396. &card->work_flags))
  397. schedule_work(&card->work);
  398. }
  399. static const struct pci_device_id mwifiex_ids[] = {
  400. {
  401. PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
  402. PCI_ANY_ID, PCI_ANY_ID, 0, 0,
  403. .driver_data = (unsigned long)&mwifiex_pcie8766,
  404. },
  405. {
  406. PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
  407. PCI_ANY_ID, PCI_ANY_ID, 0, 0,
  408. .driver_data = (unsigned long)&mwifiex_pcie8897,
  409. },
  410. {
  411. PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
  412. PCI_ANY_ID, PCI_ANY_ID, 0, 0,
  413. .driver_data = (unsigned long)&mwifiex_pcie8997,
  414. },
  415. {
  416. PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
  417. PCI_ANY_ID, PCI_ANY_ID, 0, 0,
  418. .driver_data = (unsigned long)&mwifiex_pcie8997,
  419. },
  420. {},
  421. };
  422. MODULE_DEVICE_TABLE(pci, mwifiex_ids);
  423. /*
  424. * Cleanup all software without cleaning anything related to PCIe and HW.
  425. */
  426. static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
  427. {
  428. struct pcie_service_card *card = pci_get_drvdata(pdev);
  429. struct mwifiex_adapter *adapter = card->adapter;
  430. if (!adapter) {
  431. dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
  432. __func__);
  433. return;
  434. }
  435. mwifiex_dbg(adapter, INFO,
  436. "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Pre-FLR\n",
  437. __func__, pdev->vendor, pdev->device, pdev->revision);
  438. mwifiex_shutdown_sw(adapter);
  439. clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
  440. clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
441. /* On MS Surface gen4+ devices, FLR isn't enough to recover from
  442. * hangups, so we power-cycle the card instead.
  443. */
  444. if (card->quirks & QUIRK_FW_RST_D3COLD)
  445. mwifiex_pcie_reset_d3cold_quirk(pdev);
  446. mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
  447. card->pci_reset_ongoing = true;
  448. }
  449. /*
  450. * Kernel stores and restores PCIe function context before and after performing
  451. * FLR respectively. Reconfigure the software and firmware including firmware
  452. * redownload.
  453. */
  454. static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
  455. {
  456. struct pcie_service_card *card = pci_get_drvdata(pdev);
  457. struct mwifiex_adapter *adapter = card->adapter;
  458. int ret;
  459. if (!adapter) {
  460. dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
  461. __func__);
  462. return;
  463. }
  464. mwifiex_dbg(adapter, INFO,
  465. "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Post-FLR\n",
  466. __func__, pdev->vendor, pdev->device, pdev->revision);
  467. ret = mwifiex_reinit_sw(adapter);
  468. if (ret)
  469. dev_err(&pdev->dev, "reinit failed: %d\n", ret);
  470. else
  471. mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
  472. card->pci_reset_ongoing = false;
  473. }
  474. static const struct pci_error_handlers mwifiex_pcie_err_handler = {
  475. .reset_prepare = mwifiex_pcie_reset_prepare,
  476. .reset_done = mwifiex_pcie_reset_done,
  477. };
  478. #ifdef CONFIG_PM_SLEEP
  479. /* Power Management Hooks */
  480. static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
  481. mwifiex_pcie_resume);
  482. #endif
  483. /* PCI Device Driver */
  484. static struct pci_driver mwifiex_pcie = {
  485. .name = "mwifiex_pcie",
  486. .id_table = mwifiex_ids,
  487. .probe = mwifiex_pcie_probe,
  488. .remove = mwifiex_pcie_remove,
  489. .driver = {
  490. .coredump = mwifiex_pcie_coredump,
  491. #ifdef CONFIG_PM_SLEEP
  492. .pm = &mwifiex_pcie_pm_ops,
  493. #endif
  494. },
  495. .shutdown = mwifiex_pcie_shutdown,
  496. .err_handler = &mwifiex_pcie_err_handler,
  497. };
  498. /*
499. * This function adds a delay loop to ensure the FW is awake before proceeding.
  500. */
  501. static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter)
  502. {
  503. int i = 0;
  504. while (mwifiex_pcie_ok_to_access_hw(adapter)) {
  505. i++;
  506. usleep_range(10, 20);
  507. /* 50ms max wait */
  508. if (i == 5000)
  509. break;
  510. }
  511. return;
  512. }
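/* Polls the command-response buffer for the sleep cookie written back by the
* firmware. The buffer is streaming-DMA mapped, so it is synced for the CPU
* before every read and handed back to the device afterwards; the loop gives
* up after max_delay_loop_cnt iterations spaced 20-30us apart.
*/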
  513. static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
  514. u32 max_delay_loop_cnt)
  515. {
  516. struct pcie_service_card *card = adapter->card;
  517. u8 *buffer;
  518. u32 sleep_cookie, count;
  519. struct sk_buff *cmdrsp = card->cmdrsp_buf;
  520. for (count = 0; count < max_delay_loop_cnt; count++) {
  521. dma_sync_single_for_cpu(&card->dev->dev,
  522. MWIFIEX_SKB_DMA_ADDR(cmdrsp),
  523. sizeof(sleep_cookie), DMA_FROM_DEVICE);
  524. buffer = cmdrsp->data;
  525. sleep_cookie = get_unaligned_le32(buffer);
  526. if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
  527. mwifiex_dbg(adapter, INFO,
  528. "sleep cookie found at count %d\n", count);
  529. break;
  530. }
  531. dma_sync_single_for_device(&card->dev->dev,
  532. MWIFIEX_SKB_DMA_ADDR(cmdrsp),
  533. sizeof(sleep_cookie),
  534. DMA_FROM_DEVICE);
  535. usleep_range(20, 30);
  536. }
  537. if (count >= max_delay_loop_cnt)
  538. mwifiex_dbg(adapter, INFO,
  539. "max count reached while accessing sleep cookie\n");
  540. }
  541. #define N_WAKEUP_TRIES_SHORT_INTERVAL 15
  542. #define N_WAKEUP_TRIES_LONG_INTERVAL 35
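/* Wakeup is attempted in two polling rounds built on read_poll_timeout():
* each iteration performs the "op" (writing FIRMWARE_READY_PCIE to
* reg->fw_status) and then checks the condition, i.e. whether an interrupt
* has arrived (adapter->int_status != 0). The first round polls every 500us
* for up to N_WAKEUP_TRIES_SHORT_INTERVAL tries; only if that times out does
* the slower 10ms round run.
*/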
543. /* This function wakes up the card by reading the fw_status register. */
  544. static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
  545. {
  546. struct pcie_service_card *card = adapter->card;
  547. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  548. int retval __maybe_unused;
  549. mwifiex_dbg(adapter, EVENT,
  550. "event: Wakeup device...\n");
  551. if (reg->sleep_cookie)
  552. mwifiex_pcie_dev_wakeup_delay(adapter);
  553. /* The 88W8897 PCIe+USB firmware (latest version 15.68.19.p21) sometimes
  554. * appears to ignore or miss our wakeup request, so we continue trying
  555. * until we receive an interrupt from the card.
  556. */
  557. if (read_poll_timeout(mwifiex_write_reg, retval,
  558. READ_ONCE(adapter->int_status) != 0,
  559. 500, 500 * N_WAKEUP_TRIES_SHORT_INTERVAL,
  560. false,
  561. adapter, reg->fw_status, FIRMWARE_READY_PCIE)) {
  562. if (read_poll_timeout(mwifiex_write_reg, retval,
  563. READ_ONCE(adapter->int_status) != 0,
  564. 10000, 10000 * N_WAKEUP_TRIES_LONG_INTERVAL,
  565. false,
  566. adapter, reg->fw_status, FIRMWARE_READY_PCIE)) {
  567. mwifiex_dbg(adapter, ERROR,
  568. "Firmware didn't wake up\n");
  569. return -EIO;
  570. }
  571. }
  572. if (reg->sleep_cookie) {
  573. mwifiex_pcie_dev_wakeup_delay(adapter);
  574. mwifiex_dbg(adapter, INFO,
  575. "PCIE wakeup: Setting PS_STATE_AWAKE\n");
  576. adapter->ps_state = PS_STATE_AWAKE;
  577. }
  578. return 0;
  579. }
  580. /*
  581. * This function is called after the card has woken up.
  582. *
  583. * The card configuration register is reset.
  584. */
  585. static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
  586. {
  587. mwifiex_dbg(adapter, CMD,
  588. "cmd: Wakeup device completed\n");
  589. return 0;
  590. }
  591. /*
  592. * This function disables the host interrupt.
  593. *
  594. * The host interrupt mask is read, the disable bit is reset and
  595. * written back to the card host interrupt mask register.
  596. */
  597. static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter)
  598. {
  599. if (mwifiex_pcie_ok_to_access_hw(adapter)) {
  600. if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
  601. 0x00000000)) {
  602. mwifiex_dbg(adapter, ERROR,
  603. "Disable host interrupt failed\n");
  604. return -1;
  605. }
  606. }
  607. atomic_set(&adapter->tx_hw_pending, 0);
  608. return 0;
  609. }
  610. static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter)
  611. {
  612. WARN_ON(mwifiex_pcie_disable_host_int(adapter));
  613. }
  614. /*
  615. * This function enables the host interrupt.
  616. *
  617. * The host interrupt enable mask is written to the card
  618. * host interrupt mask register.
  619. */
  620. static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
  621. {
  622. if (mwifiex_pcie_ok_to_access_hw(adapter)) {
  623. /* Simply write the mask to the register */
  624. if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
  625. HOST_INTR_MASK)) {
  626. mwifiex_dbg(adapter, ERROR,
  627. "Enable host interrupt failed\n");
  628. return -1;
  629. }
  630. }
  631. return 0;
  632. }
  633. /*
  634. * This function initializes TX buffer ring descriptors
  635. */
  636. static int mwifiex_init_txq_ring(struct mwifiex_adapter *adapter)
  637. {
  638. struct pcie_service_card *card = adapter->card;
  639. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  640. struct mwifiex_pcie_buf_desc *desc;
  641. struct mwifiex_pfu_buf_desc *desc2;
  642. int i;
  643. for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
  644. card->tx_buf_list[i] = NULL;
  645. if (reg->pfu_enabled) {
  646. card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
  647. (sizeof(*desc2) * i);
  648. desc2 = card->txbd_ring[i];
  649. memset(desc2, 0, sizeof(*desc2));
  650. } else {
  651. card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
  652. (sizeof(*desc) * i);
  653. desc = card->txbd_ring[i];
  654. memset(desc, 0, sizeof(*desc));
  655. }
  656. }
  657. return 0;
  658. }
659. /* This function initializes RX buffer ring descriptors. Each SKB is allocated
660. * here; after the PCI memory is mapped, its physical address is assigned to the
661. * PCIe RX buffer descriptor's physical address field.
  662. */
  663. static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
  664. {
  665. struct pcie_service_card *card = adapter->card;
  666. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  667. struct sk_buff *skb;
  668. struct mwifiex_pcie_buf_desc *desc;
  669. struct mwifiex_pfu_buf_desc *desc2;
  670. dma_addr_t buf_pa;
  671. int i;
  672. for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
  673. /* Allocate skb here so that firmware can DMA data from it */
  674. skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
  675. GFP_KERNEL);
  676. if (!skb) {
  677. mwifiex_dbg(adapter, ERROR,
  678. "Unable to allocate skb for RX ring.\n");
  679. return -ENOMEM;
  680. }
  681. if (mwifiex_map_pci_memory(adapter, skb,
  682. MWIFIEX_RX_DATA_BUF_SIZE,
  683. DMA_FROM_DEVICE)) {
  684. kfree_skb(skb);
  685. return -ENOMEM;
  686. }
  687. buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
  688. mwifiex_dbg(adapter, INFO,
  689. "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
  690. skb, skb->len, skb->data, (u32)buf_pa,
  691. (u32)((u64)buf_pa >> 32));
  692. card->rx_buf_list[i] = skb;
  693. if (reg->pfu_enabled) {
  694. card->rxbd_ring[i] = (void *)card->rxbd_ring_vbase +
  695. (sizeof(*desc2) * i);
  696. desc2 = card->rxbd_ring[i];
  697. desc2->paddr = buf_pa;
  698. desc2->len = (u16)skb->len;
  699. desc2->frag_len = (u16)skb->len;
  700. desc2->flags = reg->ring_flag_eop | reg->ring_flag_sop;
  701. desc2->offset = 0;
  702. } else {
  703. card->rxbd_ring[i] = (void *)(card->rxbd_ring_vbase +
  704. (sizeof(*desc) * i));
  705. desc = card->rxbd_ring[i];
  706. desc->paddr = buf_pa;
  707. desc->len = (u16)skb->len;
  708. desc->flags = 0;
  709. }
  710. }
  711. return 0;
  712. }
713. /* This function initializes event buffer ring descriptors. Each SKB is
714. * allocated here; after the PCI memory is mapped, its physical address is
715. * assigned to the PCIe event buffer descriptor's physical address field.
  716. */
  717. static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
  718. {
  719. struct pcie_service_card *card = adapter->card;
  720. struct mwifiex_evt_buf_desc *desc;
  721. struct sk_buff *skb;
  722. dma_addr_t buf_pa;
  723. int i;
  724. for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
  725. /* Allocate skb here so that firmware can DMA data from it */
  726. skb = dev_alloc_skb(MAX_EVENT_SIZE);
  727. if (!skb) {
  728. mwifiex_dbg(adapter, ERROR,
  729. "Unable to allocate skb for EVENT buf.\n");
  730. return -ENOMEM;
  731. }
  732. skb_put(skb, MAX_EVENT_SIZE);
  733. if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
  734. DMA_FROM_DEVICE)) {
  735. kfree_skb(skb);
  736. return -ENOMEM;
  737. }
  738. buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
  739. mwifiex_dbg(adapter, EVENT,
  740. "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
  741. skb, skb->len, skb->data, (u32)buf_pa,
  742. (u32)((u64)buf_pa >> 32));
  743. card->evt_buf_list[i] = skb;
  744. card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
  745. (sizeof(*desc) * i));
  746. desc = card->evtbd_ring[i];
  747. desc->paddr = buf_pa;
  748. desc->len = (u16)skb->len;
  749. desc->flags = 0;
  750. }
  751. return 0;
  752. }
753. /* This function cleans up the TX buffer ring. If an entry in the buffer list
754. * holds a valid SKB address, the associated SKB is freed.
  755. */
  756. static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
  757. {
  758. struct pcie_service_card *card = adapter->card;
  759. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  760. struct sk_buff *skb;
  761. struct mwifiex_pcie_buf_desc *desc;
  762. struct mwifiex_pfu_buf_desc *desc2;
  763. int i;
  764. for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
  765. if (reg->pfu_enabled) {
  766. desc2 = card->txbd_ring[i];
  767. if (card->tx_buf_list[i]) {
  768. skb = card->tx_buf_list[i];
  769. mwifiex_unmap_pci_memory(adapter, skb,
  770. DMA_TO_DEVICE);
  771. dev_kfree_skb_any(skb);
  772. }
  773. memset(desc2, 0, sizeof(*desc2));
  774. } else {
  775. desc = card->txbd_ring[i];
  776. if (card->tx_buf_list[i]) {
  777. skb = card->tx_buf_list[i];
  778. mwifiex_unmap_pci_memory(adapter, skb,
  779. DMA_TO_DEVICE);
  780. dev_kfree_skb_any(skb);
  781. }
  782. memset(desc, 0, sizeof(*desc));
  783. }
  784. card->tx_buf_list[i] = NULL;
  785. }
  786. atomic_set(&adapter->tx_hw_pending, 0);
  787. return;
  788. }
789. /* This function cleans up the RX buffer ring. If an entry in the buffer list
790. * holds a valid SKB address, the associated SKB is freed.
  791. */
  792. static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
  793. {
  794. struct pcie_service_card *card = adapter->card;
  795. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  796. struct mwifiex_pcie_buf_desc *desc;
  797. struct mwifiex_pfu_buf_desc *desc2;
  798. struct sk_buff *skb;
  799. int i;
  800. for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
  801. if (reg->pfu_enabled) {
  802. desc2 = card->rxbd_ring[i];
  803. if (card->rx_buf_list[i]) {
  804. skb = card->rx_buf_list[i];
  805. mwifiex_unmap_pci_memory(adapter, skb,
  806. DMA_FROM_DEVICE);
  807. dev_kfree_skb_any(skb);
  808. }
  809. memset(desc2, 0, sizeof(*desc2));
  810. } else {
  811. desc = card->rxbd_ring[i];
  812. if (card->rx_buf_list[i]) {
  813. skb = card->rx_buf_list[i];
  814. mwifiex_unmap_pci_memory(adapter, skb,
  815. DMA_FROM_DEVICE);
  816. dev_kfree_skb_any(skb);
  817. }
  818. memset(desc, 0, sizeof(*desc));
  819. }
  820. card->rx_buf_list[i] = NULL;
  821. }
  822. return;
  823. }
824. /* This function cleans up the event buffer ring. If an entry in the buffer
825. * list holds a valid SKB address, the associated SKB is freed.
  826. */
  827. static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
  828. {
  829. struct pcie_service_card *card = adapter->card;
  830. struct mwifiex_evt_buf_desc *desc;
  831. struct sk_buff *skb;
  832. int i;
  833. for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
  834. desc = card->evtbd_ring[i];
  835. if (card->evt_buf_list[i]) {
  836. skb = card->evt_buf_list[i];
  837. mwifiex_unmap_pci_memory(adapter, skb,
  838. DMA_FROM_DEVICE);
  839. dev_kfree_skb_any(skb);
  840. }
  841. card->evt_buf_list[i] = NULL;
  842. memset(desc, 0, sizeof(*desc));
  843. }
  844. return;
  845. }
  846. /* This function creates buffer descriptor ring for TX
  847. */
  848. static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
  849. {
  850. struct pcie_service_card *card = adapter->card;
  851. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  852. /*
853. * The driver maintains the write pointer and the firmware maintains the read
854. * pointer. The write pointer starts at 0 (zero) while the read pointer
855. * starts at zero with the rollover bit set.
  856. */
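/* In effect, the rollover bit is what disambiguates rd == wr: when the
* index bits (ptr & tx_mask) wrap past the last descriptor they reset and
* the rollover bit is toggled (see the XOR against reg->tx_rollover_ind in
* mwifiex_pcie_send_data() and mwifiex_pcie_send_data_complete()), so the
* ring is empty when indices and rollover bits both match, and full when
* the indices match but the rollover bits differ.
*/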
  857. card->txbd_wrptr = 0;
  858. if (reg->pfu_enabled)
  859. card->txbd_rdptr = 0;
  860. else
  861. card->txbd_rdptr |= reg->tx_rollover_ind;
862. /* allocate shared memory for the BD ring and divide it into
863. several descriptors */
  864. if (reg->pfu_enabled)
  865. card->txbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
  866. MWIFIEX_MAX_TXRX_BD;
  867. else
  868. card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
  869. MWIFIEX_MAX_TXRX_BD;
  870. mwifiex_dbg(adapter, INFO,
  871. "info: txbd_ring: Allocating %d bytes\n",
  872. card->txbd_ring_size);
  873. card->txbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
  874. card->txbd_ring_size,
  875. &card->txbd_ring_pbase,
  876. GFP_KERNEL);
  877. if (!card->txbd_ring_vbase) {
  878. mwifiex_dbg(adapter, ERROR,
  879. "allocate coherent memory (%d bytes) failed!\n",
  880. card->txbd_ring_size);
  881. return -ENOMEM;
  882. }
  883. mwifiex_dbg(adapter, DATA,
  884. "info: txbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
  885. card->txbd_ring_vbase, (u32)card->txbd_ring_pbase,
  886. (u32)((u64)card->txbd_ring_pbase >> 32),
  887. card->txbd_ring_size);
  888. return mwifiex_init_txq_ring(adapter);
  889. }
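/* Tears down the TX buffer descriptor ring: frees any still-mapped skbs via
* mwifiex_cleanup_txq_ring(), releases the coherent ring memory, and resets
* the ring state (read pointer back to the rollover-bit-set initial value).
*/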
  890. static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
  891. {
  892. struct pcie_service_card *card = adapter->card;
  893. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  894. mwifiex_cleanup_txq_ring(adapter);
  895. if (card->txbd_ring_vbase)
  896. dma_free_coherent(&card->dev->dev, card->txbd_ring_size,
  897. card->txbd_ring_vbase,
  898. card->txbd_ring_pbase);
  899. card->txbd_ring_size = 0;
  900. card->txbd_wrptr = 0;
  901. card->txbd_rdptr = 0 | reg->tx_rollover_ind;
  902. card->txbd_ring_vbase = NULL;
  903. card->txbd_ring_pbase = 0;
  904. return 0;
  905. }
  906. /*
  907. * This function creates buffer descriptor ring for RX
  908. */
  909. static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
  910. {
  911. int ret;
  912. struct pcie_service_card *card = adapter->card;
  913. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  914. /*
915. * The driver maintains the read pointer and the firmware maintains the write
916. * pointer. The write pointer starts at 0 (zero) while the read pointer
917. * starts at zero with the rollover bit set.
  918. */
  919. card->rxbd_wrptr = 0;
  920. card->rxbd_rdptr = reg->rx_rollover_ind;
  921. if (reg->pfu_enabled)
  922. card->rxbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
  923. MWIFIEX_MAX_TXRX_BD;
  924. else
  925. card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
  926. MWIFIEX_MAX_TXRX_BD;
  927. mwifiex_dbg(adapter, INFO,
  928. "info: rxbd_ring: Allocating %d bytes\n",
  929. card->rxbd_ring_size);
  930. card->rxbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
  931. card->rxbd_ring_size,
  932. &card->rxbd_ring_pbase,
  933. GFP_KERNEL);
  934. if (!card->rxbd_ring_vbase) {
  935. mwifiex_dbg(adapter, ERROR,
  936. "allocate coherent memory (%d bytes) failed!\n",
  937. card->rxbd_ring_size);
  938. return -ENOMEM;
  939. }
  940. mwifiex_dbg(adapter, DATA,
  941. "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
  942. card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
  943. (u32)((u64)card->rxbd_ring_pbase >> 32),
  944. card->rxbd_ring_size);
  945. ret = mwifiex_init_rxq_ring(adapter);
  946. if (ret)
  947. mwifiex_pcie_delete_rxbd_ring(adapter);
  948. return ret;
  949. }
  950. /*
951. * This function deletes the buffer descriptor ring for RX.
  952. */
  953. static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
  954. {
  955. struct pcie_service_card *card = adapter->card;
  956. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  957. mwifiex_cleanup_rxq_ring(adapter);
  958. if (card->rxbd_ring_vbase)
  959. dma_free_coherent(&card->dev->dev, card->rxbd_ring_size,
  960. card->rxbd_ring_vbase,
  961. card->rxbd_ring_pbase);
  962. card->rxbd_ring_size = 0;
  963. card->rxbd_wrptr = 0;
  964. card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
  965. card->rxbd_ring_vbase = NULL;
  966. card->rxbd_ring_pbase = 0;
  967. return 0;
  968. }
  969. /*
  970. * This function creates buffer descriptor ring for Events
  971. */
  972. static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
  973. {
  974. int ret;
  975. struct pcie_service_card *card = adapter->card;
  976. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  977. /*
978. * The driver maintains the read pointer and the firmware maintains the write
979. * pointer. The write pointer starts at 0 (zero) while the read pointer
980. * starts at zero with the rollover bit set.
  981. */
  982. card->evtbd_wrptr = 0;
  983. card->evtbd_rdptr = reg->evt_rollover_ind;
  984. card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
  985. MWIFIEX_MAX_EVT_BD;
  986. mwifiex_dbg(adapter, INFO,
  987. "info: evtbd_ring: Allocating %d bytes\n",
  988. card->evtbd_ring_size);
  989. card->evtbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
  990. card->evtbd_ring_size,
  991. &card->evtbd_ring_pbase,
  992. GFP_KERNEL);
  993. if (!card->evtbd_ring_vbase) {
  994. mwifiex_dbg(adapter, ERROR,
  995. "allocate coherent memory (%d bytes) failed!\n",
  996. card->evtbd_ring_size);
  997. return -ENOMEM;
  998. }
  999. mwifiex_dbg(adapter, EVENT,
  1000. "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
  1001. card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
  1002. (u32)((u64)card->evtbd_ring_pbase >> 32),
  1003. card->evtbd_ring_size);
  1004. ret = mwifiex_pcie_init_evt_ring(adapter);
  1005. if (ret)
  1006. mwifiex_pcie_delete_evtbd_ring(adapter);
  1007. return ret;
  1008. }
  1009. /*
1010. * This function deletes the buffer descriptor ring for events.
  1011. */
  1012. static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
  1013. {
  1014. struct pcie_service_card *card = adapter->card;
  1015. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1016. mwifiex_cleanup_evt_ring(adapter);
  1017. if (card->evtbd_ring_vbase)
  1018. dma_free_coherent(&card->dev->dev, card->evtbd_ring_size,
  1019. card->evtbd_ring_vbase,
  1020. card->evtbd_ring_pbase);
  1021. card->evtbd_wrptr = 0;
  1022. card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
  1023. card->evtbd_ring_size = 0;
  1024. card->evtbd_ring_vbase = NULL;
  1025. card->evtbd_ring_pbase = 0;
  1026. return 0;
  1027. }
  1028. /*
  1029. * This function allocates a buffer for CMDRSP
  1030. */
  1031. static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
  1032. {
  1033. struct pcie_service_card *card = adapter->card;
  1034. struct sk_buff *skb;
  1035. /* Allocate memory for receiving command response data */
  1036. skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
  1037. if (!skb) {
  1038. mwifiex_dbg(adapter, ERROR,
  1039. "Unable to allocate skb for command response data.\n");
  1040. return -ENOMEM;
  1041. }
  1042. skb_put(skb, MWIFIEX_UPLD_SIZE);
  1043. if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
  1044. DMA_FROM_DEVICE)) {
  1045. kfree_skb(skb);
  1046. return -1;
  1047. }
  1048. card->cmdrsp_buf = skb;
  1049. return 0;
  1050. }
  1051. /*
  1052. * This function deletes a buffer for CMDRSP
  1053. */
  1054. static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
  1055. {
  1056. struct pcie_service_card *card;
  1057. if (!adapter)
  1058. return 0;
  1059. card = adapter->card;
  1060. if (card && card->cmdrsp_buf) {
  1061. mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
  1062. DMA_FROM_DEVICE);
  1063. dev_kfree_skb_any(card->cmdrsp_buf);
  1064. card->cmdrsp_buf = NULL;
  1065. }
  1066. if (card && card->cmd_buf) {
  1067. mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
  1068. DMA_TO_DEVICE);
  1069. dev_kfree_skb_any(card->cmd_buf);
  1070. card->cmd_buf = NULL;
  1071. }
  1072. return 0;
  1073. }
  1074. /*
  1075. * This function allocates a buffer for sleep cookie
  1076. */
  1077. static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
  1078. {
  1079. struct pcie_service_card *card = adapter->card;
  1080. u32 *cookie;
  1081. card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
  1082. sizeof(u32),
  1083. &card->sleep_cookie_pbase,
  1084. GFP_KERNEL);
  1085. if (!card->sleep_cookie_vbase) {
  1086. mwifiex_dbg(adapter, ERROR,
  1087. "dma_alloc_coherent failed!\n");
  1088. return -ENOMEM;
  1089. }
  1090. cookie = (u32 *)card->sleep_cookie_vbase;
  1091. /* Init val of Sleep Cookie */
  1092. *cookie = FW_AWAKE_COOKIE;
  1093. mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", *cookie);
  1094. return 0;
  1095. }
  1096. /*
  1097. * This function deletes buffer for sleep cookie
  1098. */
  1099. static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
  1100. {
  1101. struct pcie_service_card *card;
  1102. if (!adapter)
  1103. return 0;
  1104. card = adapter->card;
  1105. if (card && card->sleep_cookie_vbase) {
  1106. dma_free_coherent(&card->dev->dev, sizeof(u32),
  1107. card->sleep_cookie_vbase,
  1108. card->sleep_cookie_pbase);
  1109. card->sleep_cookie_vbase = NULL;
  1110. }
  1111. return 0;
  1112. }
1113. /* This function flushes the TX buffer descriptor ring.
1114. * Registered as a handler, it is also called while cleaning up TX/RX
1115. * during disconnect or BSS stop.
  1116. */
  1117. static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
  1118. {
  1119. struct pcie_service_card *card = adapter->card;
  1120. if (!mwifiex_pcie_txbd_empty(card, card->txbd_rdptr)) {
  1121. card->txbd_flush = 1;
  1122. /* write pointer already set at last send
  1123. * send dnld-rdy intr again, wait for completion.
  1124. */
  1125. if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
  1126. CPU_INTR_DNLD_RDY)) {
  1127. mwifiex_dbg(adapter, ERROR,
  1128. "failed to assert dnld-rdy interrupt.\n");
  1129. return -1;
  1130. }
  1131. }
  1132. return 0;
  1133. }
  1134. /*
1135. * This function unmaps and frees data buffers that the firmware has finished downloading (TX completion).
  1136. */
  1137. static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
  1138. {
  1139. struct sk_buff *skb;
  1140. u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
  1141. struct mwifiex_pcie_buf_desc *desc;
  1142. struct mwifiex_pfu_buf_desc *desc2;
  1143. struct pcie_service_card *card = adapter->card;
  1144. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1145. if (!mwifiex_pcie_ok_to_access_hw(adapter))
  1146. mwifiex_pm_wakeup_card(adapter);
  1147. /* Read the TX ring read pointer set by firmware */
  1148. if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
  1149. mwifiex_dbg(adapter, ERROR,
  1150. "SEND COMP: failed to read reg->tx_rdptr\n");
  1151. return -1;
  1152. }
  1153. mwifiex_dbg(adapter, DATA,
  1154. "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
  1155. card->txbd_rdptr, rdptr);
  1156. num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
  1157. /* free from previous txbd_rdptr to current txbd_rdptr */
  1158. while (((card->txbd_rdptr & reg->tx_mask) !=
  1159. (rdptr & reg->tx_mask)) ||
  1160. ((card->txbd_rdptr & reg->tx_rollover_ind) !=
  1161. (rdptr & reg->tx_rollover_ind))) {
  1162. wrdoneidx = (card->txbd_rdptr & reg->tx_mask) >>
  1163. reg->tx_start_ptr;
  1164. skb = card->tx_buf_list[wrdoneidx];
  1165. if (skb) {
  1166. mwifiex_dbg(adapter, DATA,
  1167. "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
  1168. skb, wrdoneidx);
  1169. mwifiex_unmap_pci_memory(adapter, skb,
  1170. DMA_TO_DEVICE);
  1171. unmap_count++;
  1172. if (card->txbd_flush)
  1173. mwifiex_write_data_complete(adapter, skb, 0,
  1174. -1);
  1175. else
  1176. mwifiex_write_data_complete(adapter, skb, 0, 0);
  1177. atomic_dec(&adapter->tx_hw_pending);
  1178. }
  1179. card->tx_buf_list[wrdoneidx] = NULL;
  1180. if (reg->pfu_enabled) {
  1181. desc2 = card->txbd_ring[wrdoneidx];
  1182. memset(desc2, 0, sizeof(*desc2));
  1183. } else {
  1184. desc = card->txbd_ring[wrdoneidx];
  1185. memset(desc, 0, sizeof(*desc));
  1186. }
  1187. switch (card->dev->device) {
  1188. case PCIE_DEVICE_ID_MARVELL_88W8766P:
  1189. card->txbd_rdptr++;
  1190. break;
  1191. case PCIE_DEVICE_ID_MARVELL_88W8897:
  1192. case PCIE_DEVICE_ID_MARVELL_88W8997:
  1193. card->txbd_rdptr += reg->ring_tx_start_ptr;
  1194. break;
  1195. }
  1196. if ((card->txbd_rdptr & reg->tx_mask) == num_tx_buffs)
  1197. card->txbd_rdptr = ((card->txbd_rdptr &
  1198. reg->tx_rollover_ind) ^
  1199. reg->tx_rollover_ind);
  1200. }
  1201. if (unmap_count)
  1202. adapter->data_sent = false;
  1203. if (card->txbd_flush) {
  1204. if (mwifiex_pcie_txbd_empty(card, card->txbd_rdptr))
  1205. card->txbd_flush = 0;
  1206. else
  1207. mwifiex_clean_pcie_ring_buf(adapter);
  1208. }
  1209. return 0;
  1210. }
1211. /* This function sends a data buffer to the device. The first 4 bytes of the
1212. * payload are filled with the payload length and payload type. The payload
1213. * is then mapped to PCI device memory and the TX ring pointers are advanced.
1214. * The download-ready interrupt to FW is deferred if the TX ring is not full
1215. * and an additional payload can be accommodated.
1216. * Caller must ensure the tx_param parameter to this function is not NULL.
1217. */
  1218. static int
  1219. mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
  1220. struct mwifiex_tx_param *tx_param)
  1221. {
  1222. struct pcie_service_card *card = adapter->card;
  1223. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1224. u32 wrindx, num_tx_buffs, rx_val;
  1225. int ret;
  1226. dma_addr_t buf_pa;
  1227. struct mwifiex_pcie_buf_desc *desc = NULL;
  1228. struct mwifiex_pfu_buf_desc *desc2 = NULL;
  1229. if (!(skb->data && skb->len)) {
  1230. mwifiex_dbg(adapter, ERROR,
  1231. "%s(): invalid parameter <%p, %#x>\n",
  1232. __func__, skb->data, skb->len);
  1233. return -1;
  1234. }
  1235. if (!mwifiex_pcie_ok_to_access_hw(adapter))
  1236. mwifiex_pm_wakeup_card(adapter);
  1237. num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
  1238. mwifiex_dbg(adapter, DATA,
  1239. "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
  1240. card->txbd_rdptr, card->txbd_wrptr);
  1241. if (mwifiex_pcie_txbd_not_full(card)) {
  1242. u8 *payload;
  1243. adapter->data_sent = true;
  1244. payload = skb->data;
  1245. put_unaligned_le16((u16)skb->len, payload + 0);
  1246. put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2);
  1247. if (mwifiex_map_pci_memory(adapter, skb, skb->len,
  1248. DMA_TO_DEVICE))
  1249. return -1;
  1250. wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
  1251. buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
  1252. card->tx_buf_list[wrindx] = skb;
  1253. atomic_inc(&adapter->tx_hw_pending);
  1254. if (reg->pfu_enabled) {
  1255. desc2 = card->txbd_ring[wrindx];
  1256. desc2->paddr = buf_pa;
  1257. desc2->len = (u16)skb->len;
  1258. desc2->frag_len = (u16)skb->len;
  1259. desc2->offset = 0;
  1260. desc2->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
  1261. MWIFIEX_BD_FLAG_LAST_DESC;
  1262. } else {
  1263. desc = card->txbd_ring[wrindx];
  1264. desc->paddr = buf_pa;
  1265. desc->len = (u16)skb->len;
  1266. desc->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
  1267. MWIFIEX_BD_FLAG_LAST_DESC;
  1268. }
  1269. switch (card->dev->device) {
  1270. case PCIE_DEVICE_ID_MARVELL_88W8766P:
  1271. card->txbd_wrptr++;
  1272. break;
  1273. case PCIE_DEVICE_ID_MARVELL_88W8897:
  1274. case PCIE_DEVICE_ID_MARVELL_88W8997:
  1275. card->txbd_wrptr += reg->ring_tx_start_ptr;
  1276. break;
  1277. }
  1278. if ((card->txbd_wrptr & reg->tx_mask) == num_tx_buffs)
  1279. card->txbd_wrptr = ((card->txbd_wrptr &
  1280. reg->tx_rollover_ind) ^
  1281. reg->tx_rollover_ind);
  1282. rx_val = card->rxbd_rdptr & reg->rx_wrap_mask;
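/* On these chips the TX write-pointer register appears to share its
 * storage with the RX read-pointer bits, so the current RX value is
 * OR'ed in to avoid clobbering it when the TX pointer is updated.
 */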
  1283. /* Write the TX ring write pointer in to reg->tx_wrptr */
  1284. if (mwifiex_write_reg(adapter, reg->tx_wrptr,
  1285. card->txbd_wrptr | rx_val)) {
  1286. mwifiex_dbg(adapter, ERROR,
  1287. "SEND DATA: failed to write reg->tx_wrptr\n");
  1288. ret = -1;
  1289. goto done_unmap;
  1290. }
  1291. /* The firmware (latest version 15.68.19.p21) of the 88W8897 PCIe+USB card
  1292. * seems to crash randomly after setting the TX ring write pointer when
  1293. * ASPM powersaving is enabled. A workaround seems to be keeping the bus
  1294. * busy by reading a random register afterwards.
  1295. */
  1296. mwifiex_read_reg(adapter, PCI_VENDOR_ID, &rx_val);
  1297. if ((mwifiex_pcie_txbd_not_full(card)) &&
  1298. tx_param->next_pkt_len) {
  1299. /* have more packets and TxBD still can hold more */
  1300. mwifiex_dbg(adapter, DATA,
  1301. "SEND DATA: delay dnld-rdy interrupt.\n");
  1302. adapter->data_sent = false;
  1303. } else {
  1304. /* Send the TX ready interrupt */
  1305. if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
  1306. CPU_INTR_DNLD_RDY)) {
  1307. mwifiex_dbg(adapter, ERROR,
  1308. "SEND DATA: failed to assert dnld-rdy interrupt.\n");
  1309. ret = -1;
  1310. goto done_unmap;
  1311. }
  1312. }
  1313. mwifiex_dbg(adapter, DATA,
  1314. "info: SEND DATA: Updated <Rd: %#x, Wr:\t"
  1315. "%#x> and sent packet to firmware successfully\n",
  1316. card->txbd_rdptr, card->txbd_wrptr);
  1317. } else {
  1318. mwifiex_dbg(adapter, DATA,
  1319. "info: TX Ring full, can't send packets to fw\n");
  1320. adapter->data_sent = true;
  1321. /* Send the TX ready interrupt */
  1322. if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
  1323. CPU_INTR_DNLD_RDY))
  1324. mwifiex_dbg(adapter, ERROR,
  1325. "SEND DATA: failed to assert door-bell intr\n");
  1326. return -EBUSY;
  1327. }
  1328. return -EINPROGRESS;
  1329. done_unmap:
  1330. mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
  1331. card->tx_buf_list[wrindx] = NULL;
  1332. atomic_dec(&adapter->tx_hw_pending);
  1333. if (reg->pfu_enabled)
  1334. memset(desc2, 0, sizeof(*desc2));
  1335. else
  1336. memset(desc, 0, sizeof(*desc));
  1337. return ret;
  1338. }
1339. /*
1340. * This function handles the receive buffer ring and
1341. * dispatches packets to the upper layer.
1342. */
  1343. static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
  1344. {
  1345. struct pcie_service_card *card = adapter->card;
  1346. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1347. u32 wrptr, rd_index, tx_val;
  1348. dma_addr_t buf_pa;
  1349. int ret = 0;
  1350. struct sk_buff *skb_tmp = NULL;
  1351. struct mwifiex_pcie_buf_desc *desc;
  1352. struct mwifiex_pfu_buf_desc *desc2;
  1353. if (!mwifiex_pcie_ok_to_access_hw(adapter))
  1354. mwifiex_pm_wakeup_card(adapter);
  1355. /* Read the RX ring Write pointer set by firmware */
  1356. if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
  1357. mwifiex_dbg(adapter, ERROR,
  1358. "RECV DATA: failed to read reg->rx_wrptr\n");
  1359. ret = -1;
  1360. goto done;
  1361. }
  1362. card->rxbd_wrptr = wrptr;
  1363. while (((wrptr & reg->rx_mask) !=
  1364. (card->rxbd_rdptr & reg->rx_mask)) ||
  1365. ((wrptr & reg->rx_rollover_ind) ==
  1366. (card->rxbd_rdptr & reg->rx_rollover_ind))) {
  1367. struct sk_buff *skb_data;
  1368. u16 rx_len;
  1369. rd_index = card->rxbd_rdptr & reg->rx_mask;
  1370. skb_data = card->rx_buf_list[rd_index];
1371. /* If skb allocation failed earlier for this Rx packet,
1372. * rx_buf_list[rd_index] would have been left as NULL.
1373. */
  1374. if (!skb_data)
  1375. return -ENOMEM;
  1376. mwifiex_unmap_pci_memory(adapter, skb_data, DMA_FROM_DEVICE);
  1377. card->rx_buf_list[rd_index] = NULL;
1378. /* Get the data length from the interface header -
1379. * the first 2 bytes are the length, the next 2 bytes are the type.
1380. */
  1381. rx_len = get_unaligned_le16(skb_data->data);
  1382. if (WARN_ON(rx_len <= adapter->intf_hdr_len ||
  1383. rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
  1384. mwifiex_dbg(adapter, ERROR,
  1385. "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
  1386. rx_len, card->rxbd_rdptr, wrptr);
  1387. dev_kfree_skb_any(skb_data);
  1388. } else {
  1389. skb_put(skb_data, rx_len);
  1390. mwifiex_dbg(adapter, DATA,
  1391. "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
  1392. card->rxbd_rdptr, wrptr, rx_len);
  1393. skb_pull(skb_data, adapter->intf_hdr_len);
  1394. if (adapter->rx_work_enabled) {
  1395. skb_queue_tail(&adapter->rx_data_q, skb_data);
  1396. adapter->data_received = true;
  1397. atomic_inc(&adapter->rx_pending);
  1398. } else {
  1399. mwifiex_handle_rx_packet(adapter, skb_data);
  1400. }
  1401. }
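/* Refill this ring slot with a freshly allocated, DMA-mapped buffer so
 * the firmware always has a descriptor available to write into.
 */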
  1402. skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
  1403. GFP_KERNEL);
  1404. if (!skb_tmp) {
  1405. mwifiex_dbg(adapter, ERROR,
  1406. "Unable to allocate skb.\n");
  1407. return -ENOMEM;
  1408. }
  1409. if (mwifiex_map_pci_memory(adapter, skb_tmp,
  1410. MWIFIEX_RX_DATA_BUF_SIZE,
  1411. DMA_FROM_DEVICE))
  1412. return -1;
  1413. buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
  1414. mwifiex_dbg(adapter, INFO,
  1415. "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
  1416. skb_tmp, rd_index);
  1417. card->rx_buf_list[rd_index] = skb_tmp;
  1418. if (reg->pfu_enabled) {
  1419. desc2 = card->rxbd_ring[rd_index];
  1420. desc2->paddr = buf_pa;
  1421. desc2->len = skb_tmp->len;
  1422. desc2->frag_len = skb_tmp->len;
  1423. desc2->offset = 0;
  1424. desc2->flags = reg->ring_flag_sop | reg->ring_flag_eop;
  1425. } else {
  1426. desc = card->rxbd_ring[rd_index];
  1427. desc->paddr = buf_pa;
  1428. desc->len = skb_tmp->len;
  1429. desc->flags = 0;
  1430. }
  1431. if ((++card->rxbd_rdptr & reg->rx_mask) ==
  1432. MWIFIEX_MAX_TXRX_BD) {
  1433. card->rxbd_rdptr = ((card->rxbd_rdptr &
  1434. reg->rx_rollover_ind) ^
  1435. reg->rx_rollover_ind);
  1436. }
  1437. mwifiex_dbg(adapter, DATA,
  1438. "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
  1439. card->rxbd_rdptr, wrptr);
  1440. tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
  1441. /* Write the RX ring read pointer in to reg->rx_rdptr */
  1442. if (mwifiex_write_reg(adapter, reg->rx_rdptr,
  1443. card->rxbd_rdptr | tx_val)) {
  1444. mwifiex_dbg(adapter, DATA,
  1445. "RECV DATA: failed to write reg->rx_rdptr\n");
  1446. ret = -1;
  1447. goto done;
  1448. }
  1449. /* Read the RX ring Write pointer set by firmware */
  1450. if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
  1451. mwifiex_dbg(adapter, ERROR,
  1452. "RECV DATA: failed to read reg->rx_wrptr\n");
  1453. ret = -1;
  1454. goto done;
  1455. }
  1456. mwifiex_dbg(adapter, DATA,
  1457. "info: RECV DATA: Rcvd packet from fw successfully\n");
  1458. card->rxbd_wrptr = wrptr;
  1459. }
  1460. done:
  1461. return ret;
  1462. }
1463. /*
1464. * This function downloads the boot command to the device.
1465. */
  1466. static int
  1467. mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
  1468. {
  1469. dma_addr_t buf_pa;
  1470. struct pcie_service_card *card = adapter->card;
  1471. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1472. if (!(skb->data && skb->len)) {
  1473. mwifiex_dbg(adapter, ERROR,
  1474. "Invalid parameter in %s <%p. len %d>\n",
  1475. __func__, skb->data, skb->len);
  1476. return -1;
  1477. }
  1478. if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
  1479. return -1;
  1480. buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
  1481. /* Write the lower 32bits of the physical address to low command
  1482. * address scratch register
  1483. */
  1484. if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
  1485. mwifiex_dbg(adapter, ERROR,
  1486. "%s: failed to write download command to boot code.\n",
  1487. __func__);
  1488. mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
  1489. return -1;
  1490. }
  1491. /* Write the upper 32bits of the physical address to high command
  1492. * address scratch register
  1493. */
  1494. if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
  1495. (u32)((u64)buf_pa >> 32))) {
  1496. mwifiex_dbg(adapter, ERROR,
  1497. "%s: failed to write download command to boot code.\n",
  1498. __func__);
  1499. mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
  1500. return -1;
  1501. }
  1502. /* Write the command length to cmd_size scratch register */
  1503. if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
  1504. mwifiex_dbg(adapter, ERROR,
  1505. "%s: failed to write command len to cmd_size scratch reg\n",
  1506. __func__);
  1507. mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
  1508. return -1;
  1509. }
  1510. /* Ring the door bell */
  1511. if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
  1512. CPU_INTR_DOOR_BELL)) {
  1513. mwifiex_dbg(adapter, ERROR,
  1514. "%s: failed to assert door-bell intr\n", __func__);
  1515. mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
  1516. return -1;
  1517. }
  1518. return 0;
  1519. }
1520. /* This function initializes the RX port in firmware, which in turn enables the
1521. * host to receive data from the device before transmitting any packet.
1522. */
  1523. static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter)
  1524. {
  1525. struct pcie_service_card *card = adapter->card;
  1526. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1527. int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask;
  1528. /* Write the RX ring read pointer in to reg->rx_rdptr */
  1529. if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
  1530. tx_wrap)) {
  1531. mwifiex_dbg(adapter, ERROR,
  1532. "RECV DATA: failed to write reg->rx_rdptr\n");
  1533. return -1;
  1534. }
  1535. return 0;
  1536. }
  1537. /* This function downloads commands to the device
  1538. */
  1539. static int
  1540. mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
  1541. {
  1542. struct pcie_service_card *card = adapter->card;
  1543. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1544. int ret = 0;
  1545. dma_addr_t cmd_buf_pa, cmdrsp_buf_pa;
  1546. u8 *payload = (u8 *)skb->data;
  1547. if (!(skb->data && skb->len)) {
  1548. mwifiex_dbg(adapter, ERROR,
  1549. "Invalid parameter in %s <%p, %#x>\n",
  1550. __func__, skb->data, skb->len);
  1551. return -1;
  1552. }
  1553. /* Make sure a command response buffer is available */
  1554. if (!card->cmdrsp_buf) {
  1555. mwifiex_dbg(adapter, ERROR,
  1556. "No response buffer available, send command failed\n");
  1557. return -EBUSY;
  1558. }
  1559. if (!mwifiex_pcie_ok_to_access_hw(adapter))
  1560. mwifiex_pm_wakeup_card(adapter);
  1561. adapter->cmd_sent = true;
  1562. put_unaligned_le16((u16)skb->len, &payload[0]);
  1563. put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]);
  1564. if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
  1565. return -1;
  1566. card->cmd_buf = skb;
  1567. /*
  1568. * Need to keep a reference, since core driver might free up this
  1569. * buffer before we've unmapped it.
  1570. */
  1571. skb_get(skb);
1572. /* To send a command, the driver will:
1573. 1. Write the 64-bit physical addresses of the command buffer and the
1574. command response buffer to their respective low/high scratch registers
1575. 2. Ring the door bell (i.e. set the door bell interrupt)
1576. In response to the door bell interrupt, the firmware will DMA the
1577. command packet (first the header, to obtain the total length, and
1578. then the rest of the command).
1579. */
  1580. if (card->cmdrsp_buf) {
  1581. cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf);
  1582. /* Write the lower 32bits of the cmdrsp buffer physical
  1583. address */
  1584. if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
  1585. (u32)cmdrsp_buf_pa)) {
  1586. mwifiex_dbg(adapter, ERROR,
  1587. "Failed to write download cmd to boot code.\n");
  1588. ret = -1;
  1589. goto done;
  1590. }
  1591. /* Write the upper 32bits of the cmdrsp buffer physical
  1592. address */
  1593. if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
  1594. (u32)((u64)cmdrsp_buf_pa >> 32))) {
  1595. mwifiex_dbg(adapter, ERROR,
  1596. "Failed to write download cmd to boot code.\n");
  1597. ret = -1;
  1598. goto done;
  1599. }
  1600. }
  1601. cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf);
  1602. /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
  1603. if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
  1604. (u32)cmd_buf_pa)) {
  1605. mwifiex_dbg(adapter, ERROR,
  1606. "Failed to write download cmd to boot code.\n");
  1607. ret = -1;
  1608. goto done;
  1609. }
  1610. /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
  1611. if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
  1612. (u32)((u64)cmd_buf_pa >> 32))) {
  1613. mwifiex_dbg(adapter, ERROR,
  1614. "Failed to write download cmd to boot code.\n");
  1615. ret = -1;
  1616. goto done;
  1617. }
  1618. /* Write the command length to reg->cmd_size */
  1619. if (mwifiex_write_reg(adapter, reg->cmd_size,
  1620. card->cmd_buf->len)) {
  1621. mwifiex_dbg(adapter, ERROR,
  1622. "Failed to write cmd len to reg->cmd_size\n");
  1623. ret = -1;
  1624. goto done;
  1625. }
  1626. /* Ring the door bell */
  1627. if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
  1628. CPU_INTR_DOOR_BELL)) {
  1629. mwifiex_dbg(adapter, ERROR,
  1630. "Failed to assert door-bell intr\n");
  1631. ret = -1;
  1632. goto done;
  1633. }
  1634. done:
  1635. if (ret)
  1636. adapter->cmd_sent = false;
  1637. return 0;
  1638. }
  1639. /*
  1640. * This function handles command complete interrupt
  1641. */
  1642. static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
  1643. {
  1644. struct pcie_service_card *card = adapter->card;
  1645. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1646. struct sk_buff *skb = card->cmdrsp_buf;
  1647. int count = 0;
  1648. u16 rx_len;
  1649. mwifiex_dbg(adapter, CMD,
  1650. "info: Rx CMD Response\n");
  1651. if (adapter->curr_cmd)
  1652. mwifiex_unmap_pci_memory(adapter, skb, DMA_FROM_DEVICE);
  1653. else
  1654. dma_sync_single_for_cpu(&card->dev->dev,
  1655. MWIFIEX_SKB_DMA_ADDR(skb),
  1656. MWIFIEX_UPLD_SIZE, DMA_FROM_DEVICE);
  1657. /* Unmap the command as a response has been received. */
  1658. if (card->cmd_buf) {
  1659. mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
  1660. DMA_TO_DEVICE);
  1661. dev_kfree_skb_any(card->cmd_buf);
  1662. card->cmd_buf = NULL;
  1663. }
  1664. rx_len = get_unaligned_le16(skb->data);
  1665. skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
  1666. skb_trim(skb, rx_len);
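/* If no command is pending, the only expected "response" is the sleep
 * confirm frame sent while entering power save; it is handled inline in
 * the branch below.
 */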
  1667. if (!adapter->curr_cmd) {
  1668. if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
  1669. dma_sync_single_for_device(&card->dev->dev,
  1670. MWIFIEX_SKB_DMA_ADDR(skb),
  1671. MWIFIEX_SLEEP_COOKIE_SIZE,
  1672. DMA_FROM_DEVICE);
  1673. if (mwifiex_write_reg(adapter,
  1674. PCIE_CPU_INT_EVENT,
  1675. CPU_INTR_SLEEP_CFM_DONE)) {
  1676. mwifiex_dbg(adapter, ERROR,
  1677. "Write register failed\n");
  1678. return -1;
  1679. }
  1680. mwifiex_delay_for_sleep_cookie(adapter,
  1681. MWIFIEX_MAX_DELAY_COUNT);
  1682. mwifiex_unmap_pci_memory(adapter, skb,
  1683. DMA_FROM_DEVICE);
  1684. skb_pull(skb, adapter->intf_hdr_len);
  1685. while (reg->sleep_cookie && (count++ < 10) &&
  1686. mwifiex_pcie_ok_to_access_hw(adapter))
  1687. usleep_range(50, 60);
  1688. mwifiex_pcie_enable_host_int(adapter);
  1689. mwifiex_process_sleep_confirm_resp(adapter, skb->data,
  1690. skb->len);
  1691. } else {
  1692. mwifiex_dbg(adapter, ERROR,
  1693. "There is no command but got cmdrsp\n");
  1694. }
  1695. memcpy(adapter->upld_buf, skb->data,
  1696. min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
  1697. skb_push(skb, adapter->intf_hdr_len);
  1698. if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
  1699. DMA_FROM_DEVICE))
  1700. return -1;
  1701. } else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
  1702. skb_pull(skb, adapter->intf_hdr_len);
  1703. adapter->curr_cmd->resp_skb = skb;
  1704. adapter->cmd_resp_received = true;
1705. /* Hand the buffer over to the command node; it will be
1706. returned in the response complete callback */
  1707. card->cmdrsp_buf = NULL;
  1708. /* Clear the cmd-rsp buffer address in scratch registers. This
  1709. will prevent firmware from writing to the same response
  1710. buffer again. */
  1711. if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
  1712. mwifiex_dbg(adapter, ERROR,
  1713. "cmd_done: failed to clear cmd_rsp_addr_lo\n");
  1714. return -1;
  1715. }
1716. /* Likewise clear the upper 32bits of the cmd-rsp buffer
1717. address */
  1718. if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
  1719. mwifiex_dbg(adapter, ERROR,
  1720. "cmd_done: failed to clear cmd_rsp_addr_hi\n");
  1721. return -1;
  1722. }
  1723. }
  1724. return 0;
  1725. }
  1726. /*
  1727. * Command Response processing complete handler
  1728. */
  1729. static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
  1730. struct sk_buff *skb)
  1731. {
  1732. struct pcie_service_card *card = adapter->card;
  1733. if (skb) {
  1734. card->cmdrsp_buf = skb;
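/* Restore the interface header space and re-map the buffer so the
 * firmware can reuse it for the next command response.
 */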
  1735. skb_push(card->cmdrsp_buf, adapter->intf_hdr_len);
  1736. if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
  1737. DMA_FROM_DEVICE))
  1738. return -1;
  1739. }
  1740. return 0;
  1741. }
  1742. /*
  1743. * This function handles firmware event ready interrupt
  1744. */
  1745. static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
  1746. {
  1747. struct pcie_service_card *card = adapter->card;
  1748. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1749. u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
  1750. u32 wrptr, event;
  1751. struct mwifiex_evt_buf_desc *desc;
  1752. if (!mwifiex_pcie_ok_to_access_hw(adapter))
  1753. mwifiex_pm_wakeup_card(adapter);
  1754. if (adapter->event_received) {
  1755. mwifiex_dbg(adapter, EVENT,
  1756. "info: Event being processed,\t"
  1757. "do not process this interrupt just yet\n");
  1758. return 0;
  1759. }
  1760. if (rdptr >= MWIFIEX_MAX_EVT_BD) {
  1761. mwifiex_dbg(adapter, ERROR,
  1762. "info: Invalid read pointer...\n");
  1763. return -1;
  1764. }
  1765. /* Read the event ring write pointer set by firmware */
  1766. if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
  1767. mwifiex_dbg(adapter, ERROR,
  1768. "EventReady: failed to read reg->evt_wrptr\n");
  1769. return -1;
  1770. }
  1771. mwifiex_dbg(adapter, EVENT,
  1772. "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
  1773. card->evtbd_rdptr, wrptr);
  1774. if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
  1775. & MWIFIEX_EVTBD_MASK)) ||
  1776. ((wrptr & reg->evt_rollover_ind) ==
  1777. (card->evtbd_rdptr & reg->evt_rollover_ind))) {
  1778. struct sk_buff *skb_cmd;
  1779. __le16 data_len = 0;
  1780. u16 evt_len;
  1781. mwifiex_dbg(adapter, INFO,
  1782. "info: Read Index: %d\n", rdptr);
  1783. skb_cmd = card->evt_buf_list[rdptr];
  1784. mwifiex_unmap_pci_memory(adapter, skb_cmd, DMA_FROM_DEVICE);
1785. /* Hand the buffer over to the adapter's event pointer; it will
1786. be returned after the event handling callback */
  1787. card->evt_buf_list[rdptr] = NULL;
  1788. desc = card->evtbd_ring[rdptr];
  1789. memset(desc, 0, sizeof(*desc));
  1790. event = get_unaligned_le32(
  1791. &skb_cmd->data[adapter->intf_hdr_len]);
  1792. adapter->event_cause = event;
1793. /* The first 4 bytes are the event transfer header:
1794. 2 bytes of length followed by 2 bytes of type */
  1795. memcpy(&data_len, skb_cmd->data, sizeof(__le16));
  1796. evt_len = le16_to_cpu(data_len);
  1797. skb_trim(skb_cmd, evt_len);
  1798. skb_pull(skb_cmd, adapter->intf_hdr_len);
  1799. mwifiex_dbg(adapter, EVENT,
  1800. "info: Event length: %d\n", evt_len);
  1801. if (evt_len > MWIFIEX_EVENT_HEADER_LEN &&
  1802. evt_len < MAX_EVENT_SIZE)
  1803. memcpy(adapter->event_body, skb_cmd->data +
  1804. MWIFIEX_EVENT_HEADER_LEN, evt_len -
  1805. MWIFIEX_EVENT_HEADER_LEN);
  1806. adapter->event_received = true;
  1807. adapter->event_skb = skb_cmd;
  1808. /* Do not update the event read pointer here, wait till the
  1809. buffer is released. This is just to make things simpler,
  1810. we need to find a better method of managing these buffers.
  1811. */
  1812. } else {
  1813. if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
  1814. CPU_INTR_EVENT_DONE)) {
  1815. mwifiex_dbg(adapter, ERROR,
  1816. "Write register failed\n");
  1817. return -1;
  1818. }
  1819. }
  1820. return 0;
  1821. }
  1822. /*
  1823. * Event processing complete handler
  1824. */
  1825. static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
  1826. struct sk_buff *skb)
  1827. {
  1828. struct pcie_service_card *card = adapter->card;
  1829. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1830. int ret = 0;
  1831. u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
  1832. u32 wrptr;
  1833. struct mwifiex_evt_buf_desc *desc;
  1834. if (!skb)
  1835. return 0;
  1836. if (rdptr >= MWIFIEX_MAX_EVT_BD) {
  1837. mwifiex_dbg(adapter, ERROR,
  1838. "event_complete: Invalid rdptr 0x%x\n",
  1839. rdptr);
  1840. return -EINVAL;
  1841. }
  1842. /* Read the event ring write pointer set by firmware */
  1843. if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
  1844. mwifiex_dbg(adapter, ERROR,
  1845. "event_complete: failed to read reg->evt_wrptr\n");
  1846. return -1;
  1847. }
  1848. if (!card->evt_buf_list[rdptr]) {
  1849. skb_push(skb, adapter->intf_hdr_len);
  1850. skb_put(skb, MAX_EVENT_SIZE - skb->len);
  1851. if (mwifiex_map_pci_memory(adapter, skb,
  1852. MAX_EVENT_SIZE,
  1853. DMA_FROM_DEVICE))
  1854. return -1;
  1855. card->evt_buf_list[rdptr] = skb;
  1856. desc = card->evtbd_ring[rdptr];
  1857. desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
  1858. desc->len = (u16)skb->len;
  1859. desc->flags = 0;
  1860. skb = NULL;
  1861. } else {
  1862. mwifiex_dbg(adapter, ERROR,
  1863. "info: ERROR: buf still valid at index %d, <%p, %p>\n",
  1864. rdptr, card->evt_buf_list[rdptr], skb);
  1865. }
  1866. if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
  1867. card->evtbd_rdptr = ((card->evtbd_rdptr &
  1868. reg->evt_rollover_ind) ^
  1869. reg->evt_rollover_ind);
  1870. }
  1871. mwifiex_dbg(adapter, EVENT,
  1872. "info: Updated <Rd: 0x%x, Wr: 0x%x>",
  1873. card->evtbd_rdptr, wrptr);
  1874. /* Write the event ring read pointer in to reg->evt_rdptr */
  1875. if (mwifiex_write_reg(adapter, reg->evt_rdptr,
  1876. card->evtbd_rdptr)) {
  1877. mwifiex_dbg(adapter, ERROR,
  1878. "event_complete: failed to read reg->evt_rdptr\n");
  1879. return -1;
  1880. }
  1881. mwifiex_dbg(adapter, EVENT,
  1882. "info: Check Events Again\n");
  1883. ret = mwifiex_pcie_process_event_ready(adapter);
  1884. return ret;
  1885. }
1886. /* A combo firmware image is a combination of
1887. * (1) a combo CRC header, starting with CMD5
1888. * (2) a bluetooth image, starting with CMD7, ending with CMD6, data wrapped in CMD1.
1889. * (3) a wifi image.
1890. *
1891. * This function skips the header and bluetooth parts and returns
1892. * the offset of the trailing wifi-only part. If the image is already
1893. * wifi-only, i.e. it starts with CMD1, it returns 0.
1894. */
  1895. static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
  1896. const void *firmware, u32 firmware_len) {
  1897. const struct mwifiex_fw_data *fwdata;
  1898. u32 offset = 0, data_len, dnld_cmd;
  1899. int ret = 0;
  1900. bool cmd7_before = false, first_cmd = false;
  1901. while (1) {
  1902. /* Check for integer and buffer overflow */
  1903. if (offset + sizeof(fwdata->header) < sizeof(fwdata->header) ||
  1904. offset + sizeof(fwdata->header) >= firmware_len) {
  1905. mwifiex_dbg(adapter, ERROR,
  1906. "extract wifi-only fw failure!\n");
  1907. ret = -1;
  1908. goto done;
  1909. }
  1910. fwdata = firmware + offset;
  1911. dnld_cmd = le32_to_cpu(fwdata->header.dnld_cmd);
  1912. data_len = le32_to_cpu(fwdata->header.data_length);
  1913. /* Skip past header */
  1914. offset += sizeof(fwdata->header);
  1915. switch (dnld_cmd) {
  1916. case MWIFIEX_FW_DNLD_CMD_1:
  1917. if (offset + data_len < data_len) {
  1918. mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
  1919. ret = -1;
  1920. goto done;
  1921. }
  1922. /* Image start with cmd1, already wifi-only firmware */
  1923. if (!first_cmd) {
  1924. mwifiex_dbg(adapter, MSG,
  1925. "input wifi-only firmware\n");
  1926. return 0;
  1927. }
  1928. if (!cmd7_before) {
  1929. mwifiex_dbg(adapter, ERROR,
  1930. "no cmd7 before cmd1!\n");
  1931. ret = -1;
  1932. goto done;
  1933. }
  1934. offset += data_len;
  1935. break;
  1936. case MWIFIEX_FW_DNLD_CMD_5:
  1937. first_cmd = true;
  1938. /* Check for integer overflow */
  1939. if (offset + data_len < data_len) {
  1940. mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
  1941. ret = -1;
  1942. goto done;
  1943. }
  1944. offset += data_len;
  1945. break;
  1946. case MWIFIEX_FW_DNLD_CMD_6:
  1947. first_cmd = true;
  1948. /* Check for integer overflow */
  1949. if (offset + data_len < data_len) {
  1950. mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
  1951. ret = -1;
  1952. goto done;
  1953. }
  1954. offset += data_len;
  1955. if (offset >= firmware_len) {
  1956. mwifiex_dbg(adapter, ERROR,
  1957. "extract wifi-only fw failure!\n");
  1958. ret = -1;
  1959. } else {
  1960. ret = offset;
  1961. }
  1962. goto done;
  1963. case MWIFIEX_FW_DNLD_CMD_7:
  1964. first_cmd = true;
  1965. cmd7_before = true;
  1966. break;
  1967. default:
  1968. mwifiex_dbg(adapter, ERROR, "unknown dnld_cmd %d\n",
  1969. dnld_cmd);
  1970. ret = -1;
  1971. goto done;
  1972. }
  1973. }
  1974. done:
  1975. return ret;
  1976. }
  1977. /*
  1978. * This function downloads the firmware to the card.
  1979. *
  1980. * Firmware is downloaded to the card in blocks. Every block download
  1981. * is tested for CRC errors, and retried a number of times before
  1982. * returning failure.
  1983. */
  1984. static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
  1985. struct mwifiex_fw_image *fw)
  1986. {
  1987. int ret;
  1988. u8 *firmware = fw->fw_buf;
  1989. u32 firmware_len = fw->fw_len;
  1990. u32 offset = 0;
  1991. struct sk_buff *skb;
  1992. u32 txlen, tx_blocks = 0, tries, len, val;
  1993. u32 block_retry_cnt = 0;
  1994. struct pcie_service_card *card = adapter->card;
  1995. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  1996. if (!firmware || !firmware_len) {
  1997. mwifiex_dbg(adapter, ERROR,
  1998. "No firmware image found! Terminating download\n");
  1999. return -1;
  2000. }
  2001. mwifiex_dbg(adapter, INFO,
  2002. "info: Downloading FW image (%d bytes)\n",
  2003. firmware_len);
  2004. if (mwifiex_pcie_disable_host_int(adapter)) {
  2005. mwifiex_dbg(adapter, ERROR,
  2006. "%s: Disabling interrupts failed.\n", __func__);
  2007. return -1;
  2008. }
  2009. skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
  2010. if (!skb) {
  2011. ret = -ENOMEM;
  2012. goto done;
  2013. }
  2014. ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_13_REG, &val);
  2015. if (ret) {
  2016. mwifiex_dbg(adapter, FATAL, "Failed to read scratch register 13\n");
  2017. goto done;
  2018. }
2019. /* PCIe FLR case: extract the wifi part from the combo firmware */
  2020. if (val == MWIFIEX_PCIE_FLR_HAPPENS) {
  2021. ret = mwifiex_extract_wifi_fw(adapter, firmware, firmware_len);
  2022. if (ret < 0) {
  2023. mwifiex_dbg(adapter, ERROR, "Failed to extract wifi fw\n");
  2024. goto done;
  2025. }
  2026. offset = ret;
  2027. mwifiex_dbg(adapter, MSG,
  2028. "info: dnld wifi firmware from %d bytes\n", offset);
  2029. }
  2030. /* Perform firmware data transfer */
  2031. do {
  2032. u32 ireg_intr = 0;
  2033. /* More data? */
  2034. if (offset >= firmware_len)
  2035. break;
  2036. for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
  2037. ret = mwifiex_read_reg(adapter, reg->cmd_size,
  2038. &len);
  2039. if (ret) {
  2040. mwifiex_dbg(adapter, FATAL,
  2041. "Failed reading len from boot code\n");
  2042. goto done;
  2043. }
  2044. if (len)
  2045. break;
  2046. usleep_range(10, 20);
  2047. }
  2048. if (!len) {
  2049. break;
  2050. } else if (len > MWIFIEX_UPLD_SIZE) {
  2051. mwifiex_dbg(adapter, ERROR,
  2052. "FW download failure @ %d, invalid length %d\n",
  2053. offset, len);
  2054. ret = -1;
  2055. goto done;
  2056. }
  2057. txlen = len;
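/* Bit 0 of the length reported by the boot code flags a CRC error in the
 * previous block; the block still held in the skb is then resent and the
 * offset is not advanced (txlen is forced to 0).
 */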
  2058. if (len & BIT(0)) {
  2059. block_retry_cnt++;
  2060. if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) {
  2061. mwifiex_dbg(adapter, ERROR,
  2062. "FW download failure @ %d, over max\t"
  2063. "retry count\n", offset);
  2064. ret = -1;
  2065. goto done;
  2066. }
  2067. mwifiex_dbg(adapter, ERROR,
  2068. "FW CRC error indicated by the\t"
  2069. "helper: len = 0x%04X, txlen = %d\n",
  2070. len, txlen);
  2071. len &= ~BIT(0);
  2072. /* Setting this to 0 to resend from same offset */
  2073. txlen = 0;
  2074. } else {
  2075. block_retry_cnt = 0;
  2076. /* Set blocksize to transfer - checking for
  2077. last block */
  2078. if (firmware_len - offset < txlen)
  2079. txlen = firmware_len - offset;
  2080. tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
  2081. card->pcie.blksz_fw_dl;
  2082. /* Copy payload to buffer */
  2083. memmove(skb->data, &firmware[offset], txlen);
  2084. }
  2085. skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
  2086. skb_trim(skb, tx_blocks * card->pcie.blksz_fw_dl);
  2087. /* Send the boot command to device */
  2088. if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
  2089. mwifiex_dbg(adapter, ERROR,
  2090. "Failed to send firmware download command\n");
  2091. ret = -1;
  2092. goto done;
  2093. }
  2094. /* Wait for the command done interrupt */
  2095. for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
  2096. if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
  2097. &ireg_intr)) {
  2098. mwifiex_dbg(adapter, ERROR,
  2099. "%s: Failed to read\t"
  2100. "interrupt status during fw dnld.\n",
  2101. __func__);
  2102. mwifiex_unmap_pci_memory(adapter, skb,
  2103. DMA_TO_DEVICE);
  2104. ret = -1;
  2105. goto done;
  2106. }
  2107. if (!(ireg_intr & CPU_INTR_DOOR_BELL))
  2108. break;
  2109. usleep_range(10, 20);
  2110. }
  2111. if (ireg_intr & CPU_INTR_DOOR_BELL) {
  2112. mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n",
  2113. __func__);
  2114. mwifiex_unmap_pci_memory(adapter, skb,
  2115. DMA_TO_DEVICE);
  2116. ret = -1;
  2117. goto done;
  2118. }
  2119. mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
  2120. offset += txlen;
  2121. } while (true);
  2122. mwifiex_dbg(adapter, MSG,
  2123. "info: FW download over, size %d bytes\n", offset);
  2124. ret = 0;
  2125. done:
  2126. dev_kfree_skb_any(skb);
  2127. return ret;
  2128. }
  2129. /*
  2130. * This function checks the firmware status in card.
  2131. */
  2132. static int
  2133. mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
  2134. {
  2135. int ret = 0;
  2136. u32 firmware_stat;
  2137. struct pcie_service_card *card = adapter->card;
  2138. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  2139. u32 tries;
2140. /* Mask spurious interrupts */
  2141. if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK,
  2142. HOST_INTR_MASK)) {
  2143. mwifiex_dbg(adapter, ERROR,
  2144. "Write register failed\n");
  2145. return -1;
  2146. }
  2147. mwifiex_dbg(adapter, INFO,
  2148. "Setting driver ready signature\n");
  2149. if (mwifiex_write_reg(adapter, reg->drv_rdy,
  2150. FIRMWARE_READY_PCIE)) {
  2151. mwifiex_dbg(adapter, ERROR,
  2152. "Failed to write driver ready signature\n");
  2153. return -1;
  2154. }
  2155. /* Wait for firmware initialization event */
  2156. for (tries = 0; tries < poll_num; tries++) {
  2157. if (mwifiex_read_reg(adapter, reg->fw_status,
  2158. &firmware_stat))
  2159. ret = -1;
  2160. else
  2161. ret = 0;
  2162. mwifiex_dbg(adapter, INFO, "Try %d if FW is ready <%d,%#x>",
  2163. tries, ret, firmware_stat);
  2164. if (ret)
  2165. continue;
  2166. if (firmware_stat == FIRMWARE_READY_PCIE) {
  2167. ret = 0;
  2168. break;
  2169. } else {
  2170. msleep(100);
  2171. ret = -1;
  2172. }
  2173. }
  2174. return ret;
  2175. }
  2176. /* This function checks if WLAN is the winner.
  2177. */
  2178. static int
  2179. mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
  2180. {
  2181. u32 winner = 0;
  2182. int ret = 0;
  2183. struct pcie_service_card *card = adapter->card;
  2184. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  2185. if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) {
  2186. ret = -1;
  2187. } else if (!winner) {
  2188. mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n");
  2189. adapter->winner = 1;
  2190. } else {
  2191. mwifiex_dbg(adapter, ERROR,
  2192. "PCI-E is not the winner <%#x>", winner);
  2193. }
  2194. return ret;
  2195. }
  2196. /*
  2197. * This function reads the interrupt status from card.
  2198. */
  2199. static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter,
  2200. int msg_id)
  2201. {
  2202. u32 pcie_ireg;
  2203. unsigned long flags;
  2204. struct pcie_service_card *card = adapter->card;
  2205. if (card->msi_enable) {
  2206. spin_lock_irqsave(&adapter->int_lock, flags);
  2207. adapter->int_status = 1;
  2208. spin_unlock_irqrestore(&adapter->int_lock, flags);
  2209. return;
  2210. }
  2211. if (!mwifiex_pcie_ok_to_access_hw(adapter))
  2212. return;
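/* With MSI-X, each message id maps directly to one interrupt status
 * bit, so no status-register read is needed here.
 */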
  2213. if (card->msix_enable && msg_id >= 0) {
  2214. pcie_ireg = BIT(msg_id);
  2215. } else {
  2216. if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
  2217. &pcie_ireg)) {
  2218. mwifiex_dbg(adapter, ERROR, "Read register failed\n");
  2219. return;
  2220. }
  2221. if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg)
  2222. return;
  2223. mwifiex_pcie_disable_host_int(adapter);
  2224. /* Clear the pending interrupts */
  2225. if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS,
  2226. ~pcie_ireg)) {
  2227. mwifiex_dbg(adapter, ERROR,
  2228. "Write register failed\n");
  2229. return;
  2230. }
  2231. }
  2232. if (!adapter->pps_uapsd_mode &&
  2233. adapter->ps_state == PS_STATE_SLEEP &&
  2234. mwifiex_pcie_ok_to_access_hw(adapter)) {
  2235. /* Potentially for PCIe we could get other
  2236. * interrupts like shared. Don't change power
  2237. * state until cookie is set
  2238. */
  2239. adapter->ps_state = PS_STATE_AWAKE;
  2240. adapter->pm_wakeup_fw_try = false;
  2241. del_timer(&adapter->wakeup_timer);
  2242. }
  2243. spin_lock_irqsave(&adapter->int_lock, flags);
  2244. adapter->int_status |= pcie_ireg;
  2245. spin_unlock_irqrestore(&adapter->int_lock, flags);
  2246. mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg);
  2247. }
  2248. /*
  2249. * Interrupt handler for PCIe root port
  2250. *
2251. * This function reads the interrupt status from the firmware and queues
2252. * the main work, which will handle the interrupt.
  2253. */
  2254. static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
  2255. {
  2256. struct mwifiex_msix_context *ctx = context;
  2257. struct pci_dev *pdev = ctx->dev;
  2258. struct pcie_service_card *card;
  2259. struct mwifiex_adapter *adapter;
  2260. card = pci_get_drvdata(pdev);
  2261. if (!card->adapter) {
  2262. pr_err("info: %s: card=%p adapter=%p\n", __func__, card,
  2263. card ? card->adapter : NULL);
  2264. goto exit;
  2265. }
  2266. adapter = card->adapter;
  2267. if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
  2268. goto exit;
  2269. if (card->msix_enable)
  2270. mwifiex_interrupt_status(adapter, ctx->msg_id);
  2271. else
  2272. mwifiex_interrupt_status(adapter, -1);
  2273. mwifiex_queue_main_work(adapter);
  2274. exit:
  2275. return IRQ_HANDLED;
  2276. }
  2277. /*
  2278. * This function checks the current interrupt status.
  2279. *
  2280. * The following interrupts are checked and handled by this function -
  2281. * - Data sent
  2282. * - Command sent
  2283. * - Command received
  2284. * - Packets received
  2285. * - Events received
  2286. *
  2287. * In case of Rx packets received, the packets are uploaded from card to
  2288. * host and processed accordingly.
  2289. */
  2290. static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
  2291. {
  2292. int ret;
  2293. u32 pcie_ireg = 0;
  2294. unsigned long flags;
  2295. struct pcie_service_card *card = adapter->card;
  2296. spin_lock_irqsave(&adapter->int_lock, flags);
  2297. if (!card->msi_enable) {
  2298. /* Clear out unused interrupts */
  2299. pcie_ireg = adapter->int_status;
  2300. }
  2301. adapter->int_status = 0;
  2302. spin_unlock_irqrestore(&adapter->int_lock, flags);
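/* For legacy/MSI-X interrupts the status was already latched into
 * adapter->int_status by the interrupt handler; with MSI the status
 * register is read and acknowledged here instead.
 */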
  2303. if (card->msi_enable) {
  2304. if (mwifiex_pcie_ok_to_access_hw(adapter)) {
  2305. if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
  2306. &pcie_ireg)) {
  2307. mwifiex_dbg(adapter, ERROR,
  2308. "Read register failed\n");
  2309. return -1;
  2310. }
  2311. if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
  2312. if (mwifiex_write_reg(adapter,
  2313. PCIE_HOST_INT_STATUS,
  2314. ~pcie_ireg)) {
  2315. mwifiex_dbg(adapter, ERROR,
  2316. "Write register failed\n");
  2317. return -1;
  2318. }
  2319. if (!adapter->pps_uapsd_mode &&
  2320. adapter->ps_state == PS_STATE_SLEEP) {
  2321. adapter->ps_state = PS_STATE_AWAKE;
  2322. adapter->pm_wakeup_fw_try = false;
  2323. del_timer(&adapter->wakeup_timer);
  2324. }
  2325. }
  2326. }
  2327. }
  2328. if (pcie_ireg & HOST_INTR_DNLD_DONE) {
  2329. mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n");
  2330. ret = mwifiex_pcie_send_data_complete(adapter);
  2331. if (ret)
  2332. return ret;
  2333. }
  2334. if (pcie_ireg & HOST_INTR_UPLD_RDY) {
  2335. mwifiex_dbg(adapter, INTR, "info: Rx DATA\n");
  2336. ret = mwifiex_pcie_process_recv_data(adapter);
  2337. if (ret)
  2338. return ret;
  2339. }
  2340. if (pcie_ireg & HOST_INTR_EVENT_RDY) {
  2341. mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n");
  2342. ret = mwifiex_pcie_process_event_ready(adapter);
  2343. if (ret)
  2344. return ret;
  2345. }
  2346. if (pcie_ireg & HOST_INTR_CMD_DONE) {
  2347. if (adapter->cmd_sent) {
  2348. mwifiex_dbg(adapter, INTR,
  2349. "info: CMD sent Interrupt\n");
  2350. adapter->cmd_sent = false;
  2351. }
  2352. /* Handle command response */
  2353. ret = mwifiex_pcie_process_cmd_complete(adapter);
  2354. if (ret)
  2355. return ret;
  2356. }
  2357. mwifiex_dbg(adapter, INTR,
  2358. "info: cmd_sent=%d data_sent=%d\n",
  2359. adapter->cmd_sent, adapter->data_sent);
  2360. if (!card->msi_enable && !card->msix_enable &&
  2361. adapter->ps_state != PS_STATE_SLEEP)
  2362. mwifiex_pcie_enable_host_int(adapter);
  2363. return 0;
  2364. }
  2365. /*
  2366. * This function downloads data from driver to card.
  2367. *
  2368. * Both commands and data packets are transferred to the card by this
  2369. * function.
  2370. *
2371. * A PCIe-specific interface header, containing the packet length and
2372. * type, is added to the front of the buffer before the transfer. The
2373. * firmware dispatches the packet based on this type.
  2374. */
  2375. static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
  2376. struct sk_buff *skb,
  2377. struct mwifiex_tx_param *tx_param)
  2378. {
  2379. if (!skb) {
  2380. mwifiex_dbg(adapter, ERROR,
  2381. "Passed NULL skb to %s\n", __func__);
  2382. return -1;
  2383. }
  2384. if (type == MWIFIEX_TYPE_DATA)
  2385. return mwifiex_pcie_send_data(adapter, skb, tx_param);
  2386. else if (type == MWIFIEX_TYPE_CMD)
  2387. return mwifiex_pcie_send_cmd(adapter, skb);
  2388. return 0;
  2389. }
  2390. /* Function to dump PCIE scratch registers in case of FW crash
  2391. */
  2392. static int
  2393. mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
  2394. {
  2395. char *p = drv_buf;
  2396. char buf[256], *ptr;
  2397. int i;
  2398. u32 value;
  2399. struct pcie_service_card *card = adapter->card;
  2400. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  2401. int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
  2402. PCIE_SCRATCH_14_REG,
  2403. PCIE_SCRATCH_15_REG};
  2404. if (!p)
  2405. return 0;
  2406. mwifiex_dbg(adapter, MSG, "PCIE register dump start\n");
  2407. if (mwifiex_read_reg(adapter, reg->fw_status, &value)) {
  2408. mwifiex_dbg(adapter, ERROR, "failed to read firmware status");
  2409. return 0;
  2410. }
  2411. ptr = buf;
  2412. mwifiex_dbg(adapter, MSG, "pcie scratch register:");
  2413. for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) {
  2414. mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value);
  2415. ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n",
  2416. pcie_scratch_reg[i], value);
  2417. }
  2418. mwifiex_dbg(adapter, MSG, "%s\n", buf);
  2419. p += sprintf(p, "%s\n", buf);
  2420. mwifiex_dbg(adapter, MSG, "PCIE register dump end\n");
  2421. return p - drv_buf;
  2422. }
2423. /* This function performs the firmware-dump read/write handshake */
  2424. static enum rdwr_status
  2425. mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
  2426. {
  2427. int ret, tries;
  2428. u8 ctrl_data;
  2429. u32 fw_status;
  2430. struct pcie_service_card *card = adapter->card;
  2431. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  2432. if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status))
  2433. return RDWR_STATUS_FAILURE;
  2434. ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
  2435. reg->fw_dump_host_ready);
  2436. if (ret) {
  2437. mwifiex_dbg(adapter, ERROR,
  2438. "PCIE write err\n");
  2439. return RDWR_STATUS_FAILURE;
  2440. }
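/* Handshake: the host writes the "host ready" value to the dump control
 * register and then polls it until the firmware sets it to FW_DUMP_DONE
 * (or to the caller-supplied doneflag), indicating the next chunk of dump
 * data can be read out via the fw_dump_start..fw_dump_end registers.
 */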
  2441. for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
  2442. mwifiex_read_reg_byte(adapter, reg->fw_dump_ctrl, &ctrl_data);
  2443. if (ctrl_data == FW_DUMP_DONE)
  2444. return RDWR_STATUS_SUCCESS;
  2445. if (doneflag && ctrl_data == doneflag)
  2446. return RDWR_STATUS_DONE;
  2447. if (ctrl_data != reg->fw_dump_host_ready) {
  2448. mwifiex_dbg(adapter, WARN,
  2449. "The ctrl reg was changed, re-try again!\n");
  2450. ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
  2451. reg->fw_dump_host_ready);
  2452. if (ret) {
  2453. mwifiex_dbg(adapter, ERROR,
  2454. "PCIE write err\n");
  2455. return RDWR_STATUS_FAILURE;
  2456. }
  2457. }
  2458. usleep_range(100, 200);
  2459. }
2460. mwifiex_dbg(adapter, ERROR, "Failed to poll ctrl_data\n");
  2461. return RDWR_STATUS_FAILURE;
  2462. }
2463. /* This function dumps the firmware memory into host buffers */
  2464. static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
  2465. {
  2466. struct pcie_service_card *card = adapter->card;
  2467. const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
  2468. unsigned int reg, reg_start, reg_end;
  2469. u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num;
  2470. u8 idx, i, read_reg, doneflag = 0;
  2471. enum rdwr_status stat;
  2472. u32 memory_size;
  2473. int ret;
  2474. if (!card->pcie.can_dump_fw)
  2475. return;
  2476. for (idx = 0; idx < adapter->num_mem_types; idx++) {
  2477. struct memory_type_mapping *entry =
  2478. &adapter->mem_type_mapping_tbl[idx];
  2479. if (entry->mem_ptr) {
  2480. vfree(entry->mem_ptr);
  2481. entry->mem_ptr = NULL;
  2482. }
  2483. entry->mem_size = 0;
  2484. }
  2485. mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
2486. /* Read the number of memory regions which will be dumped */
  2487. stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
  2488. if (stat == RDWR_STATUS_FAILURE)
  2489. return;
  2490. reg = creg->fw_dump_start;
  2491. mwifiex_read_reg_byte(adapter, reg, &fw_dump_num);
2492. /* The W8997 chipset firmware dump is stored in a single region */
  2493. if (fw_dump_num == 0)
  2494. dump_num = 1;
  2495. else
  2496. dump_num = fw_dump_num;
2497. /* Read the length of every memory region which will be dumped */
  2498. for (idx = 0; idx < dump_num; idx++) {
  2499. struct memory_type_mapping *entry =
  2500. &adapter->mem_type_mapping_tbl[idx];
  2501. memory_size = 0;
  2502. if (fw_dump_num != 0) {
  2503. stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
  2504. if (stat == RDWR_STATUS_FAILURE)
  2505. return;
  2506. reg = creg->fw_dump_start;
  2507. for (i = 0; i < 4; i++) {
  2508. mwifiex_read_reg_byte(adapter, reg, &read_reg);
  2509. memory_size |= (read_reg << (i * 8));
  2510. reg++;
  2511. }
  2512. } else {
  2513. memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE;
  2514. }
  2515. if (memory_size == 0) {
  2516. mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
  2517. ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
  2518. creg->fw_dump_read_done);
  2519. if (ret) {
  2520. mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
  2521. return;
  2522. }
  2523. break;
  2524. }
  2525. mwifiex_dbg(adapter, DUMP,
  2526. "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
  2527. entry->mem_ptr = vmalloc(memory_size + 1);
  2528. entry->mem_size = memory_size;
  2529. if (!entry->mem_ptr) {
  2530. mwifiex_dbg(adapter, ERROR,
  2531. "Vmalloc %s failed\n", entry->mem_name);
  2532. return;
  2533. }
  2534. dbg_ptr = entry->mem_ptr;
  2535. end_ptr = dbg_ptr + memory_size;
  2536. doneflag = entry->done_flag;
  2537. mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n",
  2538. entry->mem_name);
  2539. do {
  2540. stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
  2541. if (RDWR_STATUS_FAILURE == stat)
  2542. return;
  2543. reg_start = creg->fw_dump_start;
  2544. reg_end = creg->fw_dump_end;
  2545. for (reg = reg_start; reg <= reg_end; reg++) {
  2546. mwifiex_read_reg_byte(adapter, reg, dbg_ptr);
  2547. if (dbg_ptr < end_ptr) {
  2548. dbg_ptr++;
  2549. continue;
  2550. }
  2551. mwifiex_dbg(adapter, ERROR,
  2552. "pre-allocated buf not enough\n");
  2553. tmp_ptr =
  2554. vzalloc(memory_size + MWIFIEX_SIZE_4K);
  2555. if (!tmp_ptr)
  2556. return;
  2557. memcpy(tmp_ptr, entry->mem_ptr, memory_size);
  2558. vfree(entry->mem_ptr);
  2559. entry->mem_ptr = tmp_ptr;
  2560. tmp_ptr = NULL;
  2561. dbg_ptr = entry->mem_ptr + memory_size;
  2562. memory_size += MWIFIEX_SIZE_4K;
  2563. end_ptr = entry->mem_ptr + memory_size;
  2564. }
  2565. if (stat != RDWR_STATUS_DONE)
  2566. continue;
  2567. mwifiex_dbg(adapter, DUMP,
  2568. "%s done: size=0x%tx\n",
  2569. entry->mem_name, dbg_ptr - entry->mem_ptr);
  2570. break;
  2571. } while (true);
  2572. }
  2573. mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
  2574. }
  2575. static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
  2576. {
  2577. adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
  2578. if (!adapter->devdump_data) {
  2579. mwifiex_dbg(adapter, ERROR,
  2580. "vzalloc devdump data failure!\n");
  2581. return;
  2582. }
  2583. mwifiex_drv_info_dump(adapter);
  2584. mwifiex_pcie_fw_dump(adapter);
  2585. mwifiex_prepare_fw_dump_info(adapter);
  2586. mwifiex_upload_device_dump(adapter);
  2587. }
  2588. static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
  2589. {
  2590. struct pcie_service_card *card = adapter->card;
  2591. /* We can't afford to wait here; remove() might be waiting on us. If we
  2592. * can't grab the device lock, maybe we'll get another chance later.
  2593. */
  2594. pci_try_reset_function(card->dev);
  2595. }
  2596. static void mwifiex_pcie_work(struct work_struct *work)
  2597. {
  2598. struct pcie_service_card *card =
  2599. container_of(work, struct pcie_service_card, work);
  2600. if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
  2601. &card->work_flags))
  2602. mwifiex_pcie_device_dump_work(card->adapter);
  2603. if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
  2604. &card->work_flags))
  2605. mwifiex_pcie_card_reset_work(card->adapter);
  2606. }
  2607. /* This function dumps FW information */
  2608. static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
  2609. {
  2610. struct pcie_service_card *card = adapter->card;
  2611. if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
  2612. &card->work_flags))
  2613. schedule_work(&card->work);
  2614. }
  2615. static void mwifiex_pcie_card_reset(struct mwifiex_adapter *adapter)
  2616. {
  2617. struct pcie_service_card *card = adapter->card;
  2618. if (!test_and_set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags))
  2619. schedule_work(&card->work);
  2620. }
  2621. static int mwifiex_pcie_alloc_buffers(struct mwifiex_adapter *adapter)
  2622. {
  2623. struct pcie_service_card *card = adapter->card;
  2624. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  2625. int ret;
  2626. card->cmdrsp_buf = NULL;
  2627. ret = mwifiex_pcie_create_txbd_ring(adapter);
  2628. if (ret) {
  2629. mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n");
  2630. goto err_cre_txbd;
  2631. }
  2632. ret = mwifiex_pcie_create_rxbd_ring(adapter);
  2633. if (ret) {
  2634. mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n");
  2635. goto err_cre_rxbd;
  2636. }
  2637. ret = mwifiex_pcie_create_evtbd_ring(adapter);
  2638. if (ret) {
  2639. mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n");
  2640. goto err_cre_evtbd;
  2641. }
  2642. ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
  2643. if (ret) {
  2644. mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n");
  2645. goto err_alloc_cmdbuf;
  2646. }
  2647. if (reg->sleep_cookie) {
  2648. ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
  2649. if (ret) {
  2650. mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n");
  2651. goto err_alloc_cookie;
  2652. }
  2653. } else {
  2654. card->sleep_cookie_vbase = NULL;
  2655. }
  2656. return 0;
  2657. err_alloc_cookie:
  2658. mwifiex_pcie_delete_cmdrsp_buf(adapter);
  2659. err_alloc_cmdbuf:
  2660. mwifiex_pcie_delete_evtbd_ring(adapter);
  2661. err_cre_evtbd:
  2662. mwifiex_pcie_delete_rxbd_ring(adapter);
  2663. err_cre_rxbd:
  2664. mwifiex_pcie_delete_txbd_ring(adapter);
  2665. err_cre_txbd:
  2666. return ret;
  2667. }
  2668. static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
  2669. {
  2670. struct pcie_service_card *card = adapter->card;
  2671. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  2672. if (reg->sleep_cookie)
  2673. mwifiex_pcie_delete_sleep_cookie_buf(adapter);
  2674. mwifiex_pcie_delete_cmdrsp_buf(adapter);
  2675. mwifiex_pcie_delete_evtbd_ring(adapter);
  2676. mwifiex_pcie_delete_rxbd_ring(adapter);
  2677. mwifiex_pcie_delete_txbd_ring(adapter);
  2678. }
  2679. /*
  2680. * This function initializes the PCI-E host memory space, WCB rings, etc.
  2681. */
  2682. static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
  2683. {
  2684. struct pcie_service_card *card = adapter->card;
  2685. int ret;
  2686. struct pci_dev *pdev = card->dev;
  2687. pci_set_drvdata(pdev, card);
  2688. ret = pci_enable_device(pdev);
  2689. if (ret)
  2690. goto err_enable_dev;
  2691. pci_set_master(pdev);
  2692. ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  2693. if (ret) {
  2694. pr_err("dma_set_mask(32) failed: %d\n", ret);
  2695. goto err_set_dma_mask;
  2696. }
  2697. ret = pci_request_region(pdev, 0, DRV_NAME);
  2698. if (ret) {
  2699. pr_err("req_reg(0) error\n");
  2700. goto err_req_region0;
  2701. }
  2702. card->pci_mmap = pci_iomap(pdev, 0, 0);
  2703. if (!card->pci_mmap) {
  2704. pr_err("iomap(0) error\n");
  2705. ret = -EIO;
  2706. goto err_iomap0;
  2707. }
  2708. ret = pci_request_region(pdev, 2, DRV_NAME);
  2709. if (ret) {
  2710. pr_err("req_reg(2) error\n");
  2711. goto err_req_region2;
  2712. }
  2713. card->pci_mmap1 = pci_iomap(pdev, 2, 0);
  2714. if (!card->pci_mmap1) {
  2715. pr_err("iomap(2) error\n");
  2716. ret = -EIO;
  2717. goto err_iomap2;
  2718. }
  2719. pr_notice("PCI memory map Virt0: %pK PCI memory map Virt2: %pK\n",
  2720. card->pci_mmap, card->pci_mmap1);
  2721. ret = mwifiex_pcie_alloc_buffers(adapter);
  2722. if (ret)
  2723. goto err_alloc_buffers;
  2724. if (pdev->device == PCIE_DEVICE_ID_MARVELL_88W8897)
  2725. adapter->ignore_btcoex_events = true;
  2726. return 0;
  2727. err_alloc_buffers:
  2728. pci_iounmap(pdev, card->pci_mmap1);
  2729. err_iomap2:
  2730. pci_release_region(pdev, 2);
  2731. err_req_region2:
  2732. pci_iounmap(pdev, card->pci_mmap);
  2733. err_iomap0:
  2734. pci_release_region(pdev, 0);
  2735. err_req_region0:
  2736. err_set_dma_mask:
  2737. pci_disable_device(pdev);
  2738. err_enable_dev:
  2739. return ret;
  2740. }
  2741. /*
  2742. * This function cleans up the allocated card buffers.
  2743. */
  2744. static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
  2745. {
  2746. struct pcie_service_card *card = adapter->card;
  2747. struct pci_dev *pdev = card->dev;
  2748. const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
  2749. u32 fw_status;

	/* Call cancel_work_sync() only when we are not resetting the card,
	 * because that function never returns while a card reset is in
	 * progress. If we get here while a reset is ongoing, the reset has
	 * failed (reset failure path).
	 */
	if (!card->pci_reset_ongoing) {
		mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n");
		cancel_work_sync(&card->work);
		mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n");
	} else {
		mwifiex_dbg(adapter, MSG,
			    "skipped cancel_work_sync() because we're in card reset failure path\n");
	}
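
	/* Clear the driver-ready signature only if the firmware is still up */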
	mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
	if (fw_status == FIRMWARE_READY_PCIE) {
		mwifiex_dbg(adapter, INFO,
			    "Clearing driver ready signature\n");
		if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
			mwifiex_dbg(adapter, ERROR,
				    "Failed to write driver not-ready signature\n");
	}

	pci_disable_device(pdev);

	pci_iounmap(pdev, card->pci_mmap);
	pci_iounmap(pdev, card->pci_mmap1);
	pci_release_region(pdev, 2);
	pci_release_region(pdev, 0);

	mwifiex_pcie_free_buffers(adapter);
}
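
/*
 * This function requests the interrupt line(s) for the card.
 *
 * MSI-X is tried first when the card supports it, then MSI, and finally a
 * shared legacy interrupt line.
 */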
static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
{
	int ret, i, j;
	struct pcie_service_card *card = adapter->card;
	struct pci_dev *pdev = card->dev;

	if (card->pcie.reg->msix_support) {
		for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
			card->msix_entries[i].entry = i;
		ret = pci_enable_msix_exact(pdev, card->msix_entries,
					    MWIFIEX_NUM_MSIX_VECTORS);
		if (!ret) {
			for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) {
				card->msix_ctx[i].dev = pdev;
				card->msix_ctx[i].msg_id = i;

				ret = request_irq(card->msix_entries[i].vector,
						  mwifiex_pcie_interrupt, 0,
						  "MWIFIEX_PCIE_MSIX",
						  &card->msix_ctx[i]);
				if (ret)
					break;
			}

			if (ret) {
				mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n",
					    ret);
				for (j = 0; j < i; j++)
					free_irq(card->msix_entries[j].vector,
						 &card->msix_ctx[j]);
				pci_disable_msix(pdev);
			} else {
				mwifiex_dbg(adapter, MSG, "MSIx enabled!");
				card->msix_enable = 1;
				return 0;
			}
		}
	}
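
	/* MSI-X is unavailable or failed: fall back to MSI, then to a shared
	 * legacy interrupt if MSI cannot be enabled either.
	 */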
	if (pci_enable_msi(pdev) != 0)
		pci_disable_msi(pdev);
	else
		card->msi_enable = 1;

	mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable);

	card->share_irq_ctx.dev = pdev;
	card->share_irq_ctx.msg_id = -1;
	ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
			  "MRVL_PCIE", &card->share_irq_ctx);
	if (ret) {
		pr_err("request_irq failed: ret=%d\n", ret);
		return -1;
	}

	return 0;
}

/*
 * This function selects the firmware name for download based on the chip
 * revision ID.
 *
 * The revision ID is read from the device's revision ID register.
 */
static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
{
	int revision_id = 0;
	int version, magic;
	struct pcie_service_card *card = adapter->card;

	switch (card->dev->device) {
	case PCIE_DEVICE_ID_MARVELL_88W8766P:
		strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
		break;
	case PCIE_DEVICE_ID_MARVELL_88W8897:
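		/* Read the chip revision and pick the matching firmware image */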
		mwifiex_write_reg(adapter, 0x0c58, 0x80c00000);
		mwifiex_read_reg(adapter, 0x0c58, &revision_id);
		revision_id &= 0xff00;
		switch (revision_id) {
		case PCIE8897_A0:
			strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME);
			break;
		case PCIE8897_B0:
			strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME);
			break;
		default:
			strcpy(adapter->fw_name, PCIE8897_DEFAULT_FW_NAME);
			break;
		}
		break;
	case PCIE_DEVICE_ID_MARVELL_88W8997:
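		/* Choose between the PCIe-UART and PCIe-USB firmware variants
		 * based on the revision, chip version and magic values.
		 */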
		mwifiex_read_reg(adapter, 0x8, &revision_id);
		mwifiex_read_reg(adapter, 0x0cd0, &version);
		mwifiex_read_reg(adapter, 0x0cd4, &magic);
		revision_id &= 0xff;
		version &= 0x7;
		magic &= 0xff;
		if (revision_id == PCIE8997_A1 &&
		    magic == CHIP_MAGIC_VALUE &&
		    version == CHIP_VER_PCIEUART)
			strcpy(adapter->fw_name, PCIEUART8997_FW_NAME_V4);
		else
			strcpy(adapter->fw_name, PCIEUSB8997_FW_NAME_V4);
		break;
	default:
		break;
	}
}

/*
 * This function registers the PCIE device.
 *
 * The PCIE IRQ is claimed and the adapter is initialized with the
 * card-specific parameters (TX buffer size, memory dump table, etc.).
 */
static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
{
	struct pcie_service_card *card = adapter->card;

	/* save adapter pointer in card */
	card->adapter = adapter;

	if (mwifiex_pcie_request_irq(adapter))
		return -1;

	adapter->tx_buf_size = card->pcie.tx_buf_size;
	adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
	adapter->num_mem_types = card->pcie.num_mem_types;
	adapter->ext_scan = card->pcie.can_ext_scan;
	mwifiex_pcie_get_fw_name(adapter);

	return 0;
}

/*
 * This function unregisters the PCIE device.
 *
 * The PCIE IRQ(s) are released and the adapter pointer stored in the card
 * structure is cleared.
 */
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
	struct pcie_service_card *card = adapter->card;
	struct pci_dev *pdev = card->dev;
	int i;
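
	/* Tear down whichever interrupt mode mwifiex_pcie_request_irq() set up */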
	if (card->msix_enable) {
		for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
			synchronize_irq(card->msix_entries[i].vector);

		for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
			free_irq(card->msix_entries[i].vector,
				 &card->msix_ctx[i]);

		card->msix_enable = 0;
		pci_disable_msix(pdev);
	} else {
		mwifiex_dbg(adapter, INFO,
			    "%s(): calling free_irq()\n", __func__);
		free_irq(card->dev->irq, &card->share_irq_ctx);

		if (card->msi_enable)
			pci_disable_msi(pdev);
	}
	card->adapter = NULL;
}

/*
 * This function initializes the PCI-E host memory space, WCB rings, etc.,
 * similar to mwifiex_init_pcie(), but without resetting PCI-E state.
 */
static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
{
	struct pcie_service_card *card = adapter->card;
	struct pci_dev *pdev = card->dev;

	/* tx_buf_size might be changed to 3584 by firmware during
	 * data transfer, we should reset it to default size.
	 */
	adapter->tx_buf_size = card->pcie.tx_buf_size;

	mwifiex_pcie_alloc_buffers(adapter);

	pci_set_master(pdev);
}

/* This function cleans up the PCI-E host memory space. */
static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
{
	struct pcie_service_card *card = adapter->card;
	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
	struct pci_dev *pdev = card->dev;

	if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
		mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");

	pci_clear_master(pdev);

	adapter->seq_num = 0;

	mwifiex_pcie_free_buffers(adapter);
}
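
/* Interface callbacks used by the mwifiex core for PCIe cards */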
static struct mwifiex_if_ops pcie_ops = {
	.init_if = mwifiex_init_pcie,
	.cleanup_if = mwifiex_cleanup_pcie,
	.check_fw_status = mwifiex_check_fw_status,
	.check_winner_status = mwifiex_check_winner_status,
	.prog_fw = mwifiex_prog_fw_w_helper,
	.register_dev = mwifiex_register_dev,
	.unregister_dev = mwifiex_unregister_dev,
	.enable_int = mwifiex_pcie_enable_host_int,
	.disable_int = mwifiex_pcie_disable_host_int_noerr,
	.process_int_status = mwifiex_process_int_status,
	.host_to_card = mwifiex_pcie_host_to_card,
	.wakeup = mwifiex_pm_wakeup_card,
	.wakeup_complete = mwifiex_pm_wakeup_card_complete,

	/* PCIE specific */
	.cmdrsp_complete = mwifiex_pcie_cmdrsp_complete,
	.event_complete = mwifiex_pcie_event_complete,
	.update_mp_end_port = NULL,
	.cleanup_mpa_buf = NULL,
	.init_fw_port = mwifiex_pcie_init_fw_port,
	.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
	.card_reset = mwifiex_pcie_card_reset,
	.reg_dump = mwifiex_pcie_reg_dump,
	.device_dump = mwifiex_pcie_device_dump,
	.down_dev = mwifiex_pcie_down_dev,
	.up_dev = mwifiex_pcie_up_dev,
};

module_pci_driver(mwifiex_pcie);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
MODULE_VERSION(PCIE_VERSION);
MODULE_LICENSE("GPL v2");