snoc.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898
  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (c) 2018 The Linux Foundation. All rights reserved.
  4. */
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
  17. #include "ce.h"
  18. #include "coredump.h"
  19. #include "debug.h"
  20. #include "hif.h"
  21. #include "htc.h"
  22. #include "snoc.h"
  23. #define ATH10K_SNOC_RX_POST_RETRY_MS 50
  24. #define CE_POLL_PIPE 4
  25. #define ATH10K_SNOC_WAKE_IRQ 2
/* Names of the twelve per-copy-engine interrupt resources; the array index
 * equals the CE id.  NOTE(review): presumably matched against platform IRQ
 * resource names in the probe path — confirm against the caller.
 */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
/* Regulator supply names the driver needs for the WCN3990 power rails. */
static const char * const ath10k_regulators[] = {
	"vdd-0.8-cx-mx",
	"vdd-1.8-xo",
	"vdd-1.3-rfa",
	"vdd-3.3-ch0",
	"vdd-3.3-ch1",
};
/* Clock names the driver acquires (reference clock pin and QDSS trace clock). */
static const char * const ath10k_clocks[] = {
	"cxo_ref_clk_pin", "qdss",
};
  50. static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
  51. static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
  52. static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
  53. static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
  54. static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
  55. static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
/* Per-SoC match data for WCN3990: hardware revision, 35-bit DMA addressing
 * and a 0x100000-byte (1 MiB) MSA region size.
 */
static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(35),
	.msa_size = 0x100000,
};
  61. #define WCN3990_SRC_WR_IDX_OFFSET 0x3C
  62. #define WCN3990_DST_WR_IDX_OFFSET 0x40
/* Shadow register configuration handed to the target: for each CE id, the
 * offset of the write-index register to mirror.  CEs 0/3/4/5/7 mirror the
 * source (host->target) write index; CEs 1/2/7/8/9/10/11 mirror the
 * destination (target->host) write index.  Note CE 7 deliberately appears
 * in both groups.
 */
static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{
		.ce_id = __cpu_to_le16(0),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(3),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(4),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(5),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(7),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(1),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(2),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(7),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(8),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(9),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(10),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
	{
		.ce_id = __cpu_to_le16(11),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
};
/* Host-side copy engine attributes, indexed by CE id.  src_nentries/
 * dest_nentries size the send/receive rings (0 means the direction is
 * unused); src_sz_max bounds a single transfer; send_cb/recv_cb are the
 * completion callbacks wired into the CE layer.
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT; interrupts disabled, serviced by polling */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target ) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};
/* Target-side pipe configuration sent to the firmware; all fields are
 * little-endian on the wire.  One entry per CE id.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT; polled, so target interrupts are disabled */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host.
	 * NOTE(review): pipedir is a literal 4, outside the named PIPEDIR_*
	 * values visible here — presumably a host-only marker understood by
	 * the firmware; confirm against the interface definition.
	 */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};
/* HTC service -> CE pipe map.  Each entry is a positional triplet of
 * { service_id, pipedir, pipenum }; ath10k_snoc_hif_map_service_to_pipe()
 * scans the whole table, and an all-zero entry terminates it.
 */
static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
  426. static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
  427. {
  428. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  429. iowrite32(value, ar_snoc->mem + offset);
  430. }
  431. static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
  432. {
  433. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  434. u32 val;
  435. val = ioread32(ar_snoc->mem + offset);
  436. return val;
  437. }
/* Allocate one rx skb, DMA-map it and post it to the pipe's copy engine.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, -EIO if the
 * DMA mapping fails, or the error returned by ce_rx_post_buf() (in which
 * case the mapping is undone and the skb freed).
 */
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* NOTE(review): presumably the CE hardware needs 4-byte aligned
	 * buffers — the warning only diagnoses, it does not recover.
	 */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	/* Map the full writable area (data + tailroom) for device writes */
	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Remember the DMA address so the completion path can unmap it */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	/* CE ring accesses are serialized by ce_lock */
	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		/* Posting failed: undo the mapping and drop the skb */
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
  470. static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
  471. {
  472. struct ath10k *ar = pipe->hif_ce_state;
  473. struct ath10k_ce *ce = ath10k_ce_priv(ar);
  474. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  475. struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
  476. int ret, num;
  477. if (pipe->buf_sz == 0)
  478. return;
  479. if (!ce_pipe->dest_ring)
  480. return;
  481. spin_lock_bh(&ce->ce_lock);
  482. num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
  483. spin_unlock_bh(&ce->ce_lock);
  484. while (num--) {
  485. ret = __ath10k_snoc_rx_post_buf(pipe);
  486. if (ret) {
  487. if (ret == -ENOSPC)
  488. break;
  489. ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
  490. mod_timer(&ar_snoc->rx_post_retry, jiffies +
  491. ATH10K_SNOC_RX_POST_RETRY_MS);
  492. break;
  493. }
  494. }
  495. }
  496. static void ath10k_snoc_rx_post(struct ath10k *ar)
  497. {
  498. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  499. int i;
  500. for (i = 0; i < CE_COUNT; i++)
  501. ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
  502. }
/* Common rx completion path: drain all completed buffers from a CE pipe,
 * unmap them, deliver each via @callback, then replenish the ring.
 *
 * Delivery happens in a second loop, after all completions are unmapped,
 * so @callback runs outside the CE completion iteration.
 */
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		/* The whole mapped area was data + tailroom at post time */
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* Drop frames the device claims are larger than the buffer */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}
/* CE rx callback for pipes carrying HTC traffic (e.g. WMI on CE2). */
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
/* CE rx callback for pipes that carry both HTT and HTC traffic. */
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
/* Deliver an HTT rx frame: strip the HTC header and hand the payload to
 * the HTT target->host message handler.
 */
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
/* CE rx callback for the HTT-only rx pipe (CE5); services the polled CE4
 * pipe first, then delivers frames straight to HTT.
 */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
/* rx_post_retry timer callback: retry posting rx buffers on all pipes
 * after an earlier refill attempt failed.
 */
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}
  572. static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
  573. {
  574. struct ath10k *ar = ce_state->ar;
  575. struct sk_buff_head list;
  576. struct sk_buff *skb;
  577. __skb_queue_head_init(&list);
  578. while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
  579. if (!skb)
  580. continue;
  581. __skb_queue_tail(&list, skb);
  582. }
  583. while ((skb = __skb_dequeue(&list)))
  584. ath10k_htc_tx_completion_handler(ar, skb);
  585. }
  586. static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
  587. {
  588. struct ath10k *ar = ce_state->ar;
  589. struct sk_buff *skb;
  590. while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
  591. if (!skb)
  592. continue;
  593. dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
  594. skb->len, DMA_TO_DEVICE);
  595. ath10k_htt_hif_tx_complete(ar, skb);
  596. }
  597. }
/* Send a scatter-gather list on @pipe_id: the first n_items-1 fragments go
 * out with CE_SEND_FLAG_GATHER and the final one without it, all under a
 * single ce_lock hold so the gather sequence is not interleaved.
 *
 * On failure, every descriptor queued so far (i of them) is reverted
 * before the lock is dropped.  Returns 0 or the CE send error.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	/* Queue all but the last fragment with the gather flag set */
	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* Final fragment: no gather flag, completes the transfer */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	/* Revert one queued descriptor per fragment already accepted */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
/* Fill in the BMI target info; both version and type are hardwired to the
 * WCN3990 hw revision.  Always returns 0.
 */
static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}
/* Return the number of free source-ring entries on @pipe's copy engine. */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}
/* Reap tx completions on @pipe.  Unless @force, skip the (expensive) CE
 * service pass while more than half of the source ring is still free.
 */
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		/* More than half the ring free: nothing urgent to reap */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
/* Resolve @service_id to its uplink and downlink CE pipe numbers by
 * scanning target_service_to_ce_map_wlan.
 *
 * The WARN_ONs flag duplicate map entries for the same direction (the last
 * match wins).  Returns -ENOENT unless both directions were found.
 */
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct ce_service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}
/* Default (control) pipe pair: the mapping of the RSVD_CTRL service.  The
 * return value is deliberately ignored — RSVD_CTRL is always present in
 * the static map, so the lookup cannot fail.
 */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
  713. static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
  714. {
  715. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  716. int id;
  717. for (id = 0; id < CE_COUNT_MAX; id++)
  718. disable_irq(ar_snoc->ce_irqs[id].irq_line);
  719. }
  720. static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
  721. {
  722. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  723. int id;
  724. for (id = 0; id < CE_COUNT_MAX; id++)
  725. enable_irq(ar_snoc->ce_irqs[id].irq_line);
  726. }
/* Release every rx buffer still posted on a pipe's CE destination ring:
 * detach it from the ring, unmap the DMA region that was mapped at post
 * time (data + tailroom), and free the skb.
 */
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	/* Pipes without an rx ring or rx buffers have nothing to clean */
	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
  752. static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
  753. {
  754. struct ath10k_ce_pipe *ce_pipe;
  755. struct ath10k_ce_ring *ce_ring;
  756. struct sk_buff *skb;
  757. struct ath10k *ar;
  758. int i;
  759. ar = snoc_pipe->hif_ce_state;
  760. ce_pipe = snoc_pipe->ce_hdl;
  761. ce_ring = ce_pipe->src_ring;
  762. if (!ce_ring)
  763. return;
  764. if (!snoc_pipe->buf_sz)
  765. return;
  766. for (i = 0; i < ce_ring->nentries; i++) {
  767. skb = ce_ring->per_transfer_context[i];
  768. if (!skb)
  769. continue;
  770. ce_ring->per_transfer_context[i] = NULL;
  771. ath10k_htc_tx_completion_handler(ar, skb);
  772. }
  773. }
  774. static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
  775. {
  776. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  777. struct ath10k_snoc_pipe *pipe_info;
  778. int pipe_num;
  779. del_timer_sync(&ar_snoc->rx_post_retry);
  780. for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
  781. pipe_info = &ar_snoc->pipe_info[pipe_num];
  782. ath10k_snoc_rx_pipe_cleanup(pipe_info);
  783. ath10k_snoc_tx_pipe_cleanup(pipe_info);
  784. }
  785. }
/* HIF "stop" callback: disable CE interrupts (unless a crash flush is
 * in progress, in which case they are skipped), quiesce NAPI and drop
 * all buffers still on the copy-engine rings.
 */
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	ath10k_core_napi_sync_disable(ar);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
/* HIF "start" callback: clear stale pending-CE bits, enable NAPI and
 * the per-CE interrupt lines, post initial rx buffers and mark any
 * ongoing recovery as finished. Always returns 0.
 */
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);

	ath10k_core_napi_enable(ar);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}
  805. static int ath10k_snoc_init_pipes(struct ath10k *ar)
  806. {
  807. int i, ret;
  808. for (i = 0; i < CE_COUNT; i++) {
  809. ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
  810. if (ret) {
  811. ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
  812. i, ret);
  813. return ret;
  814. }
  815. }
  816. return 0;
  817. }
  818. static int ath10k_snoc_wlan_enable(struct ath10k *ar,
  819. enum ath10k_firmware_mode fw_mode)
  820. {
  821. struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
  822. struct ath10k_qmi_wlan_enable_cfg cfg;
  823. enum wlfw_driver_mode_enum_v01 mode;
  824. int pipe_num;
  825. for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
  826. tgt_cfg[pipe_num].pipe_num =
  827. target_ce_config_wlan[pipe_num].pipenum;
  828. tgt_cfg[pipe_num].pipe_dir =
  829. target_ce_config_wlan[pipe_num].pipedir;
  830. tgt_cfg[pipe_num].nentries =
  831. target_ce_config_wlan[pipe_num].nentries;
  832. tgt_cfg[pipe_num].nbytes_max =
  833. target_ce_config_wlan[pipe_num].nbytes_max;
  834. tgt_cfg[pipe_num].flags =
  835. target_ce_config_wlan[pipe_num].flags;
  836. tgt_cfg[pipe_num].reserved = 0;
  837. }
  838. cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
  839. sizeof(struct ath10k_tgt_pipe_cfg);
  840. cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
  841. &tgt_cfg;
  842. cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
  843. sizeof(struct ath10k_svc_pipe_cfg);
  844. cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
  845. &target_service_to_ce_map_wlan;
  846. cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
  847. cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
  848. &target_shadow_reg_cfg_map;
  849. switch (fw_mode) {
  850. case ATH10K_FIRMWARE_MODE_NORMAL:
  851. mode = QMI_WLFW_MISSION_V01;
  852. break;
  853. case ATH10K_FIRMWARE_MODE_UTF:
  854. mode = QMI_WLFW_FTM_V01;
  855. break;
  856. default:
  857. ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
  858. return -EINVAL;
  859. }
  860. return ath10k_qmi_wlan_enable(ar, &cfg, mode,
  861. NULL);
  862. }
  863. static int ath10k_hw_power_on(struct ath10k *ar)
  864. {
  865. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  866. int ret;
  867. ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
  868. ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
  869. if (ret)
  870. return ret;
  871. ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
  872. if (ret)
  873. goto vreg_off;
  874. return ret;
  875. vreg_off:
  876. regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
  877. return ret;
  878. }
  879. static int ath10k_hw_power_off(struct ath10k *ar)
  880. {
  881. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  882. ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
  883. clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
  884. return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
  885. }
/* Send a QMI wlan-disable to firmware unless a genuine crash recovery
 * (both flags set) is in progress.
 */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	/* If both ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY
	 * flags are not set, it means that the driver has restarted
	 * due to a crash inject via debugfs. In this case, the driver
	 * needs to restart the firmware and hence send qmi wlan disable,
	 * during the driver restart sequence.
	 */
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
	    !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_qmi_wlan_disable(ar);
}
  899. static void ath10k_snoc_hif_power_down(struct ath10k *ar)
  900. {
  901. ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
  902. ath10k_snoc_wlan_disable(ar);
  903. ath10k_ce_free_rri(ar);
  904. ath10k_hw_power_off(ar);
  905. }
/* HIF "power up" callback: power the SoC, enable WLAN in firmware
 * (via QMI) in @fw_mode, allocate the RRI, initialize all CE pipes
 * and enable CE interrupts.
 *
 * Returns 0 on success; on failure the steps done so far are undone
 * in reverse order and a negative errno is returned.
 */
static int ath10k_snoc_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_hw_power_on(ar);
	if (ret) {
		ath10k_err(ar, "failed to power on device: %d\n", ret);
		return ret;
	}

	ret = ath10k_snoc_wlan_enable(ar, fw_mode);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		goto err_hw_power_off;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_free_rri;
	}

	ath10k_ce_enable_interrupts(ar);

	return 0;

err_free_rri:
	ath10k_ce_free_rri(ar);
	ath10k_snoc_wlan_disable(ar);

err_hw_power_off:
	ath10k_hw_power_off(ar);

	return ret;
}
  937. static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
  938. u8 fw_log_mode)
  939. {
  940. u8 fw_dbg_mode;
  941. if (fw_log_mode)
  942. fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
  943. else
  944. fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
  945. return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
  946. }
  947. #ifdef CONFIG_PM
  948. static int ath10k_snoc_hif_suspend(struct ath10k *ar)
  949. {
  950. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  951. int ret;
  952. if (!device_may_wakeup(ar->dev))
  953. return -EPERM;
  954. ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
  955. if (ret) {
  956. ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
  957. return ret;
  958. }
  959. ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");
  960. return ret;
  961. }
  962. static int ath10k_snoc_hif_resume(struct ath10k *ar)
  963. {
  964. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  965. int ret;
  966. if (!device_may_wakeup(ar->dev))
  967. return -EPERM;
  968. ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
  969. if (ret) {
  970. ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
  971. return ret;
  972. }
  973. ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");
  974. return ret;
  975. }
  976. #endif
/* HIF callbacks through which the ath10k core drives the SNOC bus;
 * the PM hooks are only compiled in when CONFIG_PM is enabled.
 */
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32 = ath10k_snoc_read32,
	.write32 = ath10k_snoc_write32,
	.start = ath10k_snoc_hif_start,
	.stop = ath10k_snoc_hif_stop,
	.map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_snoc_hif_get_default_pipe,
	.power_up = ath10k_snoc_hif_power_up,
	.power_down = ath10k_snoc_hif_power_down,
	.tx_sg = ath10k_snoc_hif_tx_sg,
	.send_complete_check = ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
	.get_target_info = ath10k_snoc_hif_get_target_info,
	.set_target_log_mode = ath10k_snoc_hif_set_target_log_mode,
#ifdef CONFIG_PM
	.suspend = ath10k_snoc_hif_suspend,
	.resume = ath10k_snoc_hif_resume,
#endif
};
/* Register accessors handed to the shared copy-engine layer. */
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32 = ath10k_snoc_read32,
	.write32 = ath10k_snoc_write32,
};
  1000. static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
  1001. {
  1002. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  1003. int i;
  1004. for (i = 0; i < CE_COUNT_MAX; i++) {
  1005. if (ar_snoc->ce_irqs[i].irq_line == irq)
  1006. return i;
  1007. }
  1008. ath10k_err(ar, "No matching CE id for irq %d\n", irq);
  1009. return -EINVAL;
  1010. }
/* Per-CE interrupt handler: mask this CE's interrupt, mark the CE
 * pending and defer the actual servicing to NAPI context.
 */
static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	/* Keep the line masked until the poll loop has serviced this CE
	 * (it re-enables the interrupt in ath10k_snoc_napi_poll()).
	 */
	ath10k_ce_disable_interrupt(ar, ce_id);
	set_bit(ce_id, ar_snoc->pending_ce_irqs);

	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
/* NAPI poll: service every CE flagged pending by the IRQ handler,
 * re-enabling each CE's interrupt afterwards, then run HTT tx/rx
 * completion work bounded by @budget.
 *
 * Returns the amount of work done; napi_complete() is called when
 * less than @budget was consumed or a crash flush is in progress.
 */
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int done = 0;
	int ce_id;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
		napi_complete(ctx);
		return done;
	}

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
			ath10k_ce_per_engine_service(ar, ce_id);
			ath10k_ce_enable_interrupt(ar, ce_id);
		}

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget)
		napi_complete(ctx);

	return done;
}
/* Register the SNOC NAPI poll handler on ar->napi_dev. */
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
}
  1050. static int ath10k_snoc_request_irq(struct ath10k *ar)
  1051. {
  1052. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  1053. int ret, id;
  1054. for (id = 0; id < CE_COUNT_MAX; id++) {
  1055. ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
  1056. ath10k_snoc_per_engine_handler,
  1057. IRQF_NO_AUTOEN, ce_name[id], ar);
  1058. if (ret) {
  1059. ath10k_err(ar,
  1060. "failed to register IRQ handler for CE %d: %d\n",
  1061. id, ret);
  1062. goto err_irq;
  1063. }
  1064. }
  1065. return 0;
  1066. err_irq:
  1067. for (id -= 1; id >= 0; id--)
  1068. free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
  1069. return ret;
  1070. }
  1071. static void ath10k_snoc_free_irq(struct ath10k *ar)
  1072. {
  1073. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  1074. int id;
  1075. for (id = 0; id < CE_COUNT_MAX; id++)
  1076. free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
  1077. }
/* Resolve platform resources from DT: map the "membase" MMIO region,
 * look up one IRQ per copy engine, and read the optional
 * "qcom,xo-cal-data" property.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_snoc_resource_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int i, ret = 0;

	pdev = ar_snoc->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
	if (!res) {
		ath10k_err(ar, "Memory base not found in DT\n");
		return -EINVAL;
	}

	ar_snoc->mem_pa = res->start;
	/* devm-managed: unmapped automatically on driver detach. */
	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
				    resource_size(res));
	if (!ar_snoc->mem) {
		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
			   &ar_snoc->mem_pa);
		return -EINVAL;
	}

	/* One interrupt line per copy engine, in CE order. */
	for (i = 0; i < CE_COUNT; i++) {
		ret = platform_get_irq(ar_snoc->dev, i);
		if (ret < 0)
			return ret;
		ar_snoc->ce_irqs[i].irq_line = ret;
	}

	/* XO calibration data is optional; absence is not an error. */
	ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
				       &ar_snoc->xo_cal_data);
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
	if (ret == 0) {
		ar_snoc->xo_cal_supported = true;
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
			   ar_snoc->xo_cal_data);
	}

	return 0;
}
  1114. static void ath10k_snoc_quirks_init(struct ath10k *ar)
  1115. {
  1116. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  1117. struct device *dev = &ar_snoc->dev->dev;
  1118. if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
  1119. set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
  1120. }
/* Handle firmware state indications delivered by the QMI layer.
 *
 * FW_READY: registers the ath10k core on the first indication (using
 * the reported SoC version as chip id); on subsequent indications it
 * kicks core recovery instead. FW_DOWN: sets the recovery and
 * crash-flush flags. All indications are ignored while the driver is
 * unregistering.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params = {};
	int ret;

	if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		return 0;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
			ath10k_core_start_recovery(ar);
			break;
		}

		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "Failed to register driver core: %d\n",
				   ret);
			return ret;
		}
		set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
		set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}
/* Allocate per-pipe copy-engine state: set up the rx-post retry timer
 * and the CE lock, allocate each CE pipe from the static host config,
 * and register NAPI.
 *
 * Returns 0 on success or the first failing pipe's error.
 *
 * NOTE(review): pipes allocated before a mid-loop failure are not
 * freed here, and the caller's error path skips
 * ath10k_snoc_release_resource() — confirm whether this can leak.
 */
static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *pipe;
	int i, ret;

	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
	spin_lock_init(&ce->ce_lock);

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_snoc->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
	}

	ath10k_snoc_init_napi(ar);

	return 0;
}
  1178. static void ath10k_snoc_release_resource(struct ath10k *ar)
  1179. {
  1180. int i;
  1181. netif_napi_del(&ar->napi);
  1182. for (i = 0; i < CE_COUNT; i++)
  1183. ath10k_ce_free_pipe(ar, i);
  1184. }
/* Copy the MSA region into the coredump buffer, preceded by a
 * dump-ram-data header describing it. The copy is capped at the
 * first memory region's length when that is smaller than the MSA.
 *
 * NOTE(review): assumes crash_data->ramdump_buf_len covers
 * sizeof(*hdr) plus the copied length — confirm against the coredump
 * buffer allocation.
 */
static void ath10k_msa_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	size_t buf_len;
	u8 *buf;

	if (!crash_data || !crash_data->ramdump_buf)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;
	memset(buf, 0, buf_len);

	/* Reserve space for the header. */
	hdr = (void *)buf;
	buf += sizeof(*hdr);
	buf_len -= sizeof(*hdr);

	hdr->region_type = cpu_to_le32(current_region->type);
	hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
	hdr->length = cpu_to_le32(ar->msa.mem_size);

	if (current_region->len < ar->msa.mem_size) {
		/* Truncated dump: region descriptor is smaller than the MSA. */
		memcpy(buf, ar->msa.vaddr, current_region->len);
		ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
			    current_region->len, ar->msa.mem_size);
	} else {
		memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
	}
}
  1217. void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
  1218. {
  1219. struct ath10k_fw_crash_data *crash_data;
  1220. char guid[UUID_STRING_LEN + 1];
  1221. mutex_lock(&ar->dump_mutex);
  1222. spin_lock_bh(&ar->data_lock);
  1223. ar->stats.fw_crash_counter++;
  1224. spin_unlock_bh(&ar->data_lock);
  1225. crash_data = ath10k_coredump_new(ar);
  1226. if (crash_data)
  1227. scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
  1228. else
  1229. scnprintf(guid, sizeof(guid), "n/a");
  1230. ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
  1231. ath10k_print_driver_info(ar);
  1232. ath10k_msa_dump_memory(ar, crash_data);
  1233. mutex_unlock(&ar->dump_mutex);
  1234. }
/* SSR (subsystem restart) notifier for the modem: track whether the
 * modem was deliberately stopped (as opposed to crashed) via the
 * MODEM_STOPPED flag. Always returns NOTIFY_OK.
 */
static int ath10k_snoc_modem_notify(struct notifier_block *nb, unsigned long action,
				    void *data)
{
	struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, nb);
	struct ath10k *ar = ar_snoc->ar;
	struct qcom_ssr_notify_data *notify_data = data;

	switch (action) {
	case QCOM_SSR_BEFORE_POWERUP:
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem starting event\n");
		clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
		break;
	case QCOM_SSR_AFTER_POWERUP:
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem running event\n");
		break;
	case QCOM_SSR_BEFORE_SHUTDOWN:
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem %s event\n",
			   notify_data->crashed ? "crashed" : "stopping");
		/* Only an orderly shutdown counts as "stopped". */
		if (!notify_data->crashed)
			set_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
		else
			clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
		break;
	case QCOM_SSR_AFTER_SHUTDOWN:
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem offline event\n");
		break;
	default:
		ath10k_err(ar, "received unrecognized event %lu\n", action);
		break;
	}

	return NOTIFY_OK;
}
  1266. static int ath10k_modem_init(struct ath10k *ar)
  1267. {
  1268. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  1269. void *notifier;
  1270. int ret;
  1271. ar_snoc->nb.notifier_call = ath10k_snoc_modem_notify;
  1272. notifier = qcom_register_ssr_notifier("mpss", &ar_snoc->nb);
  1273. if (IS_ERR(notifier)) {
  1274. ret = PTR_ERR(notifier);
  1275. ath10k_err(ar, "failed to initialize modem notifier: %d\n", ret);
  1276. return ret;
  1277. }
  1278. ar_snoc->notifier = notifier;
  1279. return 0;
  1280. }
  1281. static void ath10k_modem_deinit(struct ath10k *ar)
  1282. {
  1283. int ret;
  1284. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  1285. ret = qcom_unregister_ssr_notifier(ar_snoc->notifier, &ar_snoc->nb);
  1286. if (ret)
  1287. ath10k_err(ar, "error %d unregistering notifier\n", ret);
  1288. }
/* Set up the MSA firmware memory region: use the fixed region given
 * by the DT "memory-region" phandle when present, otherwise allocate
 * @msa_size bytes of DMA-coherent memory. Both paths are devm-managed.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
	struct device *dev = ar->dev;
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (node) {
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret) {
			dev_err(dev, "failed to resolve msa fixed region\n");
			return ret;
		}

		ar->msa.paddr = r.start;
		ar->msa.mem_size = resource_size(&r);
		ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
					      ar->msa.mem_size,
					      MEMREMAP_WT);
		if (IS_ERR(ar->msa.vaddr)) {
			dev_err(dev, "failed to map memory region: %pa\n",
				&r.start);
			return PTR_ERR(ar->msa.vaddr);
		}
	} else {
		/* No fixed region in DT: fall back to a coherent allocation. */
		ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
						    &ar->msa.paddr,
						    GFP_KERNEL);
		if (!ar->msa.vaddr) {
			ath10k_err(ar, "failed to allocate dma memory for msa region\n");
			return -ENOMEM;
		}
		ar->msa.mem_size = msa_size;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
		   &ar->msa.paddr,
		   ar->msa.vaddr);

	return 0;
}
  1328. static int ath10k_fw_init(struct ath10k *ar)
  1329. {
  1330. struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
  1331. struct device *host_dev = &ar_snoc->dev->dev;
  1332. struct platform_device_info info;
  1333. struct iommu_domain *iommu_dom;
  1334. struct platform_device *pdev;
  1335. struct device_node *node;
  1336. int ret;
  1337. node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
  1338. if (!node) {
  1339. ar_snoc->use_tz = true;
  1340. return 0;
  1341. }
  1342. memset(&info, 0, sizeof(info));
  1343. info.fwnode = &node->fwnode;
  1344. info.parent = host_dev;
  1345. info.name = node->name;
  1346. info.dma_mask = DMA_BIT_MASK(32);
  1347. pdev = platform_device_register_full(&info);
  1348. if (IS_ERR(pdev)) {
  1349. of_node_put(node);
  1350. return PTR_ERR(pdev);
  1351. }
  1352. pdev->dev.of_node = node;
  1353. ret = of_dma_configure(&pdev->dev, node, true);
  1354. if (ret) {
  1355. ath10k_err(ar, "dma configure fail: %d\n", ret);
  1356. goto err_unregister;
  1357. }
  1358. ar_snoc->fw.dev = &pdev->dev;
  1359. iommu_dom = iommu_domain_alloc(&platform_bus_type);
  1360. if (!iommu_dom) {
  1361. ath10k_err(ar, "failed to allocate iommu domain\n");
  1362. ret = -ENOMEM;
  1363. goto err_unregister;
  1364. }
  1365. ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
  1366. if (ret) {
  1367. ath10k_err(ar, "could not attach device: %d\n", ret);
  1368. goto err_iommu_free;
  1369. }
  1370. ar_snoc->fw.iommu_domain = iommu_dom;
  1371. ar_snoc->fw.fw_start_addr = ar->msa.paddr;
  1372. ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
  1373. ar->msa.paddr, ar->msa.mem_size,
  1374. IOMMU_READ | IOMMU_WRITE);
  1375. if (ret) {
  1376. ath10k_err(ar, "failed to map firmware region: %d\n", ret);
  1377. goto err_iommu_detach;
  1378. }
  1379. of_node_put(node);
  1380. return 0;
  1381. err_iommu_detach:
  1382. iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
  1383. err_iommu_free:
  1384. iommu_domain_free(iommu_dom);
  1385. err_unregister:
  1386. platform_device_unregister(pdev);
  1387. of_node_put(node);
  1388. return ret;
  1389. }
/* Tear down the firmware subdevice: unmap the MSA region from the
 * IOMMU domain, detach and free the domain, and unregister the
 * platform device. A no-op (returning 0) when firmware is managed by
 * TrustZone. Always returns 0.
 *
 * NOTE(review): relies on fw.mapped_mem_size holding the size that
 * was actually mapped — verify it is recorded when the mapping is
 * created, otherwise iommu_unmap() is called with size 0.
 */
static int ath10k_fw_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
	struct iommu_domain *iommu;
	size_t unmapped_size;

	if (ar_snoc->use_tz)
		return 0;

	iommu = ar_snoc->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
				    mapped_size);
	if (unmapped_size != mapped_size)
		ath10k_err(ar, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ar_snoc->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ar_snoc->fw.dev));

	return 0;
}
/* Device-tree match table; .data carries the per-SoC driver data
 * fetched in probe via device_get_match_data().
 */
static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	 .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
/* Platform-bus probe: create the ath10k core, resolve DT resources,
 * request per-CE IRQs, acquire regulators and clocks, set up the MSA
 * region and firmware subdevice, then register the QMI client and the
 * modem SSR notifier. Resources acquired so far are released in
 * reverse order through the labelled unwind path on any failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_snoc_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;
	struct ath10k_snoc *ar_snoc;
	struct device *dev;
	struct ath10k *ar;
	u32 msa_size;
	int ret;
	u32 i;

	dev = &pdev->dev;
	drv_data = device_get_match_data(dev);
	if (!drv_data) {
		dev_err(dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
	if (ret) {
		dev_err(dev, "failed to set dma mask: %d\n", ret);
		return ret;
	}

	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
				drv_data->hw_rev, &ath10k_snoc_hif_ops);
	if (!ar) {
		dev_err(dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	/* Wire up the core <-> bus-private cross references. */
	ar_snoc = ath10k_snoc_priv(ar);
	ar_snoc->dev = pdev;
	platform_set_drvdata(pdev, ar);
	ar_snoc->ar = ar;
	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
	ar->ce_priv = &ar_snoc->ce;
	msa_size = drv_data->msa_size;

	ath10k_snoc_quirks_init(ar);

	ret = ath10k_snoc_resource_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_setup_resource(ar);
	if (ret) {
		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_release_resource;
	}

	/* Regulators: one bulk entry per name in ath10k_regulators. */
	ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
	ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
				      sizeof(*ar_snoc->vregs), GFP_KERNEL);
	if (!ar_snoc->vregs) {
		ret = -ENOMEM;
		goto err_free_irq;
	}
	for (i = 0; i < ar_snoc->num_vregs; i++)
		ar_snoc->vregs[i].supply = ath10k_regulators[i];

	ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
				      ar_snoc->vregs);
	if (ret < 0)
		goto err_free_irq;

	/* Clocks: optional — missing clocks are tolerated by the
	 * _optional bulk getter.
	 */
	ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
	ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
				     sizeof(*ar_snoc->clks), GFP_KERNEL);
	if (!ar_snoc->clks) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	for (i = 0; i < ar_snoc->num_clks; i++)
		ar_snoc->clks[i].id = ath10k_clocks[i];

	ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
					 ar_snoc->clks);
	if (ret)
		goto err_free_irq;

	ret = ath10k_setup_msa_resources(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_fw_init(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_qmi_init(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
		goto err_fw_deinit;
	}

	ret = ath10k_modem_init(ar);
	if (ret)
		goto err_qmi_deinit;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");

	return 0;

err_qmi_deinit:
	ath10k_qmi_deinit(ar);

err_fw_deinit:
	ath10k_fw_deinit(ar);

err_free_irq:
	ath10k_snoc_free_irq(ar);

err_release_resource:
	ath10k_snoc_release_resource(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
/* Common teardown shared by remove and shutdown: flag unregistration
 * (so late firmware indications are ignored), unregister the core and
 * release firmware, IRQ, CE, modem-notifier and QMI resources, then
 * destroy the core. Always returns 0.
 */
static int ath10k_snoc_free_resources(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n");

	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_fw_deinit(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_modem_deinit(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}
/* Platform-bus remove: if a recovery is in flight, give it up to
 * three seconds to complete before tearing everything down.
 */
static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	ath10k_snoc_free_resources(ar);

	return 0;
}
  1548. static void ath10k_snoc_shutdown(struct platform_device *pdev)
  1549. {
  1550. struct ath10k *ar = platform_get_drvdata(pdev);
  1551. ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
  1552. ath10k_snoc_free_resources(ar);
  1553. }
/* Platform driver glue, matched against ath10k_snoc_dt_match. */
static struct platform_driver ath10k_snoc_driver = {
	.probe = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.shutdown = ath10k_snoc_shutdown,
	.driver = {
		.name = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);
MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");