// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
#include "hif.h"

static const struct hal_srng_config hw_srng_config_template[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 5,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 1,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};

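/* Note: .entry_size in the template above is in units of 32-bit words (hence
 * the ">> 2" on the descriptor sizes). The per-ring register offsets
 * (reg_start/reg_size) are not part of the template; they are filled in per
 * chip by ath11k_hal_srng_create_config().
 *
 * The rdp and wrp blocks allocated below are coherent DMA areas with one u32
 * slot per ring (rdp) or per LMAC ring (wrp); these slots serve as the
 * shared head/tail pointer locations between host, hardware and firmware.
 */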
static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
					    GFP_KERNEL);
	if (!hal->rdp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->rdp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	dma_free_coherent(ab->dev, size,
			  hal->rdp.vaddr, hal->rdp.paddr);
	hal->rdp.vaddr = NULL;
}

static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
					    GFP_KERNEL);
	if (!hal->wrp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->wrp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	dma_free_coherent(ab->dev, size,
			  hal->wrp.vaddr, hal->wrp.paddr);
	hal->wrp.vaddr = NULL;
}

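/* Program the R0 DEST_CTRL register of a CE destination ring with the
 * maximum buffer length that software posts on that ring.
 */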
static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
				    struct hal_srng *srng, int ring_num)
{
	struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
	u32 addr;
	u32 val;

	addr = HAL_CE_DST_RING_CTRL +
	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];

	val = ath11k_hif_read32(ab, addr);
	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
			  srng->u.dst_ring.max_buffer_length);
	ath11k_hif_write32(ab, addr, val);
}

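/* Program a destination ring into hardware: optional MSI address/data, ring
 * base address and size, producer interrupt thresholds, the shared
 * head-pointer (HP) location inside the rdp area, and finally the MISC
 * register bits (swap flags, SRNG enable).
 */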
static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 hp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);

		ath11k_hif_write32(ab,
				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);

	/* interrupt setup */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
			 (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
			   val);

	hp_addr = hal->rdp.paddr +
		  ((unsigned long)srng->u.dst_ring.hp_addr -
		   (unsigned long)hal->rdp.vaddr);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
			   hp_addr & HAL_ADDR_LSB_REG_MASK);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
	*srng->u.dst_ring.hp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
}

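/* Program a source (SW-to-HW) ring: same sequence as the destination ring
 * init above, but using the TCL1 register layout, a tail-pointer (TP) shadow
 * inside the rdp area, an optional low-threshold interrupt, and the loop
 * count disable bit.
 */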
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 tp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
				   val);

		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);

	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
		val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
				 ((u64)srng->ring_base_paddr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
				 (srng->entry_size * srng->num_entries));
		ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
	}

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
			 srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
			   val);

	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
				  srng->u.src_ring.low_threshold);
	}
	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
			   val);

	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
			  ((unsigned long)srng->u.src_ring.tp_addr -
			   (unsigned long)hal->rdp.vaddr);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
				   tp_addr & HAL_ADDR_LSB_REG_MASK);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}

static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		ath11k_hal_srng_src_hw_init(ab, srng);
	else
		ath11k_hal_srng_dst_hw_init(ab, srng);
}

static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
				       enum hal_ring_type type,
				       int ring_num, int mac_id)
{
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	int ring_id;

	if (ring_num >= srng_config->max_rings) {
		ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
		return -EINVAL;
	}

	ring_id = srng_config->start_ring_id + ring_num;
	if (srng_config->lmac_ring)
		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;

	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
		return -EINVAL;

	return ring_id;
}

int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
{
	struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &ab->hal.srng_config[ring_type];

	return (srng_config->entry_size << 2);
}

int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
{
	struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &ab->hal.srng_config[ring_type];

	return (srng_config->max_size / srng_config->entry_size);
}

void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
				struct hal_srng_params *params)
{
	params->ring_base_paddr = srng->ring_base_paddr;
	params->ring_base_vaddr = srng->ring_base_vaddr;
	params->num_entries = srng->num_entries;
	params->intr_timer_thres_us = srng->intr_timer_thres_us;
	params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	params->low_threshold = srng->u.src_ring.low_threshold;
	params->msi_addr = srng->msi_addr;
	params->msi_data = srng->msi_data;
	params->flags = srng->flags;
}

dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.src_ring.hp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
	else
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.dst_ring.hp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
}

dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.src_ring.tp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
	else
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.dst_ring.tp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
}

u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
{
	switch (type) {
	case HAL_CE_DESC_SRC:
		return sizeof(struct hal_ce_srng_src_desc);
	case HAL_CE_DESC_DST:
		return sizeof(struct hal_ce_srng_dest_desc);
	case HAL_CE_DESC_DST_STATUS:
		return sizeof(struct hal_ce_srng_dst_status_desc);
	}

	return 0;
}

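/* The next three helpers fill/parse CE SRNG descriptors: the source
 * descriptor carries the buffer address, length and a meta-info cookie; the
 * destination descriptor carries only the buffer address; the destination
 * status descriptor reports the received length, which is cleared after
 * reading so a stale value is not reused.
 */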
void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
				u8 byte_swap_data)
{
	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
			   byte_swap_data) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
}

void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
{
	struct hal_ce_srng_dest_desc *desc =
		(struct hal_ce_srng_dest_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
}

u32 ath11k_hal_ce_dst_status_get_length(void *buf)
{
	struct hal_ce_srng_dst_status_desc *desc =
		(struct hal_ce_srng_dst_status_desc *)buf;
	u32 len;

	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;

	return len;
}

void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
				   dma_addr_t paddr)
{
	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
					       (paddr & HAL_ADDR_LSB_REG_MASK));
	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
}

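/* Destination-ring access helpers. srng->u.dst_ring.tp and cached_hp are
 * offsets into the ring in units of 32-bit words (srng->ring_size is
 * entry_size * num_entries in the same units), so entries are pending
 * whenever tp != cached_hp.
 */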
u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);

	return NULL;
}

static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
					  struct hal_srng *srng)
{
	u32 *desc;

	/* prefetch only if desc is available */
	desc = ath11k_hal_srng_dst_peek(ab, srng);
	if (likely(desc)) {
		dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
					(srng->entry_size * sizeof(u32)),
					DMA_FROM_DEVICE);
		prefetch(desc);
	}
}

u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;

	srng->u.dst_ring.tp += srng->entry_size;

	/* wrap around to start of ring */
	if (srng->u.dst_ring.tp == srng->ring_size)
		srng->u.dst_ring.tp = 0;

	/* Try to prefetch the next descriptor in the ring */
	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
		ath11k_hal_srng_prefetch_desc(ab, srng);

	return desc;
}

int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *srng->u.dst_ring.hp_addr;
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}

/* Returns number of available entries in src ring */
int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *srng->u.src_ring.tp_addr;
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

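/* Source-ring producer helpers. Two producer indices are tracked: reap_hp
 * lets a caller reserve entries ahead of publishing them
 * (ath11k_hal_srng_src_reap_next() advances only reap_hp), while hp is what
 * eventually gets written out to hardware. ath11k_hal_srng_src_get_next_entry()
 * advances both, and ath11k_hal_srng_src_get_next_reaped() advances hp up to
 * the already reaped position.
 */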
u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;
	u32 next_hp;

	lockdep_assert_held(&srng->lock);

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}

u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
				   struct hal_srng *srng)
{
	u32 *desc;
	u32 next_reap_hp;

	lockdep_assert_held(&srng->lock);

	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		       srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + next_reap_hp;
	srng->u.src_ring.reap_hp = next_reap_hp;

	return desc;
}

u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
					 struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}

u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
	    srng->u.src_ring.cached_tp)
		return NULL;

	return srng->ring_base_vaddr + srng->u.src_ring.hp;
}

void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.cached_tp =
			*(volatile u32 *)srng->u.src_ring.tp_addr;
	} else {
		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;

		/* Try to prefetch the next descriptor in the ring */
		if (srng->flags & HAL_SRNG_FLAGS_CACHED)
			ath11k_hal_srng_prefetch_desc(ab, srng);
	}
}

/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
 * should have been called before this.
 */
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem,
					   srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem,
					   srng->u.dst_ring.tp);
		}
	}

	srng->timestamp = jiffies;
}

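/* Chain the WBM idle-link scatter buffers together (the tail of each buffer
 * holds the base address of the next one) and program the WBM idle list
 * registers: scatter buffer size, list size, ring base, head/tail pointers
 * and finally the ring enable bit.
 */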
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
				     struct hal_wbm_idle_scatter_list *sbuf,
				     u32 nsbufs, u32 tot_link_desc,
				     u32 end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
				FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
			    HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
				      reg_scatter_buf_sz * nsbufs));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_LSB,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_MSB,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[nsbufs - 1].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[nsbufs - 1].paddr >>
				 HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
				      (end_offset >> 2)));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
				      0));

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
			   2 * tot_link_desc);

	/* Enable the SRNG */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
}

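/* Set up a hal_srng instance for the given ring type/number: copy the ring
 * geometry from the per-type config and the caller's params, point hp/tp at
 * the shared rdp/wrp slots (or at the target registers / shadow registers
 * for non-LMAC rings), and program non-LMAC rings into hardware; LMAC rings
 * are left for the firmware to program. Returns the ring id on success,
 * negative errno otherwise.
 */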
int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
			  int ring_num, int mac_id,
			  struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	u32 lmac_idx;
	int i;
	u32 reg_base;

	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
		params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
	spin_lock_init(&srng->lock);
	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);

	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
				      (ring_num * srng_config->reg_size[i]);
	}

	memset(srng->ring_base_vaddr, 0,
	       (srng->entry_size * srng->num_entries) << 2);

	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
						 srng->entry_size;
		if (srng_config->lmac_ring) {
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
							    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
					(u32 *)((unsigned long)ab->mem + reg_base);
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
							    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
					(u32 *)((unsigned long)ab->mem + reg_base +
						(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base + (HAL_REO1_RING_TP(ab) -
						       HAL_REO1_RING_HP(ab)),
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem);
		}
	}

	if (srng_config->lmac_ring)
		return ring_id;

	ath11k_hal_srng_hw_init(ab, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
	}

	return ring_id;
}

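/* Shadow register support: on targets with supports_shadow_regs set, the
 * host updates ring HP/TP through per-ring shadow registers instead of
 * writing the target register directly. The helpers below map a ring's
 * HP/TP address to its shadow register slot and build the shadow register
 * address table exposed via ath11k_hal_srng_get_shadow_config().
 */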
static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
					      int shadow_cfg_idx,
					      enum hal_ring_type ring_type,
					      int ring_num)
{
	struct hal_srng *srng;
	struct ath11k_hal *hal = &ab->hal;
	int ring_id;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];

	ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = &hal->srng_list[ring_id];

	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
						   (unsigned long)ab->mem);
	else
		srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
						   (unsigned long)ab->mem);
}

int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
					 enum hal_ring_type ring_type,
					 int ring_num)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
	int shadow_cfg_idx = hal->num_shadow_reg_configured;
	u32 target_reg;

	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
		return -EINVAL;

	hal->num_shadow_reg_configured++;

	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
		      ring_num;

	/* For destination ring, shadow the TP */
	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		target_reg += HAL_OFFSET_FROM_HP_TO_TP;

	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;

	/* update hp/tp addr in the hal srng structure */
	ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
					  ring_num);

	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
		   target_reg,
		   HAL_SHADOW_REG(ab, shadow_cfg_idx),
		   shadow_cfg_idx,
		   ring_type, ring_num);

	return 0;
}

void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	int ring_type, ring_num;

	/* update all the non-CE srngs. */
	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];

		if (ring_type == HAL_CE_SRC ||
		    ring_type == HAL_CE_DST ||
		    ring_type == HAL_CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
			ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
	}
}

void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
				       u32 **cfg, u32 *len)
{
	struct ath11k_hal *hal = &ab->hal;

	*len = hal->num_shadow_reg_configured;
	*cfg = hal->shadow_reg_addr;
}

void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
					 struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	/* check whether the ring is empty. Update the shadow
	 * HP only when the ring isn't empty.
	 */
	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
		ath11k_hal_srng_access_end(ab, srng);
}

static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *s;

	hal->srng_config = kmemdup(hw_srng_config_template,
				   sizeof(hw_srng_config_template),
				   GFP_KERNEL);
	if (!hal->srng_config)
		return -ENOMEM;

	s = &hal->srng_config[HAL_REO_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);

	s = &hal->srng_config[HAL_REO_EXCEPTION];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);

	s = &hal->srng_config[HAL_REO_REINJECT];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);

	s = &hal->srng_config[HAL_REO_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);

	s = &hal->srng_config[HAL_REO_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);

	s = &hal->srng_config[HAL_TCL_DATA];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;

	s = &hal->srng_config[HAL_TCL_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;

	s = &hal->srng_config[HAL_TCL_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;

	s = &hal->srng_config[HAL_CE_SRC];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);

	s = &hal->srng_config[HAL_CE_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);

	s = &hal->srng_config[HAL_CE_DST_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
			  HAL_CE_DST_STATUS_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);

	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;

	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;

	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
			 HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;

	return 0;
}

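/* Register one lockdep key per SRNG so that each ring lock gets its own lock
 * class; this keeps lockdep from flagging legitimate nesting of two
 * different ring locks as recursive locking.
 */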
static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 ring_id;

	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
		lockdep_register_key(hal->srng_key + ring_id);
}

static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 ring_id;

	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
		lockdep_unregister_key(hal->srng_key + ring_id);
}

int ath11k_hal_srng_init(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	int ret;

	memset(hal, 0, sizeof(*hal));

	ret = ath11k_hal_srng_create_config(ab);
	if (ret)
		goto err_hal;

	ret = ath11k_hal_alloc_cont_rdp(ab);
	if (ret)
		goto err_hal;

	ret = ath11k_hal_alloc_cont_wrp(ab);
	if (ret)
		goto err_free_cont_rdp;

	ath11k_hal_register_srng_key(ab);

	return 0;

err_free_cont_rdp:
	ath11k_hal_free_cont_rdp(ab);

err_hal:
	return ret;
}
EXPORT_SYMBOL(ath11k_hal_srng_init);

void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;

	ath11k_hal_unregister_srng_key(ab);
	ath11k_hal_free_cont_rdp(ab);
	ath11k_hal_free_cont_wrp(ab);
	kfree(hal->srng_config);
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);

void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
	struct hal_srng *srng;
	struct ath11k_ext_irq_grp *irq_grp;
	struct ath11k_ce_pipe *ce_pipe;
	int i;

	ath11k_err(ab, "Last interrupt received for each CE:\n");
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
			   i, ce_pipe->pipe_num,
			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
	}

	ath11k_err(ab, "\nLast interrupt received for each group:\n");
	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		ath11k_err(ab, "group_id %d %ums before\n",
			   irq_grp->grp_id,
			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
	}

	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
		srng = &ab->hal.srng_list[i];

		if (!srng->initialized)
			continue;

		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
			ath11k_err(ab,
				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.src_ring.hp,
				   srng->u.src_ring.reap_hp,
				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
				   srng->u.src_ring.last_tp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
			ath11k_err(ab,
				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.dst_ring.tp,
				   *srng->u.dst_ring.hp_addr,
				   srng->u.dst_ring.cached_hp,
				   srng->u.dst_ring.last_hp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
	}
}