hal_srng.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * * Neither the name of The Linux Foundation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "hal_api.h"
#include "wcss_version.h"
/**
 * Common SRNG register access macros:
 * The SRNG registers are distributed across various UMAC and LMAC HW blocks,
 * but the register group and format are exactly the same for all rings, with
 * some differences between producer rings (these are 'producer rings' with
 * respect to HW and referred to as 'destination rings' in SW) and consumer
 * rings (these are 'consumer rings' with respect to HW and referred to as
 * 'source rings' in SW). The following macros provide uniform access to all
 * SRNG rings.
 */
/* SRNG registers are split into two groups, R0 and R2. The following
 * definitions identify the group to which each register belongs.
 */
#define R0_INDEX 0
#define R2_INDEX 1

#define HWREG_INDEX(_reg_group) _reg_group ## _ ## INDEX
/* Registers in R0 group */
#define BASE_LSB_GROUP R0
#define BASE_MSB_GROUP R0
#define ID_GROUP R0
#define STATUS_GROUP R0
#define MISC_GROUP R0
#define HP_ADDR_LSB_GROUP R0
#define HP_ADDR_MSB_GROUP R0
#define PRODUCER_INT_SETUP_GROUP R0
#define PRODUCER_INT_STATUS_GROUP R0
#define PRODUCER_FULL_COUNTER_GROUP R0
#define MSI1_BASE_LSB_GROUP R0
#define MSI1_BASE_MSB_GROUP R0
#define MSI1_DATA_GROUP R0
#define HP_TP_SW_OFFSET_GROUP R0
#define TP_ADDR_LSB_GROUP R0
#define TP_ADDR_MSB_GROUP R0
#define CONSUMER_INT_SETUP_IX0_GROUP R0
#define CONSUMER_INT_SETUP_IX1_GROUP R0
#define CONSUMER_INT_STATUS_GROUP R0
#define CONSUMER_EMPTY_COUNTER_GROUP R0
#define CONSUMER_PREFETCH_TIMER_GROUP R0
#define CONSUMER_PREFETCH_STATUS_GROUP R0

/* Registers in R2 group */
#define HP_GROUP R2
#define TP_GROUP R2
/**
 * Register definitions for all SRNG based rings are the same, except for a
 * few differences between source (HW consumer) and destination (HW producer)
 * registers. The following macro definitions provide generic access to all
 * SRNG based rings.
 * For source rings, we will use the register/field definitions of the SW2TCL1
 * ring defined in the HW header file mac_tcl_reg_seq_hwioreg.h. To set up
 * individual fields, SRNG_SM macros should be used with fields specified
 * using SRNG_SRC_FLD(<register>, <field>). Register writes should be done
 * using SRNG_SRC_REG_WRITE(<hal_srng>, <register>, <value>).
 * Similarly, for destination rings we will use the definitions of the REO2SW1
 * ring defined in the header file reo_destination_ring.h. To set up
 * individual fields, SRNG_SM macros should be used with fields specified
 * using SRNG_DST_FLD(<register>, <field>). Register writes should be done
 * using SRNG_DST_REG_WRITE(<hal_srng>, <register>, <value>).
 */
#define SRNG_DST_REG_OFFSET(_reg, _reg_group) \
	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg##_ADDR(0)

#define SRNG_SRC_REG_OFFSET(_reg, _reg_group) \
	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg ## _ADDR(0)

#define _SRNG_DST_FLD(_reg_group, _reg_fld) \
	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg_fld
#define _SRNG_SRC_FLD(_reg_group, _reg_fld) \
	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg_fld

#define _SRNG_FLD(_reg_group, _reg_fld, _dir) \
	_SRNG_ ## _dir ## _FLD(_reg_group, _reg_fld)

#define SRNG_DST_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, DST)
#define SRNG_SRC_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, SRC)

#define SRNG_SRC_R0_START_OFFSET SRNG_SRC_REG_OFFSET(BASE_LSB, R0)
#define SRNG_DST_R0_START_OFFSET SRNG_DST_REG_OFFSET(BASE_LSB, R0)

#define SRNG_SRC_R2_START_OFFSET SRNG_SRC_REG_OFFSET(HP, R2)
#define SRNG_DST_R2_START_OFFSET SRNG_DST_REG_OFFSET(HP, R2)

#define SRNG_SRC_START_OFFSET(_reg_group) \
	SRNG_SRC_ ## _reg_group ## _START_OFFSET
#define SRNG_DST_START_OFFSET(_reg_group) \
	SRNG_DST_ ## _reg_group ## _START_OFFSET

#define SRNG_REG_ADDR(_srng, _reg, _reg_group, _dir) \
	((_srng)->hwreg_base[HWREG_INDEX(_reg_group)] + \
	SRNG_ ## _dir ## _REG_OFFSET(_reg, _reg_group) - \
	SRNG_ ## _dir ## _START_OFFSET(_reg_group))

#define SRNG_DST_ADDR(_srng, _reg) \
	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, DST)

#define SRNG_SRC_ADDR(_srng, _reg) \
	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, SRC)

#define SRNG_REG_WRITE(_srng, _reg, _value, _dir) \
	hal_write_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg), (_value))

#define SRNG_REG_READ(_srng, _reg, _dir) \
	hal_read_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg))

#define SRNG_SRC_REG_WRITE(_srng, _reg, _value) \
	SRNG_REG_WRITE(_srng, _reg, _value, SRC)

#define SRNG_DST_REG_WRITE(_srng, _reg, _value) \
	SRNG_REG_WRITE(_srng, _reg, _value, DST)

#define SRNG_SRC_REG_READ(_srng, _reg) \
	SRNG_REG_READ(_srng, _reg, SRC)

#define _SRNG_FM(_reg_fld) _reg_fld ## _BMSK
#define _SRNG_FS(_reg_fld) _reg_fld ## _SHFT

#define SRNG_SM(_reg_fld, _val) \
	(((_val) << _SRNG_FS(_reg_fld)) & _SRNG_FM(_reg_fld))

#define SRNG_MS(_reg_fld, _val) \
	(((_val) & _SRNG_FM(_reg_fld)) >> _SRNG_FS(_reg_fld))

#define SRNG_MAX_SIZE_DWORDS \
	(SRNG_MS(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), 0xffffffff))
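
/*
 * Illustration (not part of the driver): a minimal sketch of how the field
 * macros above compose. Assuming a hypothetical field FOO whose HW header
 * defines FOO_BMSK = 0x00ff0000 and FOO_SHFT = 16, SRNG_SM() shifts a value
 * into the field position and masks it, while SRNG_MS() extracts it back:
 *
 *	reg_val = SRNG_SM(FOO, 0x2a);      // -> 0x002a0000
 *	field   = SRNG_MS(FOO, reg_val);   // -> 0x2a
 *
 * The ring-specific wrappers expand the same way, e.g.
 * SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), size) resolves the _BMSK/_SHFT
 * pair of the SW2TCL1 BASE_MSB RING_SIZE field from the HW header.
 */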
/**
 * HW ring configuration table to identify hardware ring attributes like
 * register addresses, number of rings, ring entry size etc., for each type
 * of SRNG ring.
 *
 * Currently there is just one HW ring table, but there could be multiple
 * configurations in the future based on HW variants from the same wifi3.0
 * family, and hence the table needs to be attached to hal_soc based on the
 * HW type.
 */
#define HAL_SRNG_CONFIG(_hal_soc, _ring_type) (&hw_srng_table[_ring_type])

static struct hal_hw_srng_config hw_srng_table[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct reo_destination_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO2SW1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		.reg_size = {
			HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) -
				HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0),
			HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) -
				HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0),
		},
	},
	{ /* REO_EXCEPTION */
		/* Designating the REO2TCL ring as the exception ring. This
		 * ring is similar to other REO2SW rings though it is named
		 * REO2TCL. Any of the REO2SW rings can be used as the
		 * exception ring.
		 */
		.start_ring_id = HAL_SRNG_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_destination_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO2TCL_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_SW2REO_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct reo_get_queue_stats)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO_CMD_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct reo_get_queue_stats_status)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO_STATUS_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_data_cmd)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		.reg_size = {
			HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) -
				HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0),
			HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) -
				HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0),
		},
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_gse_cmd)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_status_ring)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_CE_0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct ce_src_desc) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
			HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
		},
		.reg_size = {
			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
		},
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_CE_0_DST,
		.max_rings = 12,
		.entry_size = 8 >> 2,
		/* TODO: entry_size above should actually be
		 * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition
		 * of struct ce_dst_desc in HW header files
		 */
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
			HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		},
		.reg_size = {
			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		},
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_CE_0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct ce_stat_desc) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
			HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR(
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		},
		/* TODO: check destination status ring registers */
		.reg_size = {
			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		},
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
			HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_release_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
			HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_WBM2SW0_RELEASE,
		.max_rings = 4,
		.entry_size = sizeof(struct wbm_release_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
			HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		.reg_size = {
			HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
				HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
			HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
				HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0,
#ifdef IPA_OFFLOAD
		.max_rings = 3,
#else
		.max_rings = 2,
#endif
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_DST_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_DST_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
#ifdef WLAN_FEATURE_CIF_CFR
	{ /* WIFI_POS_SRC */
		.start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING,
		.max_rings = 1,
		.entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
#endif
};
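
/*
 * Illustration (not part of the driver): how the table entries are consumed.
 * For ring instance N of a given type, the per-group register base becomes
 * dev_base_addr + reg_start[i] + N * reg_size[i] (see hal_srng_setup()), so
 * e.g. REO2SW3 registers are found two reg_size strides past REO2SW1.
 */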
/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: ring_num exceeded maximum no. of supported rings\n",
			__func__);
		return -EINVAL;
	}

	if (ring_config->lmac_ring) {
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}
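
/*
 * Illustration (not part of the driver): how the ring id is derived from the
 * table above. For a non-LMAC ring such as WBM2SW_RELEASE, the ring id is
 * simply start_ring_id + ring_num. For an LMAC ring the per-MAC stride is
 * added, so with hypothetical values mac_id = 1 and ring_num = 0 a RXDMA_BUF
 * ring would resolve to
 * HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + 1 * HAL_MAX_RINGS_PER_LMAC.
 */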
static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4

static void hal_update_srng_hp_tp_address(void *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int ring_id;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->ring_dir == HAL_SRNG_DST_RING)
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
	else
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
}
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START] *
			    ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
		__func__, target_register, shadow_config_index,
		ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}
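
/*
 * Illustration (not part of the driver): a minimal sketch of the expected
 * call flow, assuming the HIF/PLD layer forwards the resulting table to the
 * platform driver so that writes to each SHADOW_REGISTER(i) get mirrored to
 * shadow_config[i].addr.
 *
 *	struct pld_shadow_reg_v2_cfg *cfg;
 *	int num_cfg;
 *
 *	hal_construct_shadow_config(hal_soc);
 *	hal_get_shadow_config(hal_soc, &cfg, &num_cfg);
 */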
void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = hal->shadow_config;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s\n", __func__);
}
static void hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index > MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: index %x out of bounds\n", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: sanity check failure, expected %x, found %x\n",
			__func__, destination_ba_offset,
			hal->shadow_config[index].addr);
		goto error;
	}

	return;
error:
	qdf_print("%s: baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		__func__, hal->dev_base_addr, destination, shadow_address,
		shadow_0_offset, index);
	QDF_BUG(0);
	return;
}
static void hal_target_based_configure(struct hal_soc *hal)
{
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal->hif_handle);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		break;
	default:
		break;
	}
}
/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal_soc allocation failed\n", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle);
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal->shadow_rdptr_mem_paddr allocation failed\n",
			__func__);
		goto fail1;
	}

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal->shadow_wrptr_mem_vaddr allocation failed\n",
			__func__);
		goto fail2;
	}

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;

	hal_target_based_configure(hal);

	return (void *)hal;

fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}
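
/*
 * Illustration (not part of the driver): a minimal sketch of the intended
 * lifecycle, assuming it is driven from HIF initialization as described in
 * the comment above.
 *
 *	void *hal_soc = hal_attach(hif_handle, qdf_dev);
 *	if (!hal_soc)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	// The DP layer later retrieves the same handle via
 *	// hif_get_hal_handle() and tears it down with hal_detach(hal_soc)
 *	// during shutdown.
 */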
/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base(hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
	return;
}
/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Return: None
 *
 * This function should be called as part of HIF de-initialization; it frees
 * the shadow read/write pointer memory and the HAL SOC allocated in
 * hal_attach()
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_free(hal);
	return;
}
/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t tp_addr = 0;

	HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val);

#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
	(defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
	reg_val = SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#else
	reg_val = SRNG_SM(SRNG_SRC_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#endif
	SRNG_SRC_REG_WRITE(srng, ID, reg_val);

	/**
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to set up SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;

	/*
	 * WAR - Hawkeye v1 has a hardware bug which requires timer value to be
	 * programmed in terms of 1us resolution instead of 8us resolution as
	 * given in MLD.
	 */
	if (srng->intr_timer_thres_us) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us);
		/* For HK v2 this should be (srng->intr_timer_thres_us >> 3) */
	}

	if (srng->intr_batch_cntr_thres_entries) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}
	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val);

	reg_val = 0;
	if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1,
			LOW_THRESHOLD), srng->u.src_ring.low_threshold);
	}

	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val);

	/* As per HW team, TP_ADDR and HP_ADDR for Idle link ring should
	 * remain 0 to avoid some WBM stability issues. Remote head/tail
	 * pointers are not required since this ring is completely managed
	 * by WBM HW
	 */
	if (srng->ring_id != HAL_SRNG_WBM_IDLE_LINK) {
		tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff);
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_SRC_REG_WRITE(srng, HP, 0);
	SRNG_SRC_REG_WRITE(srng, TP, 0);
	*(srng->u.src_ring.tp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_MSI_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/* Loop count is not used for SRC rings */
	reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1);

	/*
	 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;

	SRNG_SRC_REG_WRITE(srng, MISC, reg_val);
}
/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
	int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));
	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}
/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET);
	HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: srng pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}
/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	srng->u.dst_ring.hp_addr = vaddr;
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);
	*(srng->u.dst_ring.hp_addr) = srng->u.dst_ring.cached_hp;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"hp_addr=%p, cached_hp=%d, hp=%d\n",
		(void *)srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp,
		*(srng->u.dst_ring.hp_addr));
}
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t hp_addr = 0;

	HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val);

	reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size);
	SRNG_DST_REG_WRITE(srng, ID, reg_val);

	/**
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to set up SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;
	if (srng->intr_timer_thres_us) {
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us >> 3);
	}

	if (srng->intr_batch_cntr_thres_entries) {
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}

	SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val);
	hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
		((unsigned long)(srng->u.dst_ring.hp_addr) -
		(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32);

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_DST_REG_WRITE(srng, HP, 0);
	SRNG_DST_REG_WRITE(srng, TP, 0);
	*(srng->u.dst_ring.hp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_MSI_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/*
	 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;

	SRNG_DST_REG_WRITE(srng, MISC, reg_val);
}
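
/*
 * Illustration (not part of the driver): how the interrupt thresholds above
 * translate into register fields. With hypothetical values
 * intr_timer_thres_us = 256 and intr_batch_cntr_thres_entries = 8 on a ring
 * whose entry_size is 8 dwords, the destination ring would be programmed with
 * INTERRUPT_TIMER_THRESHOLD = 256 >> 3 = 32 (in 8 us units) and
 * BATCH_COUNTER_THRESHOLD = 8 * 8 = 64 (in dwords).
 */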
/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: mac_id %d ring_id %d\n",
		__func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring (ring_type, ring_num) already initialized\n",
			__func__);
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from the caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config\n",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_SRC_ADDR(srng, HP),
				srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config\n",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_DST_ADDR(srng, TP),
				srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->initialized = true;

	return (void *)srng;
}
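
/*
 * Illustration (not part of the driver): a minimal sketch of how a caller
 * might set up a ring, assuming DMA-coherent ring memory is allocated via
 * QDF and REO_DST is a value of hal_ring_type.
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *
 *	params.num_entries = 1024;  // must not exceed hal_srng_max_entries()
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *		qdf_dev->dev, params.num_entries * entry_size,
 *		&params.ring_base_paddr);
 *	void *hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */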
/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, void *hal_srng)
{
	struct hal_srng *srng = (struct hal_srng *)hal_srng;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = 0;
}

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 */
uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	return ring_config->entry_size << 2;
}
/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	return SRNG_MAX_SIZE_DWORDS / ring_config->entry_size;
}

enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	return ring_config->ring_dir;
}
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;
	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;
	ring_params->ring_id = srng->ring_id;
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];
}