hal_srng.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282
  1. /*
  2. * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hal_hw_headers.h"
  19. #include "hal_api.h"
  20. #include "target_type.h"
  21. #include "wcss_version.h"
  22. #include "qdf_module.h"
  23. #ifdef QCA_WIFI_QCA8074
  24. void hal_qca6290_attach(struct hal_soc *hal);
  25. #endif
  26. #ifdef QCA_WIFI_QCA8074
  27. void hal_qca8074_attach(struct hal_soc *hal);
  28. #endif
  29. #if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018)
  30. void hal_qca8074v2_attach(struct hal_soc *hal);
  31. #endif
  32. #ifdef QCA_WIFI_QCA6390
  33. void hal_qca6390_attach(struct hal_soc *hal);
  34. #endif
  35. #ifdef QCA_WIFI_QCA6490
  36. void hal_qca6490_attach(struct hal_soc *hal);
  37. #endif
  38. #ifdef QCA_WIFI_QCN9000
  39. void hal_qcn9000_attach(struct hal_soc *hal);
  40. #endif
  41. #ifdef QCA_WIFI_QCA6750
  42. void hal_qca6750_attach(struct hal_soc *hal);
  43. #endif
  44. #ifdef QCA_WIFI_QCA5018
  45. void hal_qca5018_attach(struct hal_soc *hal);
  46. #endif
  47. #ifdef ENABLE_VERBOSE_DEBUG
  48. bool is_hal_verbose_debug_enabled;
  49. #endif
#ifdef ENABLE_HAL_REG_WR_HISTORY
/* Single global history buffer shared by all HAL SOC instances */
struct hal_reg_write_fail_history hal_reg_wr_hist;

/**
 * hal_reg_wr_fail_history_add() - record one failed register write
 * @hal_soc: HAL SOC whose history buffer receives the entry
 * @offset: register offset that was written
 * @wr_val: value that was written
 * @rd_val: value read back after the write
 *
 * Claims the next slot in the circular failure history (the index is a
 * qdf_atomic_t, advanced by hal_history_get_next_index — presumably
 * atomically; confirm against its definition) and fills in a timestamped
 * record of the mismatching write.
 */
void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
				 uint32_t offset,
				 uint32_t wr_val, uint32_t rd_val)
{
	struct hal_reg_write_fail_entry *record;
	int idx;

	idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
					 HAL_REG_WRITE_HIST_SIZE);

	record = &hal_soc->reg_wr_fail_hist->record[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->reg_offset = offset;
	record->write_val = wr_val;
	record->read_val = rd_val;
}

/* Point the SOC at the global history buffer; -1 means "empty" so the
 * first hal_reg_wr_fail_history_add() lands in slot 0.
 */
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
	hal->reg_wr_fail_hist = &hal_reg_wr_hist;
	qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
}
#else
/* Failure history disabled at build time: no-op stub */
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
}
#endif
  76. /**
  77. * hal_get_srng_ring_id() - get the ring id of a descriped ring
  78. * @hal: hal_soc data structure
  79. * @ring_type: type enum describing the ring
  80. * @ring_num: which ring of the ring type
  81. * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
  82. *
  83. * Return: the ring id or -EINVAL if the ring does not exist.
  84. */
  85. static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
  86. int ring_num, int mac_id)
  87. {
  88. struct hal_hw_srng_config *ring_config =
  89. HAL_SRNG_CONFIG(hal, ring_type);
  90. int ring_id;
  91. if (ring_num >= ring_config->max_rings) {
  92. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
  93. "%s: ring_num exceeded maximum no. of supported rings",
  94. __func__);
  95. /* TODO: This is a programming error. Assert if this happens */
  96. return -EINVAL;
  97. }
  98. if (ring_config->lmac_ring) {
  99. ring_id = ring_config->start_ring_id + ring_num +
  100. (mac_id * HAL_MAX_RINGS_PER_LMAC);
  101. } else {
  102. ring_id = ring_config->start_ring_id + ring_num;
  103. }
  104. return ring_id;
  105. }
  106. static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
  107. {
  108. /* TODO: Should we allocate srng structures dynamically? */
  109. return &(hal->srng_list[ring_id]);
  110. }
  111. #define HP_OFFSET_IN_REG_START 1
  112. #define OFFSET_FROM_HP_TO_TP 4
  113. static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
  114. int shadow_config_index,
  115. int ring_type,
  116. int ring_num)
  117. {
  118. struct hal_srng *srng;
  119. int ring_id;
  120. struct hal_hw_srng_config *ring_config =
  121. HAL_SRNG_CONFIG(hal_soc, ring_type);
  122. ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
  123. if (ring_id < 0)
  124. return;
  125. srng = hal_get_srng(hal_soc, ring_id);
  126. if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
  127. srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
  128. + hal_soc->dev_base_addr;
  129. hal_debug("tp_addr=%pK dev base addr %pK index %u",
  130. srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
  131. shadow_config_index);
  132. } else {
  133. srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
  134. + hal_soc->dev_base_addr;
  135. hal_debug("hp_addr=%pK dev base addr %pK index %u",
  136. srng->u.src_ring.hp_addr,
  137. hal_soc->dev_base_addr, shadow_config_index);
  138. }
  139. }
/**
 * hal_set_one_shadow_config() - assign the next free shadow register slot to
 *	one ring's HP (src ring) or TP (dst ring) register
 * @hal_soc: opaque HAL SOC handle
 * @ring_type: ring type (index into hw_srng_table)
 * @ring_num: instance number within the ring type
 *
 * Records the target register address in shadow_config[] and updates the
 * srng's hp/tp pointer to the shadow area via hal_update_srng_hp_tp_address().
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_RESOURCES when all
 * MAX_SHADOW_REGISTERS slots are already in use.
 */
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	/* Slot index is the running count of configured shadow registers */
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	/* Claim the slot before filling it in */
	hal->num_shadow_registers_configured++;

	/* HP register of instance @ring_num within this ring type's bank */
	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    *ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure*/
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_set_one_shadow_config);
  171. QDF_STATUS hal_construct_shadow_config(void *hal_soc)
  172. {
  173. int ring_type, ring_num;
  174. struct hal_soc *hal = (struct hal_soc *)hal_soc;
  175. for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
  176. struct hal_hw_srng_config *srng_config =
  177. &hal->hw_srng_table[ring_type];
  178. if (ring_type == CE_SRC ||
  179. ring_type == CE_DST ||
  180. ring_type == CE_DST_STATUS)
  181. continue;
  182. if (srng_config->lmac_ring)
  183. continue;
  184. for (ring_num = 0; ring_num < srng_config->max_rings;
  185. ring_num++)
  186. hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
  187. }
  188. return QDF_STATUS_SUCCESS;
  189. }
  190. qdf_export_symbol(hal_construct_shadow_config);
  191. void hal_get_shadow_config(void *hal_soc,
  192. struct pld_shadow_reg_v2_cfg **shadow_config,
  193. int *num_shadow_registers_configured)
  194. {
  195. struct hal_soc *hal = (struct hal_soc *)hal_soc;
  196. *shadow_config = hal->shadow_config;
  197. *num_shadow_registers_configured =
  198. hal->num_shadow_registers_configured;
  199. }
  200. qdf_export_symbol(hal_get_shadow_config);
  201. static void hal_validate_shadow_register(struct hal_soc *hal,
  202. uint32_t *destination,
  203. uint32_t *shadow_address)
  204. {
  205. unsigned int index;
  206. uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
  207. int destination_ba_offset =
  208. ((char *)destination) - (char *)hal->dev_base_addr;
  209. index = shadow_address - shadow_0_offset;
  210. if (index >= MAX_SHADOW_REGISTERS) {
  211. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  212. "%s: index %x out of bounds", __func__, index);
  213. goto error;
  214. } else if (hal->shadow_config[index].addr != destination_ba_offset) {
  215. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  216. "%s: sanity check failure, expected %x, found %x",
  217. __func__, destination_ba_offset,
  218. hal->shadow_config[index].addr);
  219. goto error;
  220. }
  221. return;
  222. error:
  223. qdf_print("%s: baddr %pK, desination %pK, shadow_address %pK s0offset %pK index %x",
  224. __func__, hal->dev_base_addr, destination, shadow_address,
  225. shadow_0_offset, index);
  226. QDF_BUG(0);
  227. return;
  228. }
/**
 * hal_target_based_configure() - attach target-specific HAL ops and set
 *	register-access properties based on hal->target_type
 * @hal: HAL SOC handle (target_type must already be populated)
 *
 * Unrecognized (or compiled-out) targets fall through to default silently,
 * leaving no target ops attached.
 */
static void hal_target_based_configure(struct hal_soc *hal)
{
	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6750
	case TARGET_TYPE_QCA6750:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca6750_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		/* QCA6018 shares the 8074v2 HAL ops */
		hal_qca8074v2_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9000 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn9000_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5018
	case TARGET_TYPE_QCA5018:
		hal_qca5018_attach(hal);
		break;
#endif
	default:
		break;
	}
}
  292. uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
  293. {
  294. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  295. struct hif_target_info *tgt_info =
  296. hif_get_target_info_handle(hal_soc->hif_handle);
  297. return tgt_info->target_type;
  298. }
  299. qdf_export_symbol(hal_get_target_type);
  300. #ifdef FEATURE_HAL_DELAYED_REG_WRITE
  301. #ifdef MEMORY_DEBUG
  302. /*
  303. * Length of the queue(array) used to hold delayed register writes.
* Must be a power of 2: indices wrap by masking with (HAL_REG_WRITE_QUEUE_LEN - 1).
  305. */
  306. #define HAL_REG_WRITE_QUEUE_LEN 128
  307. #else
  308. #define HAL_REG_WRITE_QUEUE_LEN 32
  309. #endif
  310. /**
  311. * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes
  312. * @hal: hal_soc pointer
  313. *
  314. * Return: true if throughput is high, else false.
  315. */
  316. static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
  317. {
  318. int bw_level = hif_get_bandwidth_level(hal->hif_handle);
  319. return (bw_level >= PLD_BUS_WIDTH_MEDIUM) ? true : false;
  320. }
/**
 * hal_process_reg_write_q_elem() - process a register write queue element
 * @hal: hal_soc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Flushes the srng's current HP (src ring) or TP (dst ring) to hardware.
 * Note that the value written is the ring pointer at dequeue time, not the
 * value captured at enqueue time, so coalesced updates are flushed in one
 * register write.
 *
 * Return: The value which was written to the address
 */
static uint32_t
hal_process_reg_write_q_elem(struct hal_soc *hal,
			     struct hal_reg_write_q_elem *q_elem)
{
	struct hal_srng *srng = q_elem->srng;
	uint32_t write_val;

	SRNG_LOCK(&srng->lock);

	/* Under the ring lock: allow a new update for this ring to enqueue */
	srng->reg_write_in_progress = false;
	srng->wstats.dequeues++;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		q_elem->dequeue_val = srng->u.src_ring.hp;
		hal_write_address_32_mb(hal,
					srng->u.src_ring.hp_addr,
					srng->u.src_ring.hp, false);
		write_val = srng->u.src_ring.hp;
	} else {
		q_elem->dequeue_val = srng->u.dst_ring.tp;
		hal_write_address_32_mb(hal,
					srng->u.dst_ring.tp_addr,
					srng->u.dst_ring.tp, false);
		write_val = srng->u.dst_ring.tp;
	}

	/* Release the queue slot for reuse by hal_reg_write_enqueue() */
	q_elem->valid = 0;

	SRNG_UNLOCK(&srng->lock);

	return write_val;
}
  354. /**
  355. * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
  356. * @hal: hal_soc pointer
  357. * @delay: delay in us
  358. *
  359. * Return: None
  360. */
  361. static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
  362. uint64_t delay_us)
  363. {
  364. uint32_t *hist;
  365. hist = hal->stats.wstats.sched_delay;
  366. if (delay_us < 100)
  367. hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
  368. else if (delay_us < 1000)
  369. hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
  370. else if (delay_us < 5000)
  371. hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
  372. else
  373. hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
  374. }
/**
 * hal_reg_write_work() - Worker to process delayed writes
 * @arg: hal_soc pointer
 *
 * Drains consecutive valid elements from reg_write_queue starting at
 * hal->read_idx, flushing each srng's latest ring pointer to hardware and
 * collecting scheduling-delay statistics. The link is held out of low-power
 * states for the duration of the drain.
 *
 * Return: None
 */
static void hal_reg_write_work(void *arg)
{
	int32_t q_depth, write_val;
	struct hal_soc *hal = arg;
	struct hal_reg_write_q_elem *q_elem;
	uint64_t delta_us;
	uint8_t ring_id;
	uint32_t *addr;

	q_elem = &hal->reg_write_queue[(hal->read_idx)];

	/* Nothing pending; a later enqueue will re-schedule this work */
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
	if (q_depth > hal->stats.wstats.max_q_depth)
		hal->stats.wstats.max_q_depth =  q_depth;

	/*
	 * NOTE(review): on failure the queued entries stay valid and are only
	 * drained when a future enqueue re-schedules this work — confirm this
	 * is the intended recovery path.
	 */
	if (hif_prevent_link_low_power_states(hal->hif_handle)) {
		hal->stats.wstats.prevent_l1_fails++;
		return;
	}

	while (q_elem->valid) {
		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->srng->ring_id;
		addr = q_elem->addr;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hal_reg_write_fill_sched_delay_hist(hal, delta_us);

		hal->stats.wstats.dequeues++;
		qdf_atomic_dec(&hal->stats.wstats.q_depth);

		write_val = hal_process_reg_write_q_elem(hal, q_elem);
		hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
				  hal->read_idx, ring_id, addr, write_val, delta_us);

		/* Queue length is a power of 2; wrap by masking */
		hal->read_idx = (hal->read_idx + 1) &
				(HAL_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &hal->reg_write_queue[(hal->read_idx)];
	}

	hif_allow_link_low_power_states(hal->hif_handle);
}
/**
 * hal_flush_reg_write_work() - flush all writes from register write queue
 * @hal: hal_soc pointer
 *
 * Cancels any pending (not yet running) work item, waits for a running
 * instance to complete, then drains the dedicated workqueue. Used during
 * delayed-reg-write teardown.
 *
 * Return: None
 */
static inline void hal_flush_reg_write_work(struct hal_soc *hal)
{
	qdf_cancel_work(&hal->reg_write_work);
	qdf_flush_work(&hal->reg_write_work);
	qdf_flush_workqueue(0, hal->reg_write_wq);
}
/**
 * hal_reg_write_enqueue() - enqueue register writes into kworker
 * @hal_soc: hal_soc pointer
 * @srng: srng pointer
 * @addr: iomem address of register
 * @value: value to be written to iomem address
 *
 * This function executes from within the SRNG LOCK
 *
 * Return: None
 */
static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
				  struct hal_srng *srng,
				  void __iomem *addr,
				  uint32_t value)
{
	struct hal_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	/*
	 * A write for this ring is already queued: the worker writes the
	 * ring's latest HP/TP when it runs, so this update coalesces into
	 * the pending element.
	 */
	if (srng->reg_write_in_progress) {
		hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
				  srng->ring_id, addr, value);
		qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
		srng->wstats.coalesces++;
		return;
	}

	/* Claim the next slot; the queue length is a power of 2, so wrap
	 * the atomically incremented index by masking.
	 */
	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);

	write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &hal_soc->reg_write_queue[write_idx];

	/* Slot still valid means the worker has not drained it yet */
	if (q_elem->valid) {
		hal_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
	srng->wstats.enqueues++;

	qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);

	q_elem->srng = srng;
	q_elem->addr = addr;
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other
	 * fields in the q_elem needs to be updated in memory.
	 * Else there is a chance that the dequeuing worker thread
	 * might read stale entries and process incorrect srng.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/* Protected by the SRNG lock held by our caller */
	srng->reg_write_in_progress  = true;

	hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
			  write_idx, srng->ring_id, addr, value);

	qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
		       &hal_soc->reg_write_work);
}
  483. void hal_delayed_reg_write(struct hal_soc *hal_soc,
  484. struct hal_srng *srng,
  485. void __iomem *addr,
  486. uint32_t value)
  487. {
  488. if (pld_is_device_awake(hal_soc->qdf_dev->dev) ||
  489. hal_is_reg_write_tput_level_high(hal_soc)) {
  490. qdf_atomic_inc(&hal_soc->stats.wstats.direct);
  491. srng->wstats.direct++;
  492. hal_write_address_32_mb(hal_soc, addr, value, false);
  493. } else {
  494. hal_reg_write_enqueue(hal_soc, srng, addr, value);
  495. }
  496. }
  497. /**
  498. * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
  499. * @hal_soc: hal_soc pointer
  500. *
  501. * Initialize main data structures to process register writes in a delayed
  502. * workqueue.
  503. *
  504. * Return: QDF_STATUS_SUCCESS on success else a QDF error.
  505. */
  506. static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
  507. {
  508. hal->reg_write_wq =
  509. qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
  510. qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
  511. hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
  512. sizeof(*hal->reg_write_queue));
  513. if (!hal->reg_write_queue) {
  514. hal_err("unable to allocate memory");
  515. QDF_BUG(0);
  516. return QDF_STATUS_E_NOMEM;
  517. }
  518. /* Initial value of indices */
  519. hal->read_idx = 0;
  520. qdf_atomic_set(&hal->write_idx, -1);
  521. return QDF_STATUS_SUCCESS;
  522. }
/**
 * hal_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
 * @hal: hal_soc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue. The work is flushed first so no worker touches the queue after
 * it is freed.
 *
 * Return: None
 */
static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
	hal_flush_reg_write_work(hal);
	qdf_destroy_workqueue(0, hal->reg_write_wq);
	qdf_mem_free(hal->reg_write_queue);
}
/*
 * hal_fill_reg_write_srng_stats() - format one ring's delayed-write counters
 * (enqueues/dequeues/coalesces/direct writes) into @buf.
 *
 * Return: @buf, for use directly in a log call.
 */
static inline
char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
				    char *buf, qdf_size_t size)
{
	qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
		      srng->wstats.enqueues, srng->wstats.dequeues,
		      srng->wstats.coalesces, srng->wstats.direct);

	return buf;
}
  547. /* bytes for local buffer */
  548. #define HAL_REG_WRITE_SRNG_STATS_LEN 100
  549. void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
  550. {
  551. struct hal_srng *srng;
  552. char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
  553. struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
  554. srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
  555. hal_debug("SW2TCL1: %s",
  556. hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
  557. srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
  558. hal_debug("WBM2SW0: %s",
  559. hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
  560. srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
  561. hal_debug("REO2SW1: %s",
  562. hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
  563. srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
  564. hal_debug("REO2SW2: %s",
  565. hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
  566. srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
  567. hal_debug("REO2SW3: %s",
  568. hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
  569. }
/*
 * hal_dump_reg_write_stats() - log SOC-wide delayed-write counters and the
 * scheduling-delay histogram.
 *
 * NOTE(review): dequeues and max_q_depth are read without atomics —
 * presumably only the single worker thread updates them; confirm.
 */
void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
	uint32_t *hist;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	hist = hal->stats.wstats.sched_delay;

	hal_debug("enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&hal->stats.wstats.enqueues),
		  hal->stats.wstats.dequeues,
		  qdf_atomic_read(&hal->stats.wstats.coalesces),
		  qdf_atomic_read(&hal->stats.wstats.direct),
		  qdf_atomic_read(&hal->stats.wstats.q_depth),
		  hal->stats.wstats.max_q_depth,
		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}
#else
/* FEATURE_HAL_DELAYED_REG_WRITE disabled: delayed-write init is a no-op */
static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	return QDF_STATUS_SUCCESS;
}

/* FEATURE_HAL_DELAYED_REG_WRITE disabled: nothing to tear down */
static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
}
#endif
  596. /**
  597. * hal_attach - Initialize HAL layer
  598. * @hif_handle: Opaque HIF handle
  599. * @qdf_dev: QDF device
  600. *
  601. * Return: Opaque HAL SOC handle
  602. * NULL on failure (if given ring is not available)
  603. *
  604. * This function should be called as part of HIF initialization (for accessing
  605. * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
  606. *
  607. */
  608. void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
  609. {
  610. struct hal_soc *hal;
  611. int i;
  612. hal = qdf_mem_malloc(sizeof(*hal));
  613. if (!hal) {
  614. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  615. "%s: hal_soc allocation failed", __func__);
  616. goto fail0;
  617. }
  618. hal->hif_handle = hif_handle;
  619. hal->dev_base_addr = hif_get_dev_ba(hif_handle);
  620. hal->qdf_dev = qdf_dev;
  621. hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
  622. qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
  623. HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
  624. if (!hal->shadow_rdptr_mem_paddr) {
  625. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  626. "%s: hal->shadow_rdptr_mem_paddr allocation failed",
  627. __func__);
  628. goto fail1;
  629. }
  630. qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
  631. sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);
  632. hal->shadow_wrptr_mem_vaddr =
  633. (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
  634. sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
  635. &(hal->shadow_wrptr_mem_paddr));
  636. if (!hal->shadow_wrptr_mem_vaddr) {
  637. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  638. "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
  639. __func__);
  640. goto fail2;
  641. }
  642. qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
  643. sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS);
  644. for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
  645. hal->srng_list[i].initialized = 0;
  646. hal->srng_list[i].ring_id = i;
  647. }
  648. qdf_spinlock_create(&hal->register_access_lock);
  649. hal->register_window = 0;
  650. hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));
  651. hal_target_based_configure(hal);
  652. hal_reg_write_fail_history_init(hal);
  653. /**
  654. * Indicate Initialization of srngs to avoid force wake
  655. * as umac power collapse is not enabled yet
  656. */
  657. hal->init_phase = true;
  658. qdf_minidump_log(hal, sizeof(*hal), "hal_soc");
  659. hal_delayed_reg_write_init(hal);
  660. return (void *)hal;
  661. fail2:
  662. qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
  663. sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
  664. hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
  665. fail1:
  666. qdf_mem_free(hal);
  667. fail0:
  668. return NULL;
  669. }
  670. qdf_export_symbol(hal_attach);
  671. /**
  672. * hal_mem_info - Retrieve hal memory base address
  673. *
  674. * @hal_soc: Opaque HAL SOC handle
  675. * @mem: pointer to structure to be updated with hal mem info
  676. */
  677. void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
  678. {
  679. struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
  680. mem->dev_base_addr = (void *)hal->dev_base_addr;
  681. mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
  682. mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
  683. mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
  684. mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
  685. hif_read_phy_mem_base((void *)hal->hif_handle,
  686. (qdf_dma_addr_t *)&mem->dev_base_paddr);
  687. return;
  688. }
  689. qdf_export_symbol(hal_get_meminfo);
  690. /**
  691. * hal_detach - Detach HAL layer
  692. * @hal_soc: HAL SOC handle
  693. *
  694. * Return: Opaque HAL SOC handle
  695. * NULL on failure (if given ring is not available)
  696. *
  697. * This function should be called as part of HIF initialization (for accessing
  698. * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
  699. *
  700. */
  701. extern void hal_detach(void *hal_soc)
  702. {
  703. struct hal_soc *hal = (struct hal_soc *)hal_soc;
  704. hal_delayed_reg_write_deinit(hal);
  705. qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
  706. sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
  707. hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
  708. qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
  709. sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
  710. hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
  711. qdf_minidump_remove(hal);
  712. qdf_mem_free(hal);
  713. return;
  714. }
  715. qdf_export_symbol(hal_detach);
  716. /**
  717. * hal_ce_dst_setup - Initialize CE destination ring registers
  718. * @hal_soc: HAL SOC handle
  719. * @srng: SRNG ring pointer
  720. */
  721. static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
  722. int ring_num)
  723. {
  724. uint32_t reg_val = 0;
  725. uint32_t reg_addr;
  726. struct hal_hw_srng_config *ring_config =
  727. HAL_SRNG_CONFIG(hal, CE_DST);
  728. /* set DEST_MAX_LENGTH according to ce assignment */
  729. reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
  730. ring_config->reg_start[R0_INDEX] +
  731. (ring_num * ring_config->reg_size[R0_INDEX]));
  732. reg_val = HAL_REG_READ(hal, reg_addr);
  733. reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
  734. reg_val |= srng->u.dst_ring.max_buffer_length &
  735. HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
  736. HAL_REG_WRITE(hal, reg_addr, reg_val);
  737. }
/**
 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: true to read the IX registers into the pointers, false to write
 *        the pointed-to values into the IX registers
 * @ix0: pointer to IX0 reg value (NULL to skip this register)
 * @ix1: pointer to IX1 reg value (NULL to skip this register)
 * @ix2: pointer to IX2 reg value (NULL to skip this register)
 * @ix3: pointer to IX3 reg value (NULL to skip this register)
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	if (read) {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix0);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			/* NOTE(review): IX1 uses a plain (non-confirmed)
			 * write while IX0/IX2/IX3 use HAL_REG_WRITE_CONFIRM;
			 * verify this asymmetry is intentional.
			 */
			HAL_REG_WRITE(hal, reg_offset, *ix1);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix2);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE_CONFIRM(hal, reg_offset, *ix3);
		}
	}
}
  805. /**
  806. * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
  807. * @srng: sring pointer
  808. * @paddr: physical address
  809. */
  810. void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
  811. uint64_t paddr)
  812. {
  813. SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
  814. paddr & 0xffffffff);
  815. SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
  816. paddr >> 32);
  817. }
  818. /**
  819. * hal_srng_dst_init_hp() - Initilaize destination ring head pointer
  820. * @srng: sring pointer
  821. * @vaddr: virtual address
  822. */
  823. void hal_srng_dst_init_hp(struct hal_srng *srng,
  824. uint32_t *vaddr)
  825. {
  826. if (!srng)
  827. return;
  828. srng->u.dst_ring.hp_addr = vaddr;
  829. SRNG_DST_REG_WRITE_CONFIRM(srng, HP, srng->u.dst_ring.cached_hp);
  830. if (vaddr) {
  831. *srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
  832. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  833. "hp_addr=%pK, cached_hp=%d, hp=%d",
  834. (void *)srng->u.dst_ring.hp_addr,
  835. srng->u.dst_ring.cached_hp,
  836. *srng->u.dst_ring.hp_addr);
  837. }
  838. }
  839. /**
  840. * hal_srng_hw_init - Private function to initialize SRNG HW
  841. * @hal_soc: HAL SOC handle
  842. * @srng: SRNG ring pointer
  843. */
  844. static inline void hal_srng_hw_init(struct hal_soc *hal,
  845. struct hal_srng *srng)
  846. {
  847. if (srng->ring_dir == HAL_SRNG_SRC_RING)
  848. hal_srng_src_hw_init(hal, srng);
  849. else
  850. hal_srng_dst_hw_init(hal, srng);
  851. }
/*
 * Shadow register policy:
 * With CONFIG_SHADOW_V2, shadow ring-pointer addresses are honored
 * (ignore_shadow == false) and a missing shadow configuration is reported
 * (CHECK_SHADOW_REGISTERS). Without it, shadow configuration is bypassed
 * and rings fall back to direct register access.
 */
#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	/* Resolve the global SRNG id from (type, num, mac); negative means
	 * the requested ring does not exist for this target.
	 */
	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	/* Refuse to set up the same ring twice */
	if (srng->initialized) {
		hal_verbose_debug("Ring (ring_type, ring_num) already initialized");
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	/* ring_size/entry_size are in 32-bit word units (hence the << 2
	 * byte conversion when zeroing below)
	 */
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	/* Per-ring register bank base for each register group */
	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		     srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		/* reap_hp starts at the last entry (one entry behind hp == 0
		 * modulo ring size)
		 */
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			/* No shadow config available (or shadowing disabled):
			 * fall back to the windowed register address.
			 */
			srng->u.src_ring.hp_addr =
				hal_get_window_address(hal,
						SRNG_SRC_ADDR(srng, HP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			/* No shadow config available (or shadowing disabled):
			 * fall back to the windowed register address.
			 */
			srng->u.dst_ring.tp_addr =
				hal_get_window_address(hal,
						SRNG_DST_ADDR(srng, TP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC rings are programmed by FW; the host only initializes HW for
	 * non-LMAC rings (plus CE_DST-specific destination control setup).
	 */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);
  1006. /**
  1007. * hal_srng_cleanup - Deinitialize HW SRNG ring.
  1008. * @hal_soc: Opaque HAL SOC handle
  1009. * @hal_srng: Opaque HAL SRNG pointer
  1010. */
  1011. void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  1012. {
  1013. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1014. SRNG_LOCK_DESTROY(&srng->lock);
  1015. srng->initialized = 0;
  1016. }
  1017. qdf_export_symbol(hal_srng_cleanup);
  1018. /**
  1019. * hal_srng_get_entrysize - Returns size of ring entry in bytes
  1020. * @hal_soc: Opaque HAL SOC handle
  1021. * @ring_type: one of the types from hal_ring_type
  1022. *
  1023. */
  1024. uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
  1025. {
  1026. struct hal_soc *hal = (struct hal_soc *)hal_soc;
  1027. struct hal_hw_srng_config *ring_config =
  1028. HAL_SRNG_CONFIG(hal, ring_type);
  1029. return ring_config->entry_size << 2;
  1030. }
  1031. qdf_export_symbol(hal_srng_get_entrysize);
  1032. /**
  1033. * hal_srng_max_entries - Returns maximum possible number of ring entries
  1034. * @hal_soc: Opaque HAL SOC handle
  1035. * @ring_type: one of the types from hal_ring_type
  1036. *
  1037. * Return: Maximum number of entries for the given ring_type
  1038. */
  1039. uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
  1040. {
  1041. struct hal_soc *hal = (struct hal_soc *)hal_soc;
  1042. struct hal_hw_srng_config *ring_config =
  1043. HAL_SRNG_CONFIG(hal, ring_type);
  1044. return ring_config->max_size / ring_config->entry_size;
  1045. }
  1046. qdf_export_symbol(hal_srng_max_entries);
/**
 * hal_srng_get_dir - Returns the direction (source/destination) of a ring type
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: ring direction from the HW SRNG configuration
 *         (e.g. HAL_SRNG_SRC_RING)
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->ring_dir;
}
  1054. /**
  1055. * hal_srng_dump - Dump ring status
  1056. * @srng: hal srng pointer
  1057. */
  1058. void hal_srng_dump(struct hal_srng *srng)
  1059. {
  1060. if (srng->ring_dir == HAL_SRNG_SRC_RING) {
  1061. hal_debug("=== SRC RING %d ===", srng->ring_id);
  1062. hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
  1063. srng->u.src_ring.hp,
  1064. srng->u.src_ring.reap_hp,
  1065. *srng->u.src_ring.tp_addr,
  1066. srng->u.src_ring.cached_tp);
  1067. } else {
  1068. hal_debug("=== DST RING %d ===", srng->ring_id);
  1069. hal_debug("tp %u, hp %u, cached tp %u, loop_cnt %u",
  1070. srng->u.dst_ring.tp,
  1071. *srng->u.dst_ring.hp_addr,
  1072. srng->u.dst_ring.cached_hp,
  1073. srng->u.dst_ring.loop_cnt);
  1074. }
  1075. }
  1076. /**
  1077. * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
  1078. *
  1079. * @hal_soc: Opaque HAL SOC handle
  1080. * @hal_ring: Ring pointer (Source or Destination ring)
  1081. * @ring_params: SRNG parameters will be returned through this structure
  1082. */
  1083. extern void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
  1084. hal_ring_handle_t hal_ring_hdl,
  1085. struct hal_srng_params *ring_params)
  1086. {
  1087. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1088. int i =0;
  1089. ring_params->ring_id = srng->ring_id;
  1090. ring_params->ring_dir = srng->ring_dir;
  1091. ring_params->entry_size = srng->entry_size;
  1092. ring_params->ring_base_paddr = srng->ring_base_paddr;
  1093. ring_params->ring_base_vaddr = srng->ring_base_vaddr;
  1094. ring_params->num_entries = srng->num_entries;
  1095. ring_params->msi_addr = srng->msi_addr;
  1096. ring_params->msi_data = srng->msi_data;
  1097. ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
  1098. ring_params->intr_batch_cntr_thres_entries =
  1099. srng->intr_batch_cntr_thres_entries;
  1100. ring_params->low_threshold = srng->u.src_ring.low_threshold;
  1101. ring_params->flags = srng->flags;
  1102. ring_params->ring_id = srng->ring_id;
  1103. for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++)
  1104. ring_params->hwreg_base[i] = srng->hwreg_base[i];
  1105. }
  1106. qdf_export_symbol(hal_get_srng_params);
  1107. #ifdef FORCE_WAKE
  1108. void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
  1109. {
  1110. struct hal_soc *hal_soc = (struct hal_soc *)soc;
  1111. hal_soc->init_phase = init_phase;
  1112. }
  1113. #endif /* FORCE_WAKE */