/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "target_type.h"
#include "wcss_version.h"
#include "qdf_module.h"

#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
                                int ring_num, int mac_id)
{
        struct hal_hw_srng_config *ring_config =
                HAL_SRNG_CONFIG(hal, ring_type);
        int ring_id;

        if (ring_num >= ring_config->max_rings) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: ring_num exceeded maximum no. of supported rings\n",
                          __func__);
                /* TODO: This is a programming error. Assert if this happens */
                return -EINVAL;
        }

        if (ring_config->lmac_ring) {
                ring_id = ring_config->start_ring_id + ring_num +
                          (mac_id * HAL_MAX_RINGS_PER_LMAC);
        } else {
                ring_id = ring_config->start_ring_id + ring_num;
        }

        return ring_id;
}

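/*
 * Worked example (the numbers below are hypothetical, not taken from any
 * ring configuration table): for an lmac ring type with start_ring_id = 100,
 * ring_num = 1 and mac_id = 2, the function above returns
 * 100 + 1 + (2 * HAL_MAX_RINGS_PER_LMAC).
 */
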
static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
        /* TODO: Should we allocate srng structures dynamically? */
        return &(hal->srng_list[ring_id]);
}

#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4

static void hal_update_srng_hp_tp_address(void *hal_soc,
                                          int shadow_config_index,
                                          int ring_type,
                                          int ring_num)
{
        struct hal_srng *srng;
        struct hal_soc *hal = (struct hal_soc *)hal_soc;
        int ring_id;

        ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
        if (ring_id < 0)
                return;

        srng = hal_get_srng(hal_soc, ring_id);

        if (srng->ring_dir == HAL_SRNG_DST_RING)
                srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
                        + hal->dev_base_addr;
        else
                srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
                        + hal->dev_base_addr;
}

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
                                     int ring_type,
                                     int ring_num)
{
        uint32_t target_register;
        struct hal_soc *hal = (struct hal_soc *)hal_soc;
        struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
        int shadow_config_index = hal->num_shadow_registers_configured;

        if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
                QDF_ASSERT(0);
                return QDF_STATUS_E_RESOURCES;
        }

        hal->num_shadow_registers_configured++;

        target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
        target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
                            * ring_num);

        /* if the ring is a dst ring, we need to shadow the tail pointer */
        if (srng_config->ring_dir == HAL_SRNG_DST_RING)
                target_register += OFFSET_FROM_HP_TO_TP;

        hal->shadow_config[shadow_config_index].addr = target_register;

        /* update hp/tp addr in the hal_soc structure */
        hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
                                      ring_num);

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                  "%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
                  __func__, target_register, shadow_config_index,
                  ring_type, ring_num);

        return QDF_STATUS_SUCCESS;
}

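/*
 * Worked example of the shadow target computation above (register offsets
 * and stride are hypothetical): if the register group indexed by
 * HP_OFFSET_IN_REG_START starts at 0x3000 with a per-ring stride of 0x80,
 * then ring_num 2 maps to target_register 0x3000 + 2 * 0x80 = 0x3100 for a
 * source ring, and to 0x3100 + OFFSET_FROM_HP_TO_TP for a destination ring
 * (where the tail pointer is the one being shadowed).
 */
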
QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
        int ring_type, ring_num;
        struct hal_soc *hal = (struct hal_soc *)hal_soc;

        for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
                struct hal_hw_srng_config *srng_config =
                        &hal->hw_srng_table[ring_type];

                if (ring_type == CE_SRC ||
                    ring_type == CE_DST ||
                    ring_type == CE_DST_STATUS)
                        continue;

                if (srng_config->lmac_ring)
                        continue;

                for (ring_num = 0; ring_num < srng_config->max_rings;
                     ring_num++)
                        hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
        }

        return QDF_STATUS_SUCCESS;
}

void hal_get_shadow_config(void *hal_soc,
                           struct pld_shadow_reg_v2_cfg **shadow_config,
                           int *num_shadow_registers_configured)
{
        struct hal_soc *hal = (struct hal_soc *)hal_soc;

        *shadow_config = hal->shadow_config;
        *num_shadow_registers_configured =
                hal->num_shadow_registers_configured;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s\n", __func__);
}

static void hal_validate_shadow_register(struct hal_soc *hal,
                                         uint32_t *destination,
                                         uint32_t *shadow_address)
{
        unsigned int index;
        uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
        int destination_ba_offset =
                ((char *)destination) - (char *)hal->dev_base_addr;

        index = shadow_address - shadow_0_offset;

        if (index >= MAX_SHADOW_REGISTERS) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: index %x out of bounds\n", __func__, index);
                goto error;
        } else if (hal->shadow_config[index].addr != destination_ba_offset) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: sanity check failure, expected %x, found %x\n",
                          __func__, destination_ba_offset,
                          hal->shadow_config[index].addr);
                goto error;
        }

        return;
error:
        qdf_print("%s: baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
                  __func__, hal->dev_base_addr, destination, shadow_address,
                  shadow_0_offset, index);
        QDF_BUG(0);
        return;
}

static void hal_target_based_configure(struct hal_soc *hal)
{
        switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
        case TARGET_TYPE_QCA6290:
                hal->use_register_windowing = true;
                hal_qca6290_attach(hal);
                break;
#endif
#ifdef QCA_WIFI_QCA6390
        case TARGET_TYPE_QCA6390:
                hal->use_register_windowing = true;
                hal_qca6390_attach(hal);
                break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(CONFIG_WIN)
        case TARGET_TYPE_QCA8074:
                hal_qca8074_attach(hal);
                break;
#endif
        default:
                break;
        }
}

uint32_t hal_get_target_type(struct hal_soc *hal)
{
        struct hif_target_info *tgt_info =
                hif_get_target_info_handle(hal->hif_handle);

        return tgt_info->target_type;
}
qdf_export_symbol(hal_get_target_type);

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle,
 *         NULL on failure (i.e. if any of the allocations fails)
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)
{
        struct hal_soc *hal;
        int i;

        hal = qdf_mem_malloc(sizeof(*hal));
        if (!hal) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: hal_soc allocation failed\n", __func__);
                goto fail0;
        }
        hal->hif_handle = hif_handle;
        hal->dev_base_addr = hif_get_dev_ba(hif_handle);
        hal->qdf_dev = qdf_dev;
        hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
                qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
                HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
        if (!hal->shadow_rdptr_mem_paddr) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: hal->shadow_rdptr_mem_paddr allocation failed\n",
                          __func__);
                goto fail1;
        }

        hal->shadow_wrptr_mem_vaddr =
                (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
                sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
                &(hal->shadow_wrptr_mem_paddr));
        if (!hal->shadow_wrptr_mem_vaddr) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: hal->shadow_wrptr_mem_vaddr allocation failed\n",
                          __func__);
                goto fail2;
        }

        for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
                hal->srng_list[i].initialized = 0;
                hal->srng_list[i].ring_id = i;
        }

        qdf_spinlock_create(&hal->register_access_lock);
        hal->register_window = 0;
        hal->target_type = hal_get_target_type(hal);

        hal_target_based_configure(hal);

        return (void *)hal;

fail2:
        qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
                sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
                hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
fail1:
        qdf_mem_free(hal);
fail0:
        return NULL;
}
qdf_export_symbol(hal_attach);

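/*
 * Illustrative usage sketch (the caller context and error handling below are
 * hypothetical): HIF typically creates the HAL context during its own
 * initialization and releases it on teardown, e.g.
 *
 *	void *hal_soc = hal_attach(hif_ctx, qdf_dev);
 *	if (!hal_soc)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	hal_detach(hal_soc);
 */
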
/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem)
{
        struct hal_soc *hal = (struct hal_soc *)hal_soc;

        mem->dev_base_addr = (void *)hal->dev_base_addr;
        mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
        mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
        mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
        mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
        hif_read_phy_mem_base(hal->hif_handle,
                              (qdf_dma_addr_t *)&mem->dev_base_paddr);
        return;
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Return: None
 *
 * This function should be called as part of HIF de-initialization. It frees
 * the shadow read/write pointer memory and the hal_soc structure allocated
 * by hal_attach().
 */
extern void hal_detach(void *hal_soc)
{
        struct hal_soc *hal = (struct hal_soc *)hal_soc;

        qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
                sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
                hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
        qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
                sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
                hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
        qdf_mem_free(hal);

        return;
}
qdf_export_symbol(hal_detach);

/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
                                        struct hal_srng *srng)
{
        uint32_t reg_val = 0;
        uint64_t tp_addr = 0;

        HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

        if (srng->flags & HAL_SRNG_MSI_INTR) {
                SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB,
                        srng->msi_addr & 0xffffffff);
                reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR),
                        (uint64_t)(srng->msi_addr) >> 32) |
                        SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB,
                        MSI1_ENABLE), 1);
                SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
                SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
        }

        SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
        reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
                ((uint64_t)(srng->ring_base_paddr) >> 32)) |
                SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE),
                srng->entry_size * srng->num_entries);
        SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val);

#if defined(WCSS_VERSION) && \
        ((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
         (defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
        reg_val = SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#else
        reg_val = SRNG_SM(SRNG_SRC_FLD(ID, RING_ID), srng->ring_id) |
                SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#endif
        SRNG_SRC_REG_WRITE(srng, ID, reg_val);

        /**
         * Interrupt setup:
         * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
         * if level mode is required
         */
        reg_val = 0;

        /*
         * WAR - Hawkeye v1 has a hardware bug which requires timer value to be
         * programmed in terms of 1us resolution instead of 8us resolution as
         * given in MLD.
         */
        if (srng->intr_timer_thres_us) {
                reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
                        INTERRUPT_TIMER_THRESHOLD),
                        srng->intr_timer_thres_us);
                /* For HK v2 this should be (srng->intr_timer_thres_us >> 3) */
        }

        if (srng->intr_batch_cntr_thres_entries) {
                reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
                        BATCH_COUNTER_THRESHOLD),
                        srng->intr_batch_cntr_thres_entries *
                        srng->entry_size);
        }
        SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val);

        reg_val = 0;
        if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
                reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1,
                        LOW_THRESHOLD), srng->u.src_ring.low_threshold);
        }

        SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val);

        /* As per HW team, TP_ADDR and HP_ADDR for Idle link ring should
         * remain 0 to avoid some WBM stability issues. Remote head/tail
         * pointers are not required since this ring is completely managed
         * by WBM HW
         */
        if (srng->ring_id != HAL_SRNG_WBM_IDLE_LINK) {
                tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
                        ((unsigned long)(srng->u.src_ring.tp_addr) -
                        (unsigned long)(hal->shadow_rdptr_mem_vaddr)));
                SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff);
                SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32);
        }

        /* Initialize head and tail pointers to indicate ring is empty */
        SRNG_SRC_REG_WRITE(srng, HP, 0);
        SRNG_SRC_REG_WRITE(srng, TP, 0);
        *(srng->u.src_ring.tp_addr) = 0;

        reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
                        SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
                ((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
                        SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
                ((srng->flags & HAL_SRNG_MSI_SWAP) ?
                        SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

        /* Loop count is not used for SRC rings */
        reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1);

        /*
         * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
         * todo: update fw_api and replace with above line
         * (when SRNG_ENABLE field for the MISC register is available in fw_api)
         * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
         */
        reg_val |= 0x40;

        SRNG_SRC_REG_WRITE(srng, MISC, reg_val);
}

/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number of the CE destination ring
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
                                    int ring_num)
{
        uint32_t reg_val = 0;
        uint32_t reg_addr;
        struct hal_hw_srng_config *ring_config =
                HAL_SRNG_CONFIG(hal, CE_DST);

        /* set DEST_MAX_LENGTH according to ce assignment */
        reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
                        ring_config->reg_start[R0_INDEX] +
                        (ring_num * ring_config->reg_size[R0_INDEX]));
        reg_val = HAL_REG_READ(hal, reg_addr);
        reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
        reg_val |= srng->u.dst_ring.max_buffer_length &
                HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
        HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
        uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
                                SEQ_WCSS_UMAC_REO_REG_OFFSET);

        HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: srng pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
                               uint64_t paddr)
{
        SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
                           paddr & 0xffffffff);
        SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
                           paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
                          uint32_t *vaddr)
{
        srng->u.dst_ring.hp_addr = vaddr;
        SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);
        *(srng->u.dst_ring.hp_addr) = srng->u.dst_ring.cached_hp;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "hp_addr=%pK, cached_hp=%d, hp=%d\n",
                  (void *)srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp,
                  *(srng->u.dst_ring.hp_addr));
}

/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
                                        struct hal_srng *srng)
{
        uint32_t reg_val = 0;
        uint64_t hp_addr = 0;

        HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

        if (srng->flags & HAL_SRNG_MSI_INTR) {
                SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB,
                        srng->msi_addr & 0xffffffff);
                reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR),
                        (uint64_t)(srng->msi_addr) >> 32) |
                        SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB,
                        MSI1_ENABLE), 1);
                SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
                SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
        }

        SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
        reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
                ((uint64_t)(srng->ring_base_paddr) >> 32)) |
                SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE),
                srng->entry_size * srng->num_entries);
        SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val);

        reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) |
                SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size);
        SRNG_DST_REG_WRITE(srng, ID, reg_val);

        /**
         * Interrupt setup:
         * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
         * if level mode is required
         */
        reg_val = 0;
        if (srng->intr_timer_thres_us) {
                reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
                        INTERRUPT_TIMER_THRESHOLD),
                        srng->intr_timer_thres_us >> 3);
        }

        if (srng->intr_batch_cntr_thres_entries) {
                reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
                        BATCH_COUNTER_THRESHOLD),
                        srng->intr_batch_cntr_thres_entries *
                        srng->entry_size);
        }

        SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val);
        hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
                ((unsigned long)(srng->u.dst_ring.hp_addr) -
                (unsigned long)(hal->shadow_rdptr_mem_vaddr)));
        SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff);
        SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32);

        /* Initialize head and tail pointers to indicate ring is empty */
        SRNG_DST_REG_WRITE(srng, HP, 0);
        SRNG_DST_REG_WRITE(srng, TP, 0);
        *(srng->u.dst_ring.hp_addr) = 0;

        reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
                        SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
                ((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
                        SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
                ((srng->flags & HAL_SRNG_MSI_SWAP) ?
                        SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

        /*
         * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
         * todo: update fw_api and replace with above line
         * (when SRNG_ENABLE field for the MISC register is available in fw_api)
         * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
         */
        reg_val |= 0x40;

        SRNG_DST_REG_WRITE(srng, MISC, reg_val);
}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
                                    struct hal_srng *srng)
{
        if (srng->ring_dir == HAL_SRNG_SRC_RING)
                hal_srng_src_hw_init(hal, srng);
        else
                hal_srng_dst_hw_init(hal, srng);
}

#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *         NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
        int mac_id, struct hal_srng_params *ring_params)
{
        int ring_id;
        struct hal_soc *hal = (struct hal_soc *)hal_soc;
        struct hal_srng *srng;
        struct hal_hw_srng_config *ring_config =
                HAL_SRNG_CONFIG(hal, ring_type);
        void *dev_base_addr;
        int i;

        ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
        if (ring_id < 0)
                return NULL;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: mac_id %d ring_id %d\n",
                  __func__, mac_id, ring_id);

        srng = hal_get_srng(hal_soc, ring_id);

        if (srng->initialized) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: Ring (ring_type, ring_num) already initialized\n",
                          __func__);
                return NULL;
        }

        dev_base_addr = hal->dev_base_addr;
        srng->ring_id = ring_id;
        srng->ring_dir = ring_config->ring_dir;
        srng->ring_base_paddr = ring_params->ring_base_paddr;
        srng->ring_base_vaddr = ring_params->ring_base_vaddr;
        srng->entry_size = ring_config->entry_size;
        srng->num_entries = ring_params->num_entries;
        srng->ring_size = srng->num_entries * srng->entry_size;
        srng->ring_size_mask = srng->ring_size - 1;
        srng->msi_addr = ring_params->msi_addr;
        srng->msi_data = ring_params->msi_data;
        srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
        srng->intr_batch_cntr_thres_entries =
                ring_params->intr_batch_cntr_thres_entries;
        srng->hal_soc = hal_soc;

        for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
                srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
                        + (ring_num * ring_config->reg_size[i]);
        }

        /* Zero out the entire ring memory */
        qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
                srng->num_entries) << 2);

        srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
        /* TODO: See if we should get these flags from caller */
        srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
        srng->flags |= HAL_SRNG_MSI_SWAP;
        srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

        if (srng->ring_dir == HAL_SRNG_SRC_RING) {
                srng->u.src_ring.hp = 0;
                srng->u.src_ring.reap_hp = srng->ring_size -
                        srng->entry_size;
                srng->u.src_ring.tp_addr =
                        &(hal->shadow_rdptr_mem_vaddr[ring_id]);
                srng->u.src_ring.low_threshold =
                        ring_params->low_threshold * srng->entry_size;
                if (ring_config->lmac_ring) {
                        /* For LMAC rings, head pointer updates will be done
                         * through FW by writing to a shared memory location
                         */
                        srng->u.src_ring.hp_addr =
                                &(hal->shadow_wrptr_mem_vaddr[ring_id -
                                        HAL_SRNG_LMAC1_ID_START]);
                        srng->flags |= HAL_SRNG_LMAC_RING;
                } else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
                        srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

                        if (CHECK_SHADOW_REGISTERS) {
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%s: Ring (%d, %d) missing shadow config\n",
                                          __func__, ring_type, ring_num);
                        }
                } else {
                        hal_validate_shadow_register(hal,
                                                     SRNG_SRC_ADDR(srng, HP),
                                                     srng->u.src_ring.hp_addr);
                }
        } else {
                /* During initialization loop count in all the descriptors
                 * will be set to zero, and HW will set it to 1 on completing
                 * descriptor update in first loop, and increments it by 1 on
                 * subsequent loops (loop count wraps around after reaching
                 * 0xffff). The 'loop_cnt' in SW ring state is the expected
                 * loop count in descriptors updated by HW (to be processed
                 * by SW).
                 */
                srng->u.dst_ring.loop_cnt = 1;
                srng->u.dst_ring.tp = 0;
                srng->u.dst_ring.hp_addr =
                        &(hal->shadow_rdptr_mem_vaddr[ring_id]);
                if (ring_config->lmac_ring) {
                        /* For LMAC rings, tail pointer updates will be done
                         * through FW by writing to a shared memory location
                         */
                        srng->u.dst_ring.tp_addr =
                                &(hal->shadow_wrptr_mem_vaddr[ring_id -
                                        HAL_SRNG_LMAC1_ID_START]);
                        srng->flags |= HAL_SRNG_LMAC_RING;
                } else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
                        srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

                        if (CHECK_SHADOW_REGISTERS) {
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%s: Ring (%d, %d) missing shadow config\n",
                                          __func__, ring_type, ring_num);
                        }
                } else {
                        hal_validate_shadow_register(hal,
                                                     SRNG_DST_ADDR(srng, TP),
                                                     srng->u.dst_ring.tp_addr);
                }
        }

        if (!(ring_config->lmac_ring)) {
                hal_srng_hw_init(hal, srng);

                if (ring_type == CE_DST) {
                        srng->u.dst_ring.max_buffer_length =
                                ring_params->max_buffer_length;
                        hal_ce_dst_setup(hal, srng, ring_num);
                }
        }

        SRNG_LOCK_INIT(&srng->lock);

        srng->initialized = true;

        return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);

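/*
 * Illustrative usage sketch for hal_srng_setup() (the ring type, entry count
 * and caller context below are hypothetical): the caller allocates
 * 8-byte-aligned contiguous ring memory sized with hal_srng_get_entrysize()
 * and passes the base addresses through hal_srng_params, e.g.
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t num_entries = 1024;
 *	uint32_t ring_bytes = hal_srng_get_entrysize(hal_soc, REO_DST) *
 *			      num_entries;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *			qdf_dev->dev, ring_bytes, &params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */
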
/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, void *hal_srng)
{
        struct hal_srng *srng = (struct hal_srng *)hal_srng;

        SRNG_LOCK_DESTROY(&srng->lock);
        srng->initialized = 0;
}
qdf_export_symbol(hal_srng_cleanup);

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: size of a ring entry in bytes
 */
uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
        struct hal_soc *hal = (struct hal_soc *)hal_soc;
        struct hal_hw_srng_config *ring_config =
                HAL_SRNG_CONFIG(hal, ring_type);

        return ring_config->entry_size << 2;
}
qdf_export_symbol(hal_srng_get_entrysize);

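/*
 * For illustration: the configured entry size is kept in 4-byte words, so a
 * ring_config->entry_size of 8 is reported by this API as 32 bytes. Callers
 * multiply the returned value by the desired number of entries when sizing
 * ring memory (see hal_srng_setup()).
 */
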
/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
        struct hal_soc *hal = (struct hal_soc *)hal_soc;
        struct hal_hw_srng_config *ring_config =
                HAL_SRNG_CONFIG(hal, ring_type);

        return ring_config->max_size / ring_config->entry_size;
}
qdf_export_symbol(hal_srng_max_entries);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction (HAL_SRNG_SRC_RING or HAL_SRNG_DST_RING)
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
        struct hal_soc *hal = (struct hal_soc *)hal_soc;
        struct hal_hw_srng_config *ring_config =
                HAL_SRNG_CONFIG(hal, ring_type);

        return ring_config->ring_dir;
}

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng)
{
        if (srng->ring_dir == HAL_SRNG_SRC_RING) {
                qdf_print("=== SRC RING %d ===", srng->ring_id);
                qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u",
                          srng->u.src_ring.hp,
                          srng->u.src_ring.reap_hp,
                          *srng->u.src_ring.tp_addr,
                          srng->u.src_ring.cached_tp);
        } else {
                qdf_print("=== DST RING %d ===", srng->ring_id);
                qdf_print("tp %u, hp %u, cached hp %u, loop_cnt %u",
                          srng->u.dst_ring.tp,
                          *srng->u.dst_ring.hp_addr,
                          srng->u.dst_ring.cached_hp,
                          srng->u.dst_ring.loop_cnt);
        }
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
                                struct hal_srng_params *ring_params)
{
        struct hal_srng *srng = (struct hal_srng *)hal_ring;
        int i = 0;

        ring_params->ring_id = srng->ring_id;
        ring_params->ring_dir = srng->ring_dir;
        ring_params->entry_size = srng->entry_size;
        ring_params->ring_base_paddr = srng->ring_base_paddr;
        ring_params->ring_base_vaddr = srng->ring_base_vaddr;
        ring_params->num_entries = srng->num_entries;
        ring_params->msi_addr = srng->msi_addr;
        ring_params->msi_data = srng->msi_data;
        ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
        ring_params->intr_batch_cntr_thres_entries =
                srng->intr_batch_cntr_thres_entries;
        ring_params->low_threshold = srng->u.src_ring.low_threshold;
        ring_params->flags = srng->flags;
        ring_params->ring_id = srng->ring_id;

        for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
                ring_params->hwreg_base[i] = srng->hwreg_base[i];
}
qdf_export_symbol(hal_get_srng_params);