dbring.c

// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "debug.h"

#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
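
/* Check whether the target actually wrote into a completed buffer:
 * buffers are pre-filled with ATH11K_DB_MAGIC_VALUE before being posted,
 * so any u32 word still holding the magic value means the buffer was not
 * (fully) overwritten and -EINVAL is returned.
 */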
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	size = size >> 2;

	for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
		if (*temp == ATH11K_DB_MAGIC_VALUE)
			return -EINVAL;
	}

	return 0;
}
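
/* Pre-fill the aligned payload with the magic pattern so that
 * ath11k_dbring_validate_buffer() can later detect untouched data.
 */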
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
					   void *buffer, u32 size)
{
	/* memset32() fills the buffer payload with ATH11K_DB_MAGIC_VALUE;
	 * its size argument is the number of u32 values to store, not the
	 * number of bytes, hence the division below.
	 */
	size = size / sizeof(u32);

	memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}
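
/* Post one buffer element to the refill SRNG: fill it with the magic
 * pattern, DMA-map it for device writes, stash it in the buffer IDR and
 * encode the pdev index plus IDR slot into the descriptor cookie. The
 * release path recovers the element via
 * FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie). The caller must hold
 * srng->lock.
 */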
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff,
					enum wmi_direct_buffer_module id)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);

	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}
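
/* Allocate and post as many buffers as the refill ring has free entries
 * (capped at bufs_max). Returns the number of entries that could not be
 * posted, so 0 means the ring was filled completely.
 */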
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring,
				   enum wmi_direct_buffer_module id)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
		if (!buff)
			break;

		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}
		ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}
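
/* Describe the ring to the firmware: base, head-index and tail-index
 * addresses, element count/size and the per-module response batching
 * parameters, via the WMI pdev DMA ring config request.
 */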
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
	param.module_id = id;
	param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems = ring->bufs_max;
	param.buf_size = ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms = ring->event_timeout_ms;

	ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}
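
/* Register the per-module delivery parameters and the callback invoked
 * for each buffer in ath11k_dbring_buffer_release_event().
 */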
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}
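
/* Derive the ring geometry from the SRNG size and the ath11k_dbring_cap
 * entry (minimum buffer size and alignment), then fill the ring with
 * buffers.
 */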
int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

	ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);

	return ret;
}
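
/* Allocate the HAL_RXDMA_DIR_BUF refill SRNG backing this direct
 * buffer ring.
 */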
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}
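
/* Look up the direct buffer capability entry in ab->db_caps (as reported
 * by the firmware) matching the given pdev and module id, e.g.
 * WMI_DIRECT_BUF_SPECTRAL.
 */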
int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}
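
/* WMI buffer-release event handler: for each released entry, decode the
 * descriptor cookie back to an IDR slot, unmap the buffer, hand the
 * aligned payload (plus per-entry metadata) to the registered handler,
 * then clear and re-post the buffer to the refill ring.
 */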
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm, module_id;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;
	module_id = ev->fixed.module_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		ath11k_debugfs_add_dbring_entry(ar, module_id,
						ATH11K_DBG_DBR_EVENT_RX, srng);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		buff->paddr = 0;
		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}
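
/* Free the refill SRNG allocated by ath11k_dbring_srng_setup(). */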
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}
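
/* Release every buffer still tracked in the IDR: unmap, free the payload
 * and the element, then destroy the IDR itself.
 */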
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}