/*
 * Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
/**
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num, int pdev_id, uint32_t num_entries)
{
	void *hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	/* TODO: See if we should get align size from hal */
	uint32_t ring_base_align = 8;
	struct hal_srng_params ring_params;

	srng->hal_srng = NULL;
	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
		soc->osdev, NULL, srng->alloc_size,
		&(srng->base_paddr_unaligned));
	if (!srng->base_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: alloc failed - ring_type: %d, ring_num %d\n",
			__func__, ring_type, ring_num);
		return QDF_STATUS_E_NOMEM;
	}
	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
		((ring_base_align -
		((unsigned long)srng->base_vaddr_unaligned %
		ring_base_align)) % ring_base_align);
	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
		((unsigned long)(ring_params.ring_base_vaddr) -
		(unsigned long)srng->base_vaddr_unaligned);
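	/* Worked example (illustrative): with ring_base_align = 8 and an
	 * unaligned base of 0x1003, 0x1003 % 8 = 3, so the base is advanced
	 * by (8 - 3) % 8 = 5 bytes to 0x1008. The extra ring_base_align - 1
	 * bytes reserved in alloc_size above guarantee this offset still
	 * fits inside the allocation.
	 */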
	ring_params.num_entries = num_entries;

	/* TODO: Check MSI support and get MSI settings from HIF layer */
	ring_params.msi_data = 0;
	ring_params.msi_addr = 0;

	/* TODO: Setup interrupt timer and batch counter thresholds for
	 * interrupt mitigation based on ring type
	 */
	ring_params.intr_timer_thres_us = 8;
	ring_params.intr_batch_cntr_thres_entries = 1;

	/* TODO: Currently hal layer takes care of endianness related settings.
	 * See if these settings need to be passed from DP layer
	 */
	ring_params.flags = 0;

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF)) {
		/* TODO: Setting low threshold to 1/8th of ring size; see if
		 * this needs to be configurable
		 */
		ring_params.low_threshold = num_entries >> 3;
		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
		pdev_id, &ring_params);

	return 0;
}
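/* Usage sketch (illustrative): callers size and map one ring per call, e.g.
 *
 *	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
 *		REO_CMD_RING_SIZE))
 *		goto fail;
 *
 * dp_soc_cmn_setup() below does exactly this for each of the common rings.
 */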
/**
 * dp_srng_cleanup() - Internal function to cleanup SRNG rings used by data
 * path. Any buffers allocated and attached to ring entries are expected to
 * be freed before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring type: %d, num:%d not setup\n",
			__func__, ring_type, ring_num);
		return;
	}

	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);

	qdf_mem_free_consistent(soc->osdev, NULL,
		srng->alloc_size,
		srng->base_vaddr_unaligned,
		srng->base_paddr_unaligned, 0);
}
/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(void *hif_handle);
/*
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @osif_soc: Opaque SOC handle from OSIF/HDD
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: OL interface callbacks
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops)
{
	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: DP SOC memory allocation failed\n", __func__);
		goto fail0;
	}

	soc->osif_soc = osif_soc;
	soc->osdev = qdf_osdev;
	soc->ol_ops = ol_ops;
	soc->hif_handle = hif_handle;
	soc->hal_soc = hif_get_hal_handle(hif_handle);
	soc->htt_handle = htt_soc_attach(soc, osif_soc, htc_handle,
		soc->hal_soc, qdf_osdev);
	if (soc->htt_handle == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: HTT attach failed\n", __func__);
		goto fail1;
	}

#ifdef notyet
	if (wdi_event_attach(soc)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: WDI event attach failed\n", __func__);
		goto fail2;
	}
#endif

	return (void *)soc;

#ifdef notyet
fail2:
	htt_soc_detach(soc->htt_handle);
#endif
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
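/* Attach-order sketch (illustrative): a control-path caller is expected to
 * bring the datapath up in this order, using the handles returned at each
 * step:
 *
 *	soc = dp_soc_attach_wifi3(osif_soc, hif_handle, htc_handle,
 *		qdf_osdev, ol_ops);
 *	pdev = dp_pdev_attach_wifi3(soc, ctrl_pdev, htc_handle,
 *		qdf_osdev, 0);
 *	dp_soc_attach_target_wifi3(soc);	/\* after FW is ready *\/
 */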
/* Temporary definitions to be moved to wlan_cfg */
static inline uint32_t wlan_cfg_get_max_clients(void *wlan_cfg_ctx)
{
	return 512;
}

static inline uint32_t wlan_cfg_max_alloc_size(void *wlan_cfg_ctx)
{
	/* Change this to a lower value to enforce scattered idle list mode */
	return 32 << 20;
}

static inline int wlan_cfg_per_pdev_tx_ring(void *wlan_cfg_ctx)
{
	return 1;
}

static inline int wlan_cfg_num_tcl_data_rings(void *wlan_cfg_ctx)
{
	return 1;
}

static inline int wlan_cfg_per_pdev_rx_ring(void *wlan_cfg_ctx)
{
	return 1;
}

static inline int wlan_cfg_num_reo_dest_rings(void *wlan_cfg_ctx)
{
	return 4;
}

static inline int wlan_cfg_pkt_type(void *wlan_cfg_ctx)
{
	return htt_pkt_type_ethernet;
}

#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
/*
 * Allocate and setup link descriptor pool that will be used by HW for
 * various link and queue descriptors and managed by WBM
 */
static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
{
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc =
		hal_num_mpdus_per_link_desc(soc->hal_soc);
	uint32_t num_msdus_per_link_desc =
		hal_num_msdus_per_link_desc(soc->hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_link_desc_banks;
	uint32_t last_bank_size = 0;
	uint32_t entry_size, num_entries;
	int i;
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included, because they are expected to be
	 * allocated contiguously with REO queue descriptors.
	 */
	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

	num_mpdu_queue_descs = num_mpdu_link_descs /
		num_mpdu_links_per_queue_desc;

	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
		num_msdus_per_link_desc;
	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) /
		num_msdus_per_link_desc;
	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
		num_tx_msdu_link_descs + num_rx_msdu_link_descs;

	/* Round up to power of 2 */
	total_link_descs = 1;
	while (total_link_descs < num_entries)
		total_link_descs <<= 1;
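	/* Worked example (illustrative; the hal_* per-descriptor counts are
	 * target specific, the value below is assumed only for the
	 * arithmetic): with max_clients = 512 and num_mpdus_per_link_desc = 6,
	 * num_mpdu_link_descs = (512 * 2 * 128) / 6 = 21845. Summing all four
	 * contributions and rounding up to the next power of 2 gives the
	 * final total_link_descs.
	 */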
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"%s: total_link_descs: %u, link_desc_size: %d\n",
		__func__, total_link_descs, link_desc_size);

	total_mem_size = total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	if (total_mem_size <= max_alloc_size) {
		num_link_desc_banks = 0;
		last_bank_size = total_mem_size;
	} else {
		num_link_desc_banks = (total_mem_size) /
			(max_alloc_size - link_desc_align);
		last_bank_size = total_mem_size %
			(max_alloc_size - link_desc_align);
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"%s: total_mem_size: %d, num_link_desc_banks: %u\n",
		__func__, total_mem_size, num_link_desc_banks);
	for (i = 0; i < num_link_desc_banks; i++) {
		soc->link_desc_banks[i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev, NULL,
			max_alloc_size,
			&(soc->link_desc_banks[i].base_paddr_unaligned));
		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Link descriptor memory alloc failed\n",
				__func__);
			goto fail;
		}
		soc->link_desc_banks[i].size = max_alloc_size;
		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) +
			((link_desc_align - ((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align)) % link_desc_align));
		soc->link_desc_banks[i].base_paddr = (unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));
	}
	if (last_bank_size) {
		/* Allocate last bank in case total memory required is not
		 * an exact multiple of max_alloc_size
		 */
		soc->link_desc_banks[i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev, NULL,
			last_bank_size,
			&(soc->link_desc_banks[i].base_paddr_unaligned));
		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Link descriptor memory alloc failed\n",
				__func__);
			goto fail;
		}
		soc->link_desc_banks[i].size = last_bank_size;
		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
			(soc->link_desc_banks[i].base_vaddr_unaligned) +
			((link_desc_align - ((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align)) % link_desc_align));
		soc->link_desc_banks[i].base_paddr =
			(unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));
	}
	/* Allocate and setup link descriptor idle list for HW internal use */
	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
	total_mem_size = entry_size * total_link_descs;

	if (total_mem_size <= max_alloc_size) {
		void *desc;

		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Link desc idle ring setup failed\n",
				__func__);
			goto fail;
		}

		hal_srng_access_start_unlocked(soc->hal_soc,
			soc->wbm_idle_link_ring.hal_srng);
		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			uint32_t num_entries = (soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned)))
				/ link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);

			while (num_entries && (desc = hal_srng_src_get_next(
				soc->hal_soc,
				soc->wbm_idle_link_ring.hal_srng))) {
				hal_set_link_desc_addr(desc, i, paddr);
				num_entries--;
				paddr += link_desc_size;
			}
		}
		hal_srng_access_end_unlocked(soc->hal_soc,
			soc->wbm_idle_link_ring.hal_srng);
	} else {
		uint32_t num_scatter_bufs;
		uint32_t num_entries_per_buf;
		uint32_t rem_entries;
		uint8_t *scatter_buf_ptr;
		uint16_t scatter_buf_num;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = (total_mem_size /
			soc->wbm_idle_scatter_buf_size) + ((total_mem_size %
			soc->wbm_idle_scatter_buf_size) ? 1 : 0);
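		/* The expression above is a ceiling division: one extra
		 * scatter buffer is added whenever total_mem_size is not an
		 * exact multiple of the scatter buffer size, so the tail
		 * entries still have somewhere to live.
		 */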
		for (i = 0; i < num_scatter_bufs; i++) {
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev, NULL,
				soc->wbm_idle_scatter_buf_size,
				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s:Scatter list memory alloc failed\n",
					__func__);
				goto fail;
			}
		}

		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			uint32_t num_link_descs =
				(soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned)))
				/ link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);

			while (num_link_descs) {
				hal_set_link_desc_addr((void *)scatter_buf_ptr,
					i, paddr);
				num_link_descs--;
				paddr += link_desc_size;
				rem_entries--;
				if (rem_entries) {
					scatter_buf_ptr += entry_size;
				} else {
					rem_entries = num_entries_per_buf;
					scatter_buf_num++;
					scatter_buf_ptr = (uint8_t *)(
						soc->wbm_idle_scatter_buf_base_vaddr[
						scatter_buf_num]);
				}
			}
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr - (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num])));
	}
	return 0;

fail:
	if (soc->wbm_idle_link_ring.hal_srng) {
		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0);
	}
	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
			qdf_mem_free_consistent(soc->osdev, NULL,
				soc->wbm_idle_scatter_buf_size,
				soc->wbm_idle_scatter_buf_base_vaddr[i],
				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
		}
	}

	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, NULL,
				soc->link_desc_banks[i].size,
				soc->link_desc_banks[i].base_vaddr_unaligned,
				soc->link_desc_banks[i].base_paddr_unaligned,
				0);
		}
	}
	return QDF_STATUS_E_FAILURE;
}
/*
 * Free link descriptor pool that was set up for HW
 */
static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
{
	int i;

	if (soc->wbm_idle_link_ring.hal_srng) {
		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0);
	}
	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
			qdf_mem_free_consistent(soc->osdev, NULL,
				soc->wbm_idle_scatter_buf_size,
				soc->wbm_idle_scatter_buf_base_vaddr[i],
				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
		}
	}

	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, NULL,
				soc->link_desc_banks[i].size,
				soc->link_desc_banks[i].base_vaddr_unaligned,
				soc->link_desc_banks[i].base_paddr_unaligned,
				0);
		}
	}
}
/* TODO: Following should be configurable */
#define WBM_RELEASE_RING_SIZE 64
#define TCL_DATA_RING_SIZE 512
#define TCL_CMD_RING_SIZE 32
#define TCL_STATUS_RING_SIZE 32
#define REO_DST_RING_SIZE 2048
#define REO_REINJECT_RING_SIZE 32
#define RX_RELEASE_RING_SIZE 256
#define REO_EXCEPTION_RING_SIZE 128
#define REO_CMD_RING_SIZE 32
#define REO_STATUS_RING_SIZE 32
#define RXDMA_BUF_RING_SIZE 8192
#define RXDMA_MONITOR_BUF_RING_SIZE 8192
#define RXDMA_MONITOR_DST_RING_SIZE 2048
#define RXDMA_MONITOR_STATUS_RING_SIZE 2048
/*
 * dp_soc_cmn_setup() - Common SoC level initialization
 * @soc: Datapath SOC handle
 *
 * This is an internal function used to setup common SOC data structures,
 * to be called from PDEV attach after receiving HW mode capabilities from FW
 */
static int dp_soc_cmn_setup(struct dp_soc *soc)
{
	int i;

	if (soc->cmn_init_done)
		return 0;

	if (dp_peer_find_attach(soc))
		goto fail0;

	if (dp_hw_link_desc_pool_setup(soc))
		goto fail1;

	/* Setup SRNG rings */
	/* Common rings */
	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
		WBM_RELEASE_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for wbm_desc_rel_ring\n",
			__func__);
		goto fail1;
	}

	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i, 0, TCL_DATA_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: dp_srng_setup failed for tcl_data_ring[%d]\n",
					__func__, i);
				goto fail1;
			}
			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i, 0, TCL_DATA_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: dp_srng_setup failed for tx_comp_ring[%d]\n",
					__func__, i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_tcl_data_rings = 0;
	}

	/* TCL command and status rings */
	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
		TCL_CMD_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for tcl_cmd_ring\n",
			__func__);
		goto fail1;
	}

	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
		TCL_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for tcl_status_ring\n",
			__func__);
		goto fail1;
	}

	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
	 * descriptors
	 */

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes from
			 * wlan_cfg
			 */
			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
				i, 0, REO_DST_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: dp_srng_setup failed for reo_dest_ring[%d]\n",
					__func__, i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_reo_dest_rings = 0;
	}

	/* TBD: call dp_rx_init to setup Rx SW descriptors */

	/* REO reinjection ring */
	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
		REO_REINJECT_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for reo_reinject_ring\n",
			__func__);
		goto fail1;
	}

	/* Rx release ring */
	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
		RX_RELEASE_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for rx_rel_ring\n",
			__func__);
		goto fail1;
	}

	/* Rx exception ring */
	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for reo_exception_ring\n",
			__func__);
		goto fail1;
	}

	/* REO command and status rings */
	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
		REO_CMD_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for reo_cmd_ring\n",
			__func__);
		goto fail1;
	}

	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
		REO_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for reo_status_ring\n",
			__func__);
		goto fail1;
	}

	/* Setup HW REO */
	hal_reo_setup(soc->hal_soc);

	soc->cmn_init_done = 1;
	return 0;

fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
fail0:
	return QDF_STATUS_E_FAILURE;
}
static void dp_pdev_detach_wifi3(void *txrx_pdev, int force);
/*
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @ctrl_pdev: Opaque PDEV handle from OSIF/HDD
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: PDEV ID
 *
 * Return: DP PDEV handle on success, NULL on failure
 */
void *dp_pdev_attach_wifi3(void *txrx_soc, void *ctrl_pdev,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, int pdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: DP PDEV memory allocation failed\n", __func__);
		goto fail0;
	}

	pdev->soc = soc;
	pdev->osif_pdev = ctrl_pdev;
	pdev->pdev_id = pdev_id;
	soc->pdev_list[pdev_id] = pdev;

	TAILQ_INIT(&pdev->vdev_list);
	pdev->vdev_count = 0;

	if (dp_soc_cmn_setup(soc)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_soc_cmn_setup failed\n", __func__);
		goto fail0;
	}

	/* Setup per PDEV TCL rings if configured */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
			pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: dp_srng_setup failed for tcl_data_ring\n",
				__func__);
			goto fail0;
		}
		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
			WBM2SW_RELEASE, pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: dp_srng_setup failed for tx_comp_ring\n",
				__func__);
			goto fail0;
		}
		soc->num_tcl_data_rings++;
	}

	/* Setup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: dp_srng_setup failed for reo_dest_ring\n",
				__func__);
			goto fail0;
		}
		soc->num_reo_dest_rings++;
	}

	if (dp_srng_setup(soc, &pdev->rxdma_buf_ring, RXDMA_BUF, 0, pdev_id,
		RXDMA_BUF_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for rxdma_buf_ring\n",
			__func__);
		goto fail0;
	}

	/* TODO: RXDMA destination ring is not planned to be used currently.
	 * Setup the ring when required
	 */

	if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
		pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for rxdma_mon_buf_ring\n",
			__func__);
		goto fail0;
	}

	if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
		pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for rxdma_mon_dst_ring\n",
			__func__);
		goto fail0;
	}

	if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
		RXDMA_MONITOR_STATUS, 0, pdev_id,
		RXDMA_MONITOR_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_srng_setup failed for rxdma_mon_status_ring\n",
			__func__);
		goto fail0;
	}

	return (void *)pdev;
fail0:
	if (pdev)
		dp_pdev_detach_wifi3((void *)pdev, 0);
	return NULL;
}
/*
 * dp_pdev_detach_wifi3() - detach txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force detach
 *
 */
static void dp_pdev_detach_wifi3(void *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
			TCL_DATA, pdev->pdev_id);
		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
			WBM2SW_RELEASE, pdev->pdev_id);
	}
	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
			REO_DST, pdev->pdev_id);
	}

	dp_srng_cleanup(soc, &pdev->rxdma_buf_ring, RXDMA_BUF, 0);
	dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
	dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
	dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
		RXDMA_MONITOR_STATUS, 0);

	soc->pdev_list[pdev->pdev_id] = NULL;

	qdf_mem_free(pdev);
}
/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle
 *
 */
void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	soc->cmn_init_done = 0;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach_wifi3((void *)soc->pdev_list[i], 1);
	}
	dp_peer_find_detach(soc);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */

	/* Free the ring memories */
	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
				REO_DST, i);
		}
	}

	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);

	htt_soc_detach(soc->htt_handle);
}

/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @txrx_soc: Datapath SOC handle
 */
int dp_soc_attach_target_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	htt_soc_attach_target(soc->htt_handle);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_buf_ring.hal_srng, RXDMA_BUF);
#ifdef notyet /* FW doesn't handle monitor rings yet */
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_mon_buf_ring.hal_srng,
				RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_mon_dst_ring.hal_srng,
				RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_mon_status_ring.hal_srng,
				RXDMA_MONITOR_STATUS);
#endif
		}
	}

	return 0;
}
/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @txrx_pdev: Datapath PDEV handle
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
void *dp_vdev_attach_wifi3(void *txrx_pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: DP VDEV memory allocation failed\n", __func__);
		goto fail0;
	}

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;

	vdev->osif_rx = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
#ifdef notyet
	vdev->filters_num = 0;
#endif

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);

	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	pdev->vdev_count++;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"Created vdev %p (%02x:%02x:%02x:%02x:%02x:%02x)\n", vdev,
		vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
		vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
		vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);

	return (void *)vdev;

fail0:
	return NULL;
}
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: void
 */
void dp_vdev_register_wifi3(void *vdev_handle, void *osif_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->osif_vdev = osif_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#if UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
#endif
#ifdef notyet
	/* TODO: Enable the following once Tx code is integrated */
	txrx_ops->tx.tx = dp_tx_send;
#endif
}
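/* Usage sketch (illustrative; osif_dev and osif_rx_deliver are hypothetical
 * OSIF-side names):
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = osif_rx_deliver;
 *	dp_vdev_register_wifi3(vdev_handle, osif_dev, &ops);
 *
 * After this call, the datapath delivers received frames for the vdev
 * through ops.rx.rx.
 */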
/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @vdev_handle: Datapath VDEV handle
 * @callback: OL_IF callback to invoke on completion of detach
 * @cb_context: Callback context
 *
 */
void dp_vdev_detach_wifi3(void *vdev_handle,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* preconditions */
	qdf_assert(vdev);

	/* remove the vdev from its parent pdev's list */
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);

	/*
	 * Use peer_ref_mutex while accessing peer_list, in case
	 * a peer is in the process of being removed from the list.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/* check that the vdev has no peers allocated */
	if (!TAILQ_EMPTY(&vdev->peer_list)) {
		/* debug print - will be removed later */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x) "
			"until deletion finishes for all its peers\n",
			__func__, vdev,
			vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
			vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
			vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
		/* indicate that the vdev needs to be deleted */
		vdev->delete.pending = 1;
		vdev->delete.callback = callback;
		vdev->delete.context = cb_context;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		return;
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"%s: deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
		__func__, vdev,
		vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
		vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
		vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);

	qdf_mem_free(vdev);

	if (callback)
		callback(cb_context);
}
/*
 * dp_peer_attach_wifi3() - attach txrx peer
 * @vdev_handle: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 *
 * Return: DP peer handle on success, NULL on failure
 */
void *dp_peer_attach_wifi3(void *vdev_handle, uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;
	int i;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer_mac_addr);

	pdev = vdev->pdev;
	soc = pdev->soc;
#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
		soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif
	if (!peer)
		return NULL; /* failure */

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	/* store provided params */
	peer->vdev = vdev;
	qdf_mem_copy(
		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
	/* TODO: See if rx_opt_proc is really required */
	peer->rx_opt_proc = soc->rx_opt_proc;
	dp_peer_rx_init(pdev, peer);

	/* initialize the peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
		peer->peer_ids[i] = HTT_INVALID_PEER;

	qdf_spin_lock_bh(&soc->peer_ref_mutex);

	qdf_atomic_init(&peer->ref_cnt);

	/* keep one reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);

	/* add this peer into the vdev's list */
	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	if (soc->ol_ops->peer_set_default_routing) {
		/* TODO: Check on the destination ring number to be passed
		 * to FW
		 */
		soc->ol_ops->peer_set_default_routing(soc->osif_soc,
			peer->mac_addr.raw, peer->vdev->vdev_id, 0, 1);
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"vdev %p created peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
		vdev, peer,
		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	/*
	 * For every peer MAP message, check if the peer's MAC address
	 * matches the vdev's MAC address, and if so mark it as the bss_peer
	 */
	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"vdev bss_peer!!!!\n");
		peer->bss_peer = 1;
		vdev->vap_bss_peer = peer;
	}

	return (void *)peer;
}
/*
 * dp_peer_authorize() - authorize txrx peer
 * @peer_handle: Datapath peer handle
 * @authorize: non-zero to mark the peer as authorized, 0 otherwise
 *
 */
void dp_peer_authorize(void *peer_handle, uint32_t authorize)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_soc *soc;

	if (peer != NULL) {
		soc = peer->vdev->pdev->soc;

		qdf_spin_lock_bh(&soc->peer_ref_mutex);
		peer->authorize = authorize ? 1 : 0;
#ifdef notyet /* ATH_BAND_STEERING */
		peer->peer_bs_inact_flag = 0;
		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
#endif
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}

/*
 * dp_peer_unref_delete() - unref and delete peer
 * @peer_handle: Datapath peer handle
 *
 */
void dp_peer_unref_delete(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_peer *tmppeer;
	int found = 0;
	uint16_t peer_id;

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_ids[0];

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		if (peer_id != HTT_INVALID_PEER)
			soc->peer_id_to_obj_map[peer_id] = NULL;

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"Deleting peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
			peer, peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		/* remove the reference to the peer from the hash table */
		dp_peer_find_hash_remove(soc, peer);

		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}
		if (found) {
			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
				peer_list_elem);
		} else {
			/* Ignoring the remove operation as peer not found */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
				"WARN peer %p not found in vdev (%p)->peer_list:%p\n",
				peer, vdev, &peer->vdev->peer_list);
		}

		/* cleanup the Rx reorder queues for this peer */
		dp_peer_rx_cleanup(vdev, peer);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * Now that there are no references to the peer, we can
			 * release the peer reference lock.
			 */
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);

			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (vdev->delete.pending) {
				ol_txrx_vdev_delete_cb vdev_delete_cb =
					vdev->delete.callback;
				void *vdev_delete_context =
					vdev->delete.context;

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO_HIGH,
					"%s: deleting vdev object %p "
					"(%02x:%02x:%02x:%02x:%02x:%02x)"
					" - its last peer is done\n",
					__func__, vdev,
					vdev->mac_addr.raw[0],
					vdev->mac_addr.raw[1],
					vdev->mac_addr.raw[2],
					vdev->mac_addr.raw[3],
					vdev->mac_addr.raw[4],
					vdev->mac_addr.raw[5]);
				/* all peers are gone, go ahead and delete it */
				qdf_mem_free(vdev);
				if (vdev_delete_cb)
					vdev_delete_cb(vdev_delete_context);
			}
		} else {
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		}
#ifdef notyet
		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
#else
		qdf_mem_free(peer);
#endif
#ifdef notyet /* See why this should be done in DP layer */
		qdf_atomic_inc(&soc->peer_count);
#endif
	} else {
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
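/* Reference lifecycle sketch (illustrative): dp_peer_attach_wifi3() takes
 * one reference on the peer (qdf_atomic_inc(&peer->ref_cnt)) and the HTT
 * PEER_MAP handler is expected to take another; dp_peer_detach_wifi3()
 * below and the PEER_UNMAP handler each drop one via
 * dp_peer_unref_delete(), and the final drop frees the peer under
 * peer_ref_mutex as implemented above.
 */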
/*
 * dp_peer_detach_wifi3() - Detach txrx peer
 * @peer_handle: Datapath peer handle
 *
 */
void dp_peer_detach_wifi3(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* redirect the peer's rx delivery function to point to a
	 * discard func
	 */
	peer->rx_opt_proc = dp_rx_discard;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n", __func__, peer,
		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer_handle);
}