/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"
#include "hif.h"
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it may
 * seem, than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 *
 * Target TX hash result: toeplitz_hash_result
 */
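/*
 * Illustrative usage sketch (documentation only, not compiled).
 * The handle and buffer names below are assumptions; the ce_ops calls
 * map to the SRNG handlers implemented in this file, obtained through
 * ce_services_srng().
 *
 *	struct ce_ops *ops = ce_services_srng();
 *
 *	// Post an anonymous receive buffer; rx_nbuf is echoed back as the
 *	// per-transfer context when the buffer completes.
 *	ops->ce_recv_buf_enqueue(ce_hdl, rx_nbuf, rx_nbuf_paddr);
 *
 *	// Send a single buffer with a transfer id and no special flags.
 *	ops->ce_send_nolock(ce_hdl, tx_nbuf, tx_nbuf_paddr, len,
 *			    transfer_id, 0, 0);
 *
 * The per-transfer contexts (rx_nbuf/tx_nbuf above) are handed back by
 * ce_completed_recv_next_nolock()/ce_completed_send_next_nolock() when
 * the corresponding descriptors are reaped.
 */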
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
						    0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
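/**
 * hif_record_ce_srng_desc_event() - record a CE SRNG descriptor event in
 *	the per-CE descriptor history
 * @scn: hif_softc context
 * @ce_id: which copy engine the event belongs to
 * @type: event type (buffer post, completion, ...)
 * @descriptor: SRNG descriptor to snapshot, may be NULL
 * @memory: per-transfer virtual address (e.g. nbuf), may be NULL
 * @index: ring index associated with the event
 * @len: payload length to record when data logging is enabled
 * @hal_ring: hal_srng pointer used to read the current HP/TP, may be NULL
 */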
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
	int record_index;
	struct hif_ce_desc_event *event;
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id >= CE_COUNT_MAX)
		return;

	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     hal_get_entrysize_from_srng(hal_ring));

	if (hal_ring)
		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
				&event->current_hp);

	event->memory = memory;
	event->index = index;

	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
		hif_ce_desc_record_rx_paddr(scn, event, memory);

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
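/**
 * ce_send_nolock_srng() - post a single source buffer to a copy engine
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back on send completion
 * @buffer: physical address of the payload
 * @nbytes: number of bytes to send
 * @transfer_id: metadata written into the descriptor
 * @flags: CE_SEND_FLAG_* bits (gather, swap-disable, ...)
 * @user_flags: additional per-buffer flags (unused by this implementation)
 *
 * "nolock" means the caller is responsible for serializing access to the
 * source ring (e.g. via the CE index lock).
 *
 * Return: QDF_STATUS_SUCCESS on success, a QDF error code otherwise
 */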
static int
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;

	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}

	{
		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
					      (union ce_srng_desc *)src_desc,
					      per_transfer_context,
					      src_ring->write_index, nbytes,
					      src_ring->srng_ctx);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);

	return status;
}
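/**
 * ce_sendlist_send_srng() - post a list of buffers as one gather transfer
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context, attached to the final item only
 * @sendlist: list of simple buffers to send
 * @transfer_id: metadata written into each descriptor
 *
 * Every item except the last is posted with CE_SEND_FLAG_GATHER so the
 * target treats the list as a single transfer. If the ring cannot hold
 * the entire list, nothing is posted.
 *
 * Return: QDF_STATUS_SUCCESS if every item was posted, -ENOMEM if the
 *	ring lacked space, or the error from the failing post
 */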
static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}

		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					     (qdf_dma_addr_t)item->data,
					     item->u.nbytes,
					     transfer_id, item->flags,
					     item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS (0) if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_desc *dest_desc = NULL;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		/* balance the successful Q_TARGET_ACCESS_BEGIN above */
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		dest_desc = hal_srng_src_get_next(scn->hal_soc,
						  dest_ring->srng_ctx);

		if (!dest_desc) {
			status = QDF_STATUS_E_FAILURE;
		} else {
			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		dest_desc = NULL;
		status = QDF_STATUS_E_FAILURE;
	}

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_POST,
				      (union ce_srng_desc *)dest_desc,
				      per_recv_context,
				      dest_ring->write_index, 0,
				      dest_ring->srng_ctx);
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}
/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return count;
}
/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
					    status_ring->srng_ctx);
	if (!dest_status) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	/*
	 * By copying the dest_desc_info element to local memory, we
	 * avoid an extra memory read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	if (status == QDF_STATUS_SUCCESS) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_RING_BUFFER_REAP,
					      NULL,
					      dest_ring->
					      per_transfer_context[sw_index],
					      dest_ring->sw_index, nbytes,
					      dest_ring->srng_ctx);

		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_STATUS_RING_REAP,
					      (union ce_srng_desc *)dest_status,
					      NULL,
					      -1, 0,
					      status_ring->srng_ctx);
	}

	return status;
}
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (!dest_ring->per_transfer_context[sw_index])
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}
/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_TX_DESC_COMPLETION,
					      (union ce_srng_desc *)src_desc,
					      src_ring->
					      per_transfer_context[swi],
					      swi, src_desc->nbytes,
					      src_ring->srng_ctx);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like a read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}
/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
						  src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like a read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/* TODO */
	return false;
}
static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
}
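/**
 * ce_srng_msi_ring_params_setup() - fill MSI address/data for a CE ring
 * @scn: hif context
 * @ce_id: copy engine id, used to pick a vector from the "CE" MSI block
 * @ring_params: srng params being built; MSI fields are filled in here
 *
 * If no MSI assignment exists for the "CE" user, the params are left
 * untouched and the ring falls back to non-MSI operation.
 */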
static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		(void *)ring_params->msi_addr, ring_params->msi_data);
}
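/**
 * ce_srng_src_ring_setup() - set up the hal srng for a CE source ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @src_ring: CE ring state supplying the ring base addresses and size
 * @attr: CE attributes; CE_ATTR_DISABLE_INTR controls interrupt setup
 */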
static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A default value of 0 disables the timer.
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer.
	 */
	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}
/**
 * ce_srng_initialize_dest_timer_interrupt_war() - workaround initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination ring to act as a
 * proxy for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
					struct CE_ring_state *dest_ring,
					struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_around = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_around) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* The dest ring is also a source ring from the host's point of view:
	 * the host posts empty receive buffers into it.
	 */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
 *	thresholds
 * @scn: hif handle
 * @ring_params: ce srng params
 *
 * Return: None
 */
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us =
			scn->ini_cfg.ce_status_ring_timer_threshold;
	ring_params->intr_batch_cntr_thres_entries =
			scn->ini_cfg.ce_status_ring_batch_count_threshold;
}
#else
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us = 0x1000;
	ring_params->intr_batch_cntr_thres_entries = 0x1;
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags))
		ce_status_ring_config_int_threshold(scn, &ring_params);

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}
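/**
 * ce_ring_setup_srng() - dispatch srng setup for a CE ring by ring type
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @ce_id: copy engine id
 * @ring: CE ring state to attach the srng context to
 * @attr: CE attributes for this engine
 *
 * Return: 0 (asserts on an unknown ring type)
 */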
static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}
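/**
 * ce_construct_shadow_config_srng() - register shadow register entries with
 *	the hal layer for every configured CE ring
 * @scn: hif context
 */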
static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}
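/**
 * ce_prepare_shadow_register_v2_cfg_srng() - build the v2 shadow register
 *	configuration handed to the platform driver
 * @scn: hif context
 * @shadow_config: filled with a pointer to the hal shadow config table
 * @num_shadow_registers_configured: filled with the number of entries
 *
 * If the hal layer already holds a shadow configuration it is returned
 * unchanged; otherwise one is constructed first.
 */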
static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	if (!scn->hal_soc) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with the original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get the updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}
#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info_srng() - Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non-zero for failure
 */
static
int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
			   struct ce_index *info)
{
	struct CE_state *CE_state = (struct CE_state *)ce_state;
	uint32_t tp, hp;

	info->id = CE_state->id;
	if (CE_state->src_ring) {
		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.tp = tp;
		info->u.srng_info.hp = hp;
	} else if (CE_state->dest_ring && CE_state->status_ring) {
		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.status_tp = tp;
		info->u.srng_info.status_hp = hp;
		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.tp = tp;
		info->u.srng_info.hp = hp;
	}

	return 0;
}
#endif
static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
#ifdef HIF_CE_LOG_INFO
	.ce_get_index_info =
		ce_get_index_info_srng,
#endif
};
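/**
 * ce_services_srng() - return the SRNG-based copy engine service table
 *
 * Return: pointer to the static ce_ops instance implemented in this file
 */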
struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);
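/**
 * ce_service_srng_init() - register the SRNG service table with the CE
 *	service module so it can be selected at attach time
 */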
void ce_service_srng_init(void)
{
	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
}