ce_service_srng.c

/*
 * Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hif.h"
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, perhaps,
 * than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 *
 * Target TX hash result: toeplitz_hash_result
 */
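
/*
 * Illustrative sketch (not compiled, not part of the driver): a rough
 * picture of how a HIF-level caller might use the services in this file.
 * The handles and buffers (ce_hdl, CE_state, rx_nbuf, tx_paddr, etc.) are
 * hypothetical placeholders, and locking, DMA mapping and error handling
 * are elided; only the intended call flow is shown.
 *
 *	// Destination side: keep anonymous receive buffers posted.
 *	ce_recv_buf_enqueue_srng(ce_hdl, rx_nbuf, rx_paddr);
 *
 *	// Source side: post one send; per_transfer_context (here tx_nbuf)
 *	// is echoed back when the send completes.
 *	ce_send_nolock_srng(ce_hdl, tx_nbuf, tx_paddr, nbytes,
 *	                    transfer_id, 0, 0);
 *
 *	// Completion path: reap the finished source descriptor and
 *	// recover the per-transfer context.
 *	ce_completed_send_next_nolock_srng(CE_state, NULL, &ctxt, &paddr,
 *	                                   &nbytes, &id, &sw_idx, &hw_idx,
 *	                                   &toeplitz);
 */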

#define CE_ADDR_COPY(desc, dma_addr) do {\
        (desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
                                            0xFFFFFFFF);\
        (desc)->buffer_addr_hi =\
            (uint32_t)(((dma_addr) >> 32) & 0xFF);\
    } while (0)
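
/*
 * Worked example (illustrative numbers): for dma_addr = 0x1234567890,
 * CE_ADDR_COPY() stores buffer_addr_lo = 0x34567890 and
 * buffer_addr_hi = 0x12. Only the low 8 bits of the upper word are kept,
 * i.e. the descriptor carries a 40-bit DMA address.
 */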

int
ce_send_nolock_srng(struct CE_handle *copyeng,
                    void *per_transfer_context,
                    qdf_dma_addr_t buffer,
                    uint32_t nbytes,
                    uint32_t transfer_id,
                    uint32_t flags,
                    uint32_t user_flags)
{
    int status;
    struct CE_state *CE_state = (struct CE_state *)copyeng;
    struct CE_ring_state *src_ring = CE_state->src_ring;
    unsigned int nentries_mask = src_ring->nentries_mask;
    unsigned int write_index = src_ring->write_index;
    uint64_t dma_addr = buffer;
    struct hif_softc *scn = CE_state->scn;

    if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
        return QDF_STATUS_E_FAILURE;
    if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
                                        false) <= 0)) {
        OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
        Q_TARGET_ACCESS_END(scn);
        return QDF_STATUS_E_FAILURE;
    }
    {
        enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
        struct ce_srng_src_desc *src_desc;

        if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
            Q_TARGET_ACCESS_END(scn);
            return QDF_STATUS_E_FAILURE;
        }

        src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
                                                src_ring->srng_ctx);

        /* Update low 32 bits source descriptor address */
        src_desc->buffer_addr_lo =
            (uint32_t)(dma_addr & 0xFFFFFFFF);
        src_desc->buffer_addr_hi =
            (uint32_t)((dma_addr >> 32) & 0xFF);

        src_desc->meta_data = transfer_id;

        /*
         * Set the swap bit if:
         * typical sends on this CE are swapped (host is big-endian)
         * and this send doesn't disable the swapping
         * (data is not bytestream)
         */
        src_desc->byte_swap =
            (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
              != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
        src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
        src_desc->nbytes = nbytes;

        src_ring->per_transfer_context[write_index] =
            per_transfer_context;
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

        hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

        /* src_ring->write_index hasn't been updated yet, even though
         * the register has already been written to.
         */
        hif_record_ce_desc_event(scn, CE_state->id, event_type,
                                 (union ce_desc *)src_desc,
                                 per_transfer_context,
                                 src_ring->write_index);

        src_ring->write_index = write_index;
        status = QDF_STATUS_SUCCESS;
    }
    Q_TARGET_ACCESS_END(scn);
    return status;
}

int
ce_sendlist_send_srng(struct CE_handle *copyeng,
                      void *per_transfer_context,
                      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
    int status = -ENOMEM;
    struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
    struct CE_state *CE_state = (struct CE_state *)copyeng;
    struct CE_ring_state *src_ring = CE_state->src_ring;
    unsigned int num_items = sl->num_items;
    unsigned int sw_index;
    unsigned int write_index;
    struct hif_softc *scn = CE_state->scn;

    QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

    qdf_spin_lock_bh(&CE_state->ce_index_lock);
    sw_index = src_ring->sw_index;
    write_index = src_ring->write_index;

    if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
        num_items) {
        struct ce_sendlist_item *item;
        int i;

        /* handle all but the last item uniformly */
        for (i = 0; i < num_items - 1; i++) {
            item = &sl->item[i];
            /* TBDXXX: Support extensible sendlist_types? */
            QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
            status = ce_send_nolock_srng(copyeng,
                                         CE_SENDLIST_ITEM_CTXT,
                                         (qdf_dma_addr_t)item->data,
                                         item->u.nbytes, transfer_id,
                                         item->flags | CE_SEND_FLAG_GATHER,
                                         item->user_flags);
            QDF_ASSERT(status == QDF_STATUS_SUCCESS);
        }
        /* provide valid context pointer for final item */
        item = &sl->item[i];
        /* TBDXXX: Support extensible sendlist_types? */
        QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
        status = ce_send_nolock_srng(copyeng, per_transfer_context,
                                     (qdf_dma_addr_t)item->data,
                                     item->u.nbytes,
                                     transfer_id, item->flags,
                                     item->user_flags);
        QDF_ASSERT(status == QDF_STATUS_SUCCESS);
        QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
                                     QDF_NBUF_TX_PKT_CE);
        DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
            QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
            (uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
            sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
    } else {
        /*
         * Probably not worth the additional complexity to support
         * partial sends with continuation or notification. We expect
         * to use large rings and small sendlists. If we can't handle
         * the entire request at once, punt it back to the caller.
         */
    }
    qdf_spin_unlock_bh(&CE_state->ce_index_lock);

    return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
                         void *per_recv_context, qdf_dma_addr_t buffer)
{
    int status;
    struct CE_state *CE_state = (struct CE_state *)copyeng;
    struct CE_ring_state *dest_ring = CE_state->dest_ring;
    unsigned int nentries_mask = dest_ring->nentries_mask;
    unsigned int write_index;
    unsigned int sw_index;
    uint64_t dma_addr = buffer;
    struct hif_softc *scn = CE_state->scn;

    qdf_spin_lock_bh(&CE_state->ce_index_lock);
    write_index = dest_ring->write_index;
    sw_index = dest_ring->sw_index;

    if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);
        return -EIO;
    }

    if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
        /* balance the Q_TARGET_ACCESS_BEGIN above before bailing out */
        Q_TARGET_ACCESS_END(scn);
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);
        return QDF_STATUS_E_FAILURE;
    }

    if ((hal_srng_src_num_avail(scn->hal_soc,
                                dest_ring->srng_ctx, false) > 0)) {
        struct ce_srng_dest_desc *dest_desc =
                hal_srng_src_get_next(scn->hal_soc,
                                      dest_ring->srng_ctx);

        if (dest_desc == NULL) {
            status = QDF_STATUS_E_FAILURE;
        } else {
            CE_ADDR_COPY(dest_desc, dma_addr);

            dest_ring->per_transfer_context[write_index] =
                per_recv_context;

            /* Update Destination Ring Write Index */
            write_index = CE_RING_IDX_INCR(nentries_mask,
                                           write_index);
            status = QDF_STATUS_SUCCESS;
        }
    } else {
        status = QDF_STATUS_E_FAILURE;
    }

    dest_ring->write_index = write_index;
    hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
    Q_TARGET_ACCESS_END(scn);
    qdf_spin_unlock_bh(&CE_state->ce_index_lock);

    return status;
}

/**
 * ce_send_watermarks_set_srng()
 */
void
ce_send_watermarks_set_srng(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries)
{
    /* TODO */
}

/*
 * ce_recv_watermarks_set_srng
 */
void
ce_recv_watermarks_set_srng(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries)
{
    /* TODO */
}

unsigned int ce_send_entries_avail_srng(struct CE_handle *copyeng)
{
    struct CE_state *CE_state = (struct CE_state *)copyeng;
    struct CE_ring_state *src_ring = CE_state->src_ring;
    struct hif_softc *scn = CE_state->scn;

    return hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false);
}

unsigned int ce_recv_entries_avail_srng(struct CE_handle *copyeng)
{
    struct CE_state *CE_state = (struct CE_state *)copyeng;
    struct CE_ring_state *dest_ring = CE_state->dest_ring;
    struct hif_softc *scn = CE_state->scn;

    return hal_srng_src_num_avail(scn->hal_soc, dest_ring->srng_ctx, false);
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
                                 struct CE_state *CE_state)
{
    struct CE_ring_state *status_ring = CE_state->status_ring;

    return hal_srng_dst_num_valid(scn->hal_soc,
                                  status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
                                 struct CE_state *CE_state)
{
    struct CE_ring_state *src_ring = CE_state->src_ring;
    int count = 0;

    if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
        return 0;

    count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

    hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

    return count;
}

/* Debug support */
void *ce_debug_cmplrn_context_srng;  /* completed recv next context */
void *ce_debug_cmplsn_context_srng;  /* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
                                   void **per_CE_contextp,
                                   void **per_transfer_contextp,
                                   qdf_dma_addr_t *bufferp,
                                   unsigned int *nbytesp,
                                   unsigned int *transfer_idp,
                                   unsigned int *flagsp)
{
    int status;
    struct CE_ring_state *dest_ring = CE_state->dest_ring;
    struct CE_ring_state *status_ring = CE_state->status_ring;
    unsigned int nentries_mask = dest_ring->nentries_mask;
    unsigned int sw_index = dest_ring->sw_index;
    struct hif_softc *scn = CE_state->scn;
    struct ce_srng_dest_status_desc *dest_status;
    int nbytes;
    struct ce_srng_dest_status_desc dest_status_info;

    if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
        status = QDF_STATUS_E_FAILURE;
        goto done;
    }

    dest_status = hal_srng_dst_get_next(scn->hal_soc,
                                        status_ring->srng_ctx);

    if (dest_status == NULL) {
        status = QDF_STATUS_E_FAILURE;
        goto done;
    }

    /*
     * By copying the descriptor pointed to by dest_status into the local
     * dest_status_info, we avoid extra reads from non-cacheable memory.
     */
    dest_status_info = *dest_status;
    nbytes = dest_status_info.nbytes;
    if (nbytes == 0) {
        /*
         * This closes a relatively unusual race where the Host
         * sees the updated DRRI before the update to the
         * corresponding descriptor has completed. We treat this
         * as a descriptor that is not yet done.
         */
        status = QDF_STATUS_E_FAILURE;
        goto done;
    }

    dest_status->nbytes = 0;

    *nbytesp = nbytes;
    *transfer_idp = dest_status_info.meta_data;
    *flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

    if (per_CE_contextp)
        *per_CE_contextp = CE_state->recv_context;

    /* NOTE: sw_index is more like a read_index in this context. It has a
     * one-to-one mapping with the status ring.
     * Get the per transfer context from dest_ring.
     */
    ce_debug_cmplrn_context_srng =
        dest_ring->per_transfer_context[sw_index];

    if (per_transfer_contextp)
        *per_transfer_contextp = ce_debug_cmplrn_context_srng;

    dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

    /* Update sw_index */
    sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
    dest_ring->sw_index = sw_index;
    status = QDF_STATUS_SUCCESS;

done:
    hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

    return status;
}

QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
                         void **per_CE_contextp,
                         void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
    QDF_STATUS status = QDF_STATUS_E_FAILURE;

    return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
                                   void **per_CE_contextp,
                                   void **per_transfer_contextp,
                                   qdf_dma_addr_t *bufferp,
                                   unsigned int *nbytesp,
                                   unsigned int *transfer_idp,
                                   unsigned int *sw_idx,
                                   unsigned int *hw_idx,
                                   uint32_t *toeplitz_hash_result)
{
    int status = QDF_STATUS_E_FAILURE;
    struct CE_ring_state *src_ring = CE_state->src_ring;
    unsigned int nentries_mask = src_ring->nentries_mask;
    unsigned int sw_index = src_ring->sw_index;
    struct hif_softc *scn = CE_state->scn;
    struct ce_srng_src_desc *src_desc;

    if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
        status = QDF_STATUS_E_FAILURE;
        return status;
    }

    src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
    if (src_desc) {
        /* Return data from completed source descriptor */
        *bufferp = (qdf_dma_addr_t)
            (((uint64_t)(src_desc)->buffer_addr_lo +
              ((uint64_t)((src_desc)->buffer_addr_hi &
                          0xFF) << 32)));
        *nbytesp = src_desc->nbytes;
        *transfer_idp = src_desc->meta_data;
        *toeplitz_hash_result = 0; /* src_desc->toeplitz_hash_result; */

        if (per_CE_contextp)
            *per_CE_contextp = CE_state->send_context;

        /* sw_index is used more like read index */
        ce_debug_cmplsn_context_srng =
            src_ring->per_transfer_context[sw_index];
        if (per_transfer_contextp)
            *per_transfer_contextp = ce_debug_cmplsn_context_srng;

        src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        src_ring->sw_index = sw_index;
        status = QDF_STATUS_SUCCESS;
    }
    hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

    return status;
}

/* NB: Modelled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
                         void **per_CE_contextp,
                         void **per_transfer_contextp,
                         qdf_dma_addr_t *bufferp,
                         unsigned int *nbytesp,
                         unsigned int *transfer_idp,
                         uint32_t *toeplitz_hash_result)
{
    return 0;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling pass. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
                                  int disable_copy_compl_intr)
{
}

bool ce_check_int_watermark_srng(struct CE_state *CE_state, unsigned int *flags)
{
    /* TODO */
    return false;
}

uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
    switch (ring_type) {
    case CE_RING_SRC:
        return sizeof(struct ce_srng_src_desc);
    case CE_RING_DEST:
        return sizeof(struct ce_srng_dest_desc);
    case CE_RING_STATUS:
        return sizeof(struct ce_srng_dest_status_desc);
    default:
        return 0;
    }
    return 0;
}

static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
                                          struct hal_srng_params *ring_params)
{
    uint32_t addr_low;
    uint32_t addr_high;
    uint32_t msi_data_start;
    uint32_t msi_data_count;
    uint32_t msi_irq_start;
    int ret;

    ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
                                      &msi_data_count, &msi_data_start,
                                      &msi_irq_start);

    /* msi config not found */
    if (ret)
        return;

    HIF_INFO("%s: ce_id %d, msi_start: %d, msi_count %d", __func__, ce_id,
             msi_data_start, msi_data_count);

    pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

    ring_params->msi_addr = addr_low;
    ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
    ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
    ring_params->flags |= HAL_SRNG_MSI_INTR;

    HIF_INFO("%s: ce_id %d, msi_addr %p, msi_data %d", __func__, ce_id,
             (void *)ring_params->msi_addr, ring_params->msi_data);
}
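
/*
 * Worked example (hypothetical platform values): if pld reports
 * msi_data_start = 1 and msi_data_count = 4, then
 * msi_data = (ce_id % 4) + 1, so CE ids 0..7 get MSI data values
 * 1, 2, 3, 4, 1, 2, 3, 4 -- the available MSI vectors are shared
 * round-robin across the copy engines.
 */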

void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
                            struct CE_ring_state *src_ring)
{
    struct hal_srng_params ring_params = {0};

    HIF_INFO("%s: ce_id %d", __func__, ce_id);

    ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

    ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
    ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
    ring_params.num_entries = src_ring->nentries;
    /*
     * The minimum increment for the timer is 8us.
     * A value of 0 disables the timer; a non-zero value caused
     * continuous interrupts to fire with MSI enabled. Need to revisit
     * usage of the timer.
     */
    ring_params.intr_timer_thres_us = 0;
    ring_params.intr_batch_cntr_thres_entries = 1;

    /* TODO
     * ring_params.msi_addr = XXX;
     * ring_params.msi_data = XXX;
     * ring_params.flags = XXX;
     */

    src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
                                        &ring_params);
}

void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
                             struct CE_ring_state *dest_ring,
                             struct CE_attr *attr)
{
    struct hal_srng_params ring_params = {0};

    HIF_INFO("%s: ce_id %d", __func__, ce_id);

    ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

    ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
    ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
    ring_params.num_entries = dest_ring->nentries;
    ring_params.intr_timer_thres_us = 0;
    ring_params.intr_batch_cntr_thres_entries = 1;
    ring_params.max_buffer_length = attr->src_sz_max;

    /* TODO
     * ring_params.msi_addr = XXX;
     * ring_params.msi_data = XXX;
     * ring_params.flags = XXX;
     */

    /* The dest ring is also a source ring from the host's perspective */
    dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
                                         &ring_params);
}

void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
                               struct CE_ring_state *status_ring)
{
    struct hal_srng_params ring_params = {0};

    HIF_INFO("%s: ce_id %d", __func__, ce_id);

    ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

    ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
    ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
    ring_params.num_entries = status_ring->nentries;
    ring_params.intr_timer_thres_us = 0;
    ring_params.intr_batch_cntr_thres_entries = 1;

    /* TODO
     * ring_params.msi_addr = XXX;
     * ring_params.msi_data = XXX;
     * ring_params.flags = XXX;
     */

    status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
                                           ce_id, 0, &ring_params);
}

void ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
                        uint32_t ce_id, struct CE_ring_state *ring,
                        struct CE_attr *attr)
{
    switch (ring_type) {
    case CE_RING_SRC:
        ce_srng_src_ring_setup(scn, ce_id, ring);
        break;
    case CE_RING_DEST:
        ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
        break;
    case CE_RING_STATUS:
        ce_srng_status_ring_setup(scn, ce_id, ring);
        break;
    default:
        qdf_assert(0);
        break;
    }
}

struct ce_ops ce_service_srng = {
    .ce_get_desc_size = ce_get_desc_size_srng,
    .ce_ring_setup = ce_ring_setup_srng,
    .ce_sendlist_send = ce_sendlist_send_srng,
    .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
    .ce_revoke_recv_next = ce_revoke_recv_next_srng,
    .ce_cancel_send_next = ce_cancel_send_next_srng,
    .ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
    .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
    .ce_send_nolock = ce_send_nolock_srng,
    .watermark_int = ce_check_int_watermark_srng,
    .ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
    .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
    .ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
};

struct ce_ops *ce_services_srng(void)
{
    return &ce_service_srng;
}