/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"
#include "hif.h"
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *    a source ring
 *    a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it may
 * seem, than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */
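
/*
 * Illustrative usage sketch (caller-side names here are hypothetical,
 * not a code path in this file): a caller holding a CE handle posts a
 * receive buffer and sends a single buffer through the ops implemented
 * below. The per-transfer context passed in is echoed back unchanged
 * when the corresponding completion is reaped.
 *
 *	struct CE_handle *copyeng = ...;   // obtained at CE init time
 *
 *	// supply an anonymous receive buffer to the destination ring
 *	ce_recv_buf_enqueue_srng(copyeng, rx_nbuf, rx_paddr);
 *
 *	// post one buffer on the source ring; transfer_id and flags
 *	// are caller-chosen
 *	ce_send_nolock_srng(copyeng, tx_ctx, tx_paddr, tx_len,
 *			    transfer_id, 0, 0);
 */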
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)
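
/*
 * Worked example (illustrative value): for dma_addr = 0x1234567890,
 * CE_ADDR_COPY() stores buffer_addr_lo = 0x34567890 and
 * buffer_addr_hi = 0x12. Only the low 8 bits of the upper word are
 * kept, so the descriptor carries a 40-bit DMA address.
 */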
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
	int record_index;
	struct hif_ce_desc_event *event;
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id < CE_COUNT_MAX)
		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
	else
		return;

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();
	event->cpu_id = qdf_get_cpu();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     hal_get_entrysize_from_srng(hal_ring));

	if (hal_ring)
		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
				&event->current_hp);

	event->memory = memory;
	event->index = index;

	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
		hif_ce_desc_record_rx_paddr(scn, event, memory);

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
static QDF_STATUS
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;

	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}

	{
		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
					      (union ce_srng_desc *)src_desc,
					      per_transfer_context,
					      src_ring->write_index, nbytes,
					      src_ring->srng_ctx);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);

	return status;
}
static QDF_STATUS
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	QDF_STATUS status = QDF_STATUS_E_NOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
						     CE_SENDLIST_ITEM_CTXT,
						     (qdf_dma_addr_t)item->data,
						     item->u.nbytes, transfer_id,
						     item->flags | CE_SEND_FLAG_GATHER,
						     item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					     (qdf_dma_addr_t)item->data,
					     item->u.nbytes,
					     transfer_id, item->flags,
					     item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
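
/*
 * Illustrative example: for a sendlist with three fragments, the loop
 * above posts fragments 0 and 1 with CE_SEND_FLAG_GATHER set and the
 * placeholder CE_SENDLIST_ITEM_CTXT context, then posts fragment 2
 * without the gather flag and with the caller's per_transfer_context.
 * The hardware coalesces the gathered fragments, so the peer sees a
 * single destination buffer and a single interrupt, as described in
 * the file header comment.
 */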
#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static QDF_STATUS
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_desc *dest_desc = NULL;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_IO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		dest_desc = hal_srng_src_get_next(scn->hal_soc,
						  dest_ring->srng_ctx);

		if (!dest_desc) {
			status = QDF_STATUS_E_FAILURE;
		} else {
			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		dest_desc = NULL;
		status = QDF_STATUS_E_FAILURE;
	}

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_POST,
				      (union ce_srng_desc *)dest_desc,
				      per_recv_context,
				      dest_ring->write_index, 0,
				      dest_ring->srng_ctx);
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return count;
}
/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	QDF_STATUS status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status = NULL;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx);
	if (!dest_status) {
		status = QDF_STATUS_E_FAILURE;
		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
		goto done;
	}

	/*
	 * By copying the dest status descriptor to local memory, we avoid
	 * an extra memory read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		uint32_t hp, tp;

		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx,
				&hp, &tp);
		hif_info("No data to reap, hp %d tp %d", hp, tp);
		status = QDF_STATUS_E_FAILURE;
		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
		goto done;
	}

	/*
	 * Move the tail pointer since nbytes is non-zero and
	 * this entry is processed.
	 */
	hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx);

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_REAP,
				      NULL,
				      dest_ring->
				      per_transfer_context[sw_index],
				      dest_ring->sw_index, nbytes,
				      dest_ring->srng_ctx);

done:
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_STATUS_RING_REAP,
				      (union ce_srng_desc *)dest_status,
				      NULL,
				      -1, 0,
				      status_ring->srng_ctx);

	return status;
}
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (!dest_ring->per_transfer_context[sw_index])
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}
/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_TX_DESC_COMPLETION,
					      (union ce_srng_desc *)src_desc,
					      src_ring->
					      per_transfer_context[swi],
					      swi, src_desc->nbytes,
					      src_ring->srng_ctx);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}
/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
						  src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/*TODO*/
	return false;
}
static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}

	return 0;
}
static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		(void *)ring_params->msi_addr, ring_params->msi_data);
}
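
/*
 * Worked example (illustrative numbers): with msi_data_count = 3 and
 * msi_data_start = 1 reported by the platform MSI assignment above,
 * copy engines are spread round-robin over the available vectors:
 * ce_id 0 -> msi_data 1, ce_id 1 -> 2, ce_id 2 -> 3, ce_id 3 -> 1,
 * and so on.
 */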
static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us
	 * A default value of 0 disables the timer
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}
/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination rings to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
					struct CE_ring_state *dest_ring,
					struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
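
/*
 * Worked example (illustrative ring size): for a destination ring with
 * nentries = 512, the war above treats 510 buffers as "fully posted"
 * and arms a low-threshold interrupt at 509 entries with a 1024 us
 * interrupt timer, so dropping even one buffer below the fully posted
 * level raises an interrupt.
 */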
static void ce_srng_dest_ring_setup(struct hif_softc *scn,
				    uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
 *                                         thresholds
 * @scn: hif handle
 * @ring_params: ce srng params
 *
 * Return: None
 */
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us =
			scn->ini_cfg.ce_status_ring_timer_threshold;
	ring_params->intr_batch_cntr_thres_entries =
			scn->ini_cfg.ce_status_ring_batch_count_threshold;
}
#else
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us = 0x1000;
	ring_params->intr_batch_cntr_thres_entries = 0x1;
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_status_ring_config_int_threshold(scn, &ring_params);
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}
static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}
static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}
static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	if (!scn->hal_soc) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}
#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info_srng(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non-zero for failure
 */
static
int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
			   struct ce_index *info)
{
	struct CE_state *CE_state = (struct CE_state *)ce_state;
	uint32_t tp, hp;

	info->id = CE_state->id;
	if (CE_state->src_ring) {
		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.tp = tp;
		info->u.srng_info.hp = hp;
	} else if (CE_state->dest_ring && CE_state->status_ring) {
		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.status_tp = tp;
		info->u.srng_info.status_hp = hp;
		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
				&tp, &hp);
		info->u.srng_info.tp = tp;
		info->u.srng_info.hp = hp;
	}

	return 0;
}
#endif
static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
#ifdef HIF_CE_LOG_INFO
	.ce_get_index_info =
		ce_get_index_info_srng,
#endif
};

struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);

void ce_service_srng_init(void)
{
	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
}