  1. /*
  2. * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. #include <osdep.h>
  27. #include "a_types.h"
  28. #include <athdefs.h>
  29. #include "osapi_linux.h"
  30. #include "hif.h"
  31. #include "hif_io32.h"
  32. #include "ce_api.h"
  33. #include "ce_main.h"
  34. #include "ce_internal.h"
  35. #include "ce_reg.h"
  36. #include "qdf_lock.h"
  37. #include "regtable.h"
  38. #include "epping_main.h"
  39. #include "hif_main.h"
  40. #include "hif_debug.h"
#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
/*
 * CE_IPA_RING_INIT() - preset a CE source descriptor for the IPA ring.
 * Fills every control field of @ce_desc with the fixed values used for
 * IPA offload transfers on the QCA_WIFI_3_0 descriptor layout: all
 * interrupt-disable/search-disable/swap bits cleared, gather off.
 * NOTE(review): packet_result_offset = 64, type = 2, tx_classify = 1 and
 * nbytes = 128 are magic values from the descriptor spec — confirm against
 * the hardware documentation before changing.
 */
#define CE_IPA_RING_INIT(ce_desc) \
do { \
	ce_desc->gather = 0; \
	ce_desc->enable_11h = 0; \
	ce_desc->meta_data_low = 0; \
	ce_desc->packet_result_offset = 64; \
	ce_desc->toeplitz_hash_enable = 0; \
	ce_desc->addr_y_search_disable = 0; \
	ce_desc->addr_x_search_disable = 0; \
	ce_desc->misc_int_disable = 0; \
	ce_desc->target_int_disable = 0; \
	ce_desc->host_int_disable = 0; \
	ce_desc->dest_byte_swap = 0; \
	ce_desc->byte_swap = 0; \
	ce_desc->type = 2; \
	ce_desc->tx_classify = 1; \
	ce_desc->buffer_addr_hi = 0; \
	ce_desc->meta_data = 0; \
	ce_desc->nbytes = 128; \
} while (0)
#else
/*
 * Pre-3.0 descriptor layout: only byte_swap, nbytes and gather exist.
 * NOTE(review): nbytes = 60 is a magic value — presumably the fixed IPA
 * frame length on this generation; verify against the IPA integration docs.
 */
#define CE_IPA_RING_INIT(ce_desc) \
do { \
	ce_desc->byte_swap = 0; \
	ce_desc->nbytes = 60; \
	ce_desc->gather = 0; \
} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */
/*
 * When non-zero, war_ce_src_ring_write_idx_set() may use the sleepable
 * (IRQ-disabling) variant of the CDC workaround instead of the magic-string
 * fast path.  Both flags are consumed only by that function in this file.
 */
static int war1_allow_sleep;
/* io32 write workaround: enables the CDC write-index WAR path entirely. */
static int hif_ce_war1;
#ifdef CONFIG_SLUB_DEBUG_ON
/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
/* Per-CE monotonically increasing write cursor into the history below. */
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
/* Circular buffer of the last HIF_CE_HISTORY_MAX events per copy engine. */
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	/* Only the incrementer that observed exactly array_size rewinds the
	 * shared counter; concurrent callers that raced past it normalize
	 * their private copy in the loop below instead.
	 */
	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);
	while (record_index >= array_size)
		record_index -= array_size;
	return record_index;
}
/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor,
			      void *memory, int index)
{
	struct hif_callbacks *cbk = hif_get_callbacks_handle(scn);
	/* Reserve a slot in this CE's circular history (thread safe for
	 * fewer than HIF_CE_HISTORY_MAX concurrent recorders). */
	int record_index = get_next_record_index(
			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);
	struct hif_ce_desc_event *event =
			&hif_ce_desc_history[ce_id][record_index];

	event->type = type;
	/* Prefer the platform boottime callback; otherwise fall back to
	 * system ticks converted to microseconds (msecs * 1000). */
	if (cbk && cbk->get_monotonic_boottime)
		event->time = cbk->get_monotonic_boottime();
	else
		event->time = ((uint64_t)qdf_system_ticks_to_msecs(
				qdf_system_ticks()) * 1000);
	/* Snapshot the descriptor by value; zero the slot when absent so
	 * stale data from a previous wrap cannot be misread. */
	if (descriptor != NULL)
		event->descriptor = *descriptor;
	else
		memset(&event->descriptor, 0, sizeof(union ce_desc));
	event->memory = memory;
	event->index = index;
}
/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value
 * (HIF_CE_HISTORY_MAX).
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
	/* Reset this CE's write cursor; history entries are reused in place. */
	qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
/* CONFIG_SLUB_DEBUG_ON disabled: descriptor-history recording compiles
 * down to empty stubs so callers need no conditional compilation. */
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif
  170. /*
  171. * Support for Copy Engine hardware, which is mainly used for
  172. * communication between Host and Target over a PCIe interconnect.
  173. */
  174. /*
  175. * A single CopyEngine (CE) comprises two "rings":
  176. * a source ring
  177. * a destination ring
  178. *
  179. * Each ring consists of a number of descriptors which specify
  180. * an address, length, and meta-data.
  181. *
  182. * Typically, one side of the PCIe interconnect (Host or Target)
  183. * controls one ring and the other side controls the other ring.
  184. * The source side chooses when to initiate a transfer and it
  185. * chooses what to send (buffer address, length). The destination
  186. * side keeps a supply of "anonymous receive buffers" available and
  187. * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
  189. *
  190. * The sender may send a simple buffer (address/length) or it may
  191. * send a small list of buffers. When a small list is sent, hardware
  192. * "gathers" these and they end up in a single destination buffer
  193. * with a single interrupt.
  194. *
  195. * There are several "contexts" managed by this layer -- more, it
  196. * may seem -- than should be needed. These are provided mainly for
  197. * maximum flexibility and especially to facilitate a simpler HIF
  198. * implementation. There are per-CopyEngine recv, send, and watermark
  199. * contexts. These are supplied by the caller when a recv, send,
  200. * or watermark handler is established and they are echoed back to
  201. * the caller when the respective callbacks are invoked. There is
  202. * also a per-transfer context supplied by the caller when a buffer
  203. * (or sendlist) is sent and when a buffer is enqueued for recv.
  204. * These per-transfer contexts are echoed back to the caller when
  205. * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
  207. */
/*
 * Forward declaration: lockless reaping of completed source-ring entries,
 * defined later in this file.  The caller takes responsibility for any
 * needed locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx, unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result);
/**
 * war_ce_src_ring_write_idx_set() - update a CE source-ring write index,
 * applying the CDC io32-write workaround when hif_ce_war1 is set.
 * @scn: hif context
 * @ctrl_addr: register base of the copy engine being updated
 * @write_index: new source-ring write index
 *
 * With the WAR disabled this is a plain CE_SRC_RING_WRITE_IDX_SET().
 * With it enabled, the DST_WATERMARK register of the CE is used as an
 * "indicator": either a magic-string fast path (for CDC_WAR_DATA_CE when
 * sleeping is not allowed) or an IRQ-disabled bracketed sequence that
 * raises the indicator, performs the real write, and lowers it again.
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;

			/* Keep the indicator window atomic on this CPU. */
			local_irq_save(irq_flags);
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
}
/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 *
 * Builds one source descriptor in the shadow ring, copies it into the
 * hardware-owned ring, and (for non-gather sends) publishes the new write
 * index to hardware.  Returns QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE
 * when the ring is full.  A_TARGET_ACCESS_BEGIN_RET/END_RET may also
 * return early on target-access failure.
 */
int
ce_send_nolock(struct CE_handle *copyeng,
	       void *per_transfer_context,
	       qdf_dma_addr_t buffer,
	       uint32_t nbytes,
	       uint32_t transfer_id,
	       uint32_t flags,
	       uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	A_TARGET_ACCESS_BEGIN_RET(scn);
	/* Full ring: at least one free slot must remain between write and
	 * sw index, hence the "sw_index - 1" sentinel. */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		status = QDF_STATUS_E_FAILURE;
		A_TARGET_ACCESS_END_RET(scn);
		return status;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		/* High address bits 32..36 plus user flags share the second
		 * descriptor word; the memcpy overwrites that word whole. */
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;

		/* Copy the fully-built shadow descriptor into the ring the
		 * hardware reads, as a single struct assignment. */
		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND: only publish to hardware for the final
		 * (non-gather) descriptor of a transfer. */
		if (!shadow_src_desc->gather) {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *) shadow_src_desc, per_transfer_context,
			src_ring->write_index);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	A_TARGET_ACCESS_END_RET(scn);
	return status;
}
  330. int
  331. ce_send(struct CE_handle *copyeng,
  332. void *per_transfer_context,
  333. qdf_dma_addr_t buffer,
  334. uint32_t nbytes,
  335. uint32_t transfer_id,
  336. uint32_t flags,
  337. uint32_t user_flag)
  338. {
  339. struct CE_state *CE_state = (struct CE_state *)copyeng;
  340. int status;
  341. qdf_spin_lock_bh(&CE_state->ce_index_lock);
  342. status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
  343. transfer_id, flags, user_flag);
  344. qdf_spin_unlock_bh(&CE_state->ce_index_lock);
  345. return status;
  346. }
  347. unsigned int ce_sendlist_sizeof(void)
  348. {
  349. return sizeof(struct ce_sendlist);
  350. }
  351. void ce_sendlist_init(struct ce_sendlist *sendlist)
  352. {
  353. struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
  354. sl->num_items = 0;
  355. }
  356. int
  357. ce_sendlist_buf_add(struct ce_sendlist *sendlist,
  358. qdf_dma_addr_t buffer,
  359. uint32_t nbytes,
  360. uint32_t flags,
  361. uint32_t user_flags)
  362. {
  363. struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
  364. unsigned int num_items = sl->num_items;
  365. struct ce_sendlist_item *item;
  366. if (num_items >= CE_SENDLIST_ITEMS_MAX) {
  367. QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
  368. return QDF_STATUS_E_RESOURCES;
  369. }
  370. item = &sl->item[num_items];
  371. item->send_type = CE_SIMPLE_BUFFER_TYPE;
  372. item->data = buffer;
  373. item->u.nbytes = nbytes;
  374. item->flags = flags;
  375. item->user_flags = user_flags;
  376. sl->num_items = num_items + 1;
  377. return QDF_STATUS_SUCCESS;
  378. }
/*
 * ce_sendlist_send() - post a multi-fragment sendlist as one gather burst.
 * All items except the last are posted with CE_SEND_FLAG_GATHER and the
 * sentinel CE_SENDLIST_ITEM_CTXT context; only the final item carries the
 * caller's per_transfer_context.  All-or-nothing: if the ring lacks room
 * for every item, nothing is posted.
 *
 * NOTE(review): on the no-room path this returns the initial -ENOMEM,
 * while success paths return QDF_STATUS codes from ce_send_nolock() —
 * mixed error domains; confirm callers handle both before changing.
 */
int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* Only proceed when the whole burst fits in the ring. */
	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}
#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
/*
 * ce_buffer_addr_hi_set() - program DMA address bits 32..36 and user flags.
 * Folds buffer_addr_hi into @user_flags, then overwrites the second 32-bit
 * word of the descriptor wholesale via memcpy — this clobbers every field
 * sharing that word, matching the inline sequence in ce_send_nolock().
 */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
/* Pre-3.0 hardware uses 32-bit DMA addresses: nothing to program. */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif
/**
 * ce_send_fast() CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption : Called with an array of MSDU's
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Each MSDU consumes two descriptors: a gather descriptor for the
 * HTC/HTT header fragment (frag 0) and a final descriptor for the data
 * fragment (frag 1).  The hardware write index is published once, after
 * the whole array is staged.
 *
 * Return: No. of packets that could be sent
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	qdf_nbuf_t msdu;
	int i;
	uint64_t dma_addr;
	uint32_t user_flags = 0;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
	/* NOTE(review): sw_index is read under the lock but never checked
	 * against write_index here — no ring-full test is visible in this
	 * loop; presumably callers bound num_msdus elsewhere. Confirm. */
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* 2 msdus per packet */
	for (i = 0; i < num_msdus; i++) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Take a runtime-PM usage reference without waking the bus. */
		hif_pm_runtime_get_noresume(hif_hdl);
		msdu = msdus[i];

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as a argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);

		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);

		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;

		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* only read download_len once */
		shadow_src_desc->nbytes = ce_state->download_len;
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	}

	/* Write the final index to h/w one-shot */
	if (i) {
		src_ring->write_index = write_index;

		/* Only touch the register if the target is awake. */
		if (hif_pm_runtime_get(hif_hdl) == 0) {
			/* Don't call WAR_XXX from here
			 * Just call XXX instead, that has the required logic
			 */
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			hif_pm_runtime_put(hif_hdl);
		}
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/*
	 * If all packets in the array are transmitted,
	 * i = num_msdus
	 * Temporarily add an ASSERT
	 */
	ASSERT(i == num_msdus);
	return i;
}
  585. #endif /* WLAN_FEATURE_FASTPATH */
/*
 * ce_recv_buf_enqueue() - post one anonymous receive buffer on a CE
 * destination ring.
 * @copyeng: copy engine handle
 * @per_recv_context: caller context echoed back on completion
 * @buffer: DMA address of the receive buffer
 *
 * Returns QDF_STATUS_SUCCESS, QDF_STATUS_E_FAILURE when the ring is full,
 * or -1 when the target-access bracket fails.
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int val = 0;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
	if (val == -1) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}

	/* Require one free slot (sw_index - 1 keeps a sentinel gap). */
	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		/* DMA address bits 32..36 on 3.0-generation hardware. */
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		/* Hardware fills in the received length on completion. */
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
				(union ce_desc *) dest_desc, per_recv_context,
				write_index);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	A_TARGET_ACCESS_END_RET_EXT(scn, val);
	if (val == -1) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}
  642. void
  643. ce_send_watermarks_set(struct CE_handle *copyeng,
  644. unsigned int low_alert_nentries,
  645. unsigned int high_alert_nentries)
  646. {
  647. struct CE_state *CE_state = (struct CE_state *)copyeng;
  648. uint32_t ctrl_addr = CE_state->ctrl_addr;
  649. struct hif_softc *scn = CE_state->scn;
  650. CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
  651. CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
  652. }
  653. void
  654. ce_recv_watermarks_set(struct CE_handle *copyeng,
  655. unsigned int low_alert_nentries,
  656. unsigned int high_alert_nentries)
  657. {
  658. struct CE_state *CE_state = (struct CE_state *)copyeng;
  659. uint32_t ctrl_addr = CE_state->ctrl_addr;
  660. struct hif_softc *scn = CE_state->scn;
  661. CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
  662. low_alert_nentries);
  663. CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
  664. high_alert_nentries);
  665. }
  666. unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
  667. {
  668. struct CE_state *CE_state = (struct CE_state *)copyeng;
  669. struct CE_ring_state *src_ring = CE_state->src_ring;
  670. unsigned int nentries_mask = src_ring->nentries_mask;
  671. unsigned int sw_index;
  672. unsigned int write_index;
  673. qdf_spin_lock(&CE_state->ce_index_lock);
  674. sw_index = src_ring->sw_index;
  675. write_index = src_ring->write_index;
  676. qdf_spin_unlock(&CE_state->ce_index_lock);
  677. return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
  678. }
  679. unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
  680. {
  681. struct CE_state *CE_state = (struct CE_state *)copyeng;
  682. struct CE_ring_state *dest_ring = CE_state->dest_ring;
  683. unsigned int nentries_mask = dest_ring->nentries_mask;
  684. unsigned int sw_index;
  685. unsigned int write_index;
  686. qdf_spin_lock(&CE_state->ce_index_lock);
  687. sw_index = dest_ring->sw_index;
  688. write_index = dest_ring->write_index;
  689. qdf_spin_unlock(&CE_state->ce_index_lock);
  690. return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
  691. }
  692. /*
  693. * Guts of ce_send_entries_done.
  694. * The caller takes responsibility for any necessary locking.
  695. */
  696. unsigned int
  697. ce_send_entries_done_nolock(struct hif_softc *scn,
  698. struct CE_state *CE_state)
  699. {
  700. struct CE_ring_state *src_ring = CE_state->src_ring;
  701. uint32_t ctrl_addr = CE_state->ctrl_addr;
  702. unsigned int nentries_mask = src_ring->nentries_mask;
  703. unsigned int sw_index;
  704. unsigned int read_index;
  705. sw_index = src_ring->sw_index;
  706. read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
  707. return CE_RING_DELTA(nentries_mask, sw_index, read_index);
  708. }
  709. unsigned int ce_send_entries_done(struct CE_handle *copyeng)
  710. {
  711. struct CE_state *CE_state = (struct CE_state *)copyeng;
  712. unsigned int nentries;
  713. qdf_spin_lock(&CE_state->ce_index_lock);
  714. nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
  715. qdf_spin_unlock(&CE_state->ce_index_lock);
  716. return nentries;
  717. }
  718. /*
  719. * Guts of ce_recv_entries_done.
  720. * The caller takes responsibility for any necessary locking.
  721. */
  722. unsigned int
  723. ce_recv_entries_done_nolock(struct hif_softc *scn,
  724. struct CE_state *CE_state)
  725. {
  726. struct CE_ring_state *dest_ring = CE_state->dest_ring;
  727. uint32_t ctrl_addr = CE_state->ctrl_addr;
  728. unsigned int nentries_mask = dest_ring->nentries_mask;
  729. unsigned int sw_index;
  730. unsigned int read_index;
  731. sw_index = dest_ring->sw_index;
  732. read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
  733. return CE_RING_DELTA(nentries_mask, sw_index, read_index);
  734. }
  735. unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
  736. {
  737. struct CE_state *CE_state = (struct CE_state *)copyeng;
  738. unsigned int nentries;
  739. qdf_spin_lock(&CE_state->ce_index_lock);
  740. nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
  741. qdf_spin_unlock(&CE_state->ce_index_lock);
  742. return nentries;
  743. }
/* Debug support:
 * Each global records the most recent per-transfer context handled by
 * the corresponding completion/cancel/revoke path. They are debug aids
 * only and carry no synchronization.
 */
void *ce_debug_cmplrn_context;	/* completed recv next context */
void *ce_debug_cnclsn_context;	/* cancel send next context */
void *ce_debug_rvkrn_context;	/* revoke receive next context */
void *ce_debug_cmplsn_context;	/* completed send next context */
/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 *
 * Pops one completed destination (receive) descriptor, returning its
 * buffer address, byte count, transfer id and flags through the out
 * parameters. Returns QDF_STATUS_SUCCESS if a completed descriptor was
 * consumed, QDF_STATUS_E_FAILURE if the descriptor at sw_index is not
 * yet done.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cachable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
				 (union ce_desc *) dest_desc,
				 dest_ring->per_transfer_context[sw_index],
				 sw_index);
	/* Zero nbytes in the ring so this slot reads as "not yet done"
	 * the next time it comes around.
	 */
	dest_desc->nbytes = 0;
	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
	if (per_CE_contextp) {
		*per_CE_contextp = CE_state->recv_context;
	}
	ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
	if (per_transfer_contextp) {
		*per_transfer_contextp = ce_debug_cmplrn_context;
	}
	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;
done:
	return status;
}
  814. int
  815. ce_completed_recv_next(struct CE_handle *copyeng,
  816. void **per_CE_contextp,
  817. void **per_transfer_contextp,
  818. qdf_dma_addr_t *bufferp,
  819. unsigned int *nbytesp,
  820. unsigned int *transfer_idp, unsigned int *flagsp)
  821. {
  822. struct CE_state *CE_state = (struct CE_state *)copyeng;
  823. int status;
  824. qdf_spin_lock_bh(&CE_state->ce_index_lock);
  825. status =
  826. ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
  827. per_transfer_contextp, bufferp,
  828. nbytesp, transfer_idp, flagsp);
  829. qdf_spin_unlock_bh(&CE_state->ce_index_lock);
  830. return status;
  831. }
/* NB: Modeled after ce_completed_recv_next_nolock */
/*
 * Reclaims the next outstanding (posted but not yet completed) receive
 * buffer so its resources can be released, e.g. during shutdown.
 * Returns QDF_STATUS_SUCCESS with the buffer address (and optional
 * contexts) filled in, or QDF_STATUS_E_FAILURE when the ring has no
 * outstanding entries.
 *
 * NOTE(review): uses plain qdf_spin_lock while the completion path
 * uses the _bh variant — presumably safe because revoke runs only when
 * completions are quiesced; confirm against callers.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;
	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring) {
		return QDF_STATUS_E_FAILURE;
	}
	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	/* write_index != sw_index means at least one posted entry remains */
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->recv_context;
		}
		ce_debug_rvkrn_context =
			dest_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_rvkrn_context;
		}
		dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);
	return status;
}
/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 *
 * Pops one completed source (send) descriptor, returning its buffer
 * address, byte count, transfer id, sw/hw indices and toeplitz hash
 * through the out parameters. Returns QDF_STATUS_SUCCESS when a
 * completed descriptor was consumed, QDF_STATUS_E_FAILURE otherwise.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;
	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		A_TARGET_ACCESS_BEGIN_RET(scn);
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		A_TARGET_ACCESS_END_RET(scn);
	}
	read_index = src_ring->hw_index;
	if (sw_idx)
		*sw_idx = sw_index;
	if (hw_idx)
		*hw_idx = read_index;
	/* NOTE(review): 0xffffffff presumably marks an invalid register
	 * read (e.g. target asleep) — confirm against the register
	 * access layer.
	 */
	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		/* Completed descriptors are read back from the shadow
		 * (cached) copy of the ring to avoid uncached reads.
		 */
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_TX_DESC_COMPLETION,
					 (union ce_desc *) shadow_src_desc,
					 src_ring->per_transfer_context[sw_index],
					 sw_index);
		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		/* Hash lives only in the hardware-visible descriptor */
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}
		ce_debug_cmplsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cmplsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0; /* sanity */
		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	return status;
}
/* NB: Modeled after ce_completed_send_next */
/*
 * Reclaims the next outstanding (posted but not yet completed) send
 * buffer so its resources can be released, e.g. during shutdown.
 * Returns QDF_STATUS_SUCCESS with the descriptor contents filled in,
 * or QDF_STATUS_E_FAILURE when the ring has no outstanding entries.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;
	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring) {
		return QDF_STATUS_E_FAILURE;
	}
	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	/* write_index != sw_index means at least one posted entry remains */
	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}
		ce_debug_cnclsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cnclsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0; /* sanity */
		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);
	return status;
}
  1023. /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
  1024. #define CE_WM_SHFT 1
  1025. int
  1026. ce_completed_send_next(struct CE_handle *copyeng,
  1027. void **per_CE_contextp,
  1028. void **per_transfer_contextp,
  1029. qdf_dma_addr_t *bufferp,
  1030. unsigned int *nbytesp,
  1031. unsigned int *transfer_idp,
  1032. unsigned int *sw_idx,
  1033. unsigned int *hw_idx,
  1034. unsigned int *toeplitz_hash_result)
  1035. {
  1036. struct CE_state *CE_state = (struct CE_state *)copyeng;
  1037. int status;
  1038. qdf_spin_lock_bh(&CE_state->ce_index_lock);
  1039. status =
  1040. ce_completed_send_next_nolock(CE_state, per_CE_contextp,
  1041. per_transfer_contextp, bufferp,
  1042. nbytesp, transfer_idp, sw_idx,
  1043. hw_idx, toeplitz_hash_result);
  1044. qdf_spin_unlock_bh(&CE_state->ce_index_lock);
  1045. return status;
  1046. }
  1047. #ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service. The only difference is that
 * ce_per_engine_service does both receive processing and reaping of
 * completed descriptors, while this function only handles reaping of
 * Tx complete descriptors.
 * It is called from the threshold reap poll routine
 * hif_send_complete_check, so it must not contain any receive
 * functionality.
 */
/*
 * Reap (only) Tx completions on the given copy engine and invoke the
 * registered send callback (or, for the HTT H2T pipe, just replenish
 * the send-allowed count). No receive processing is done here.
 */
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;
	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, 0);
	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom halve spin lock changes are needed. Due to data path
	 * performance concern, after internal discussion we've decided
	 * to make minimum change, i.e., only address the issue occured
	 * in this function. The possible negative effect of this minimum
	 * change is that, in the future, if some other function will also
	 * be opened to let the user context to use, those cases need to be
	 * addressed by change spin_lock to spin_lock_bh also.
	 */
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	if (CE_state->send_cb) {
		{
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_completed_send_next_nolock
				       (CE_state, &CE_context,
				       &transfer_context, &buf,
				       &nbytes, &id, &sw_idx, &hw_idx,
				       &toeplitz_hash_result) ==
			       QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					/* Drop the lock across the callback
					 * so it may itself post new sends.
					 */
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					/* HTT H2T: just return the send
					 * credit to the pipe.
					 */
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;
					qdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
				}
			}
		}
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, 0);
	Q_TARGET_ACCESS_END(scn);
}
  1123. #endif /*ATH_11AC_TXCOMPACT */
/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
  1132. #define CE_TXRX_COMP_CHECK_THRESHOLD 20
  1133. /*
  1134. * Guts of interrupt handler for per-engine interrupts on a particular CE.
  1135. *
  1136. * Invokes registered callbacks for recv_complete,
  1137. * send_complete, and watermarks.
  1138. *
  1139. * Returns: number of messages processed
  1140. */
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	uint32_t CE_int_status;
	unsigned int more_comp_cnt = 0;	/* bounds the Rx re-check loop */
	unsigned int more_snd_comp_cnt = 0;	/* bounds the Tx re-check loop */
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	uint32_t mode = hif_get_conparam(scn);
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		HIF_ERROR("[premature rc=0]\n");
		return 0; /* no work done */
	}
	qdf_spin_lock(&CE_state->ce_index_lock);
	/* Clear force_break flag and re-initialize receive_count to 0 */
	/* NAPI: scn variables- thread/multi-processing safety? */
	CE_state->receive_count = 0;
	CE_state->force_break = 0;
more_completions:
	if (CE_state->recv_cb) {
		/* Pop completed recv buffers and call
		 * the registered recv callback for each
		 */
		while (ce_completed_recv_next_nolock
				(CE_state, &CE_context, &transfer_context,
				&buf, &nbytes, &id, &flags) ==
				QDF_STATUS_SUCCESS) {
			/* Lock is dropped across the callback; it may
			 * repost buffers or take other CE entry points.
			 */
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, flags);
			/*
			 * EV #112693 -
			 * [Peregrine][ES1][WB342][Win8x86][Performance]
			 * BSoD_0x133 occurred in VHT80 UDP_DL
			 * Break out DPC by force if number of loops in
			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
			 * to avoid spending too long time in
			 * DPC for each interrupt handling. Schedule another
			 * DPC to avoid data loss if we had taken
			 * force-break action before apply to Windows OS
			 * only currently, Linux/MAC os can expand to their
			 * platform if necessary
			 */
			/* Break the receive processes by
			 * force if force_break set up
			 */
			if (qdf_unlikely(CE_state->force_break)) {
				/* Leave rx_pending set so the remaining
				 * work is picked up on the next pass.
				 */
				qdf_atomic_set(&CE_state->rx_pending, 1);
				CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					HOST_IS_COPY_COMPLETE_MASK);
				if (Q_TARGET_ACCESS_END(scn) < 0)
					HIF_ERROR("<--[premature rc=%d]\n",
						  CE_state->receive_count);
				return CE_state->receive_count;
			}
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
	}
	/*
	 * Attention: We may experience potential infinite loop for below
	 * While Loop during Sending Stress test.
	 * Resolve the same way as Receive Case (Refer to EV #112693)
	 */
	if (CE_state->send_cb) {
		/* Pop completed send buffers and call
		 * the registered send callback for each
		 */
#ifdef ATH_11AC_TXCOMPACT
		while (ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			 &transfer_context, &buf, &nbytes,
			 &id, &sw_idx, &hw_idx,
			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
			if (CE_id != CE_HTT_H2T_MSG ||
			    WLAN_IS_EPPING_ENABLED(mode)) {
				qdf_spin_unlock(&CE_state->ce_index_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
				qdf_spin_lock(&CE_state->ce_index_lock);
			} else {
				/* HTT H2T: just return the send credit
				 * to the pipe instead of calling back.
				 */
				struct HIF_CE_pipe_info *pipe_info =
					(struct HIF_CE_pipe_info *)CE_context;
				qdf_spin_lock(&pipe_info->
					      completion_freeq_lock);
				pipe_info->num_sends_allowed++;
				qdf_spin_unlock(&pipe_info->
						completion_freeq_lock);
			}
		}
#else                           /*ATH_11AC_TXCOMPACT */
		while (ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			 &transfer_context, &buf, &nbytes,
			 &id, &sw_idx, &hw_idx,
			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, sw_idx, hw_idx,
					  toeplitz_hash_result);
			qdf_spin_lock(&CE_state->ce_index_lock);
		}
#endif /*ATH_11AC_TXCOMPACT */
	}
more_watermarks:
	if (CE_state->misc_cbs) {
		CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
		if (CE_int_status & CE_WATERMARK_MASK) {
			if (CE_state->watermark_cb) {
				qdf_spin_unlock(&CE_state->ce_index_lock);
				/* Convert HW IS bits to software flags */
				flags =
					(CE_int_status & CE_WATERMARK_MASK) >>
					CE_WM_SHFT;
				CE_state->
				watermark_cb((struct CE_handle *)CE_state,
					     CE_state->wm_context, flags);
				qdf_spin_lock(&CE_state->ce_index_lock);
			}
		}
	}
	/*
	 * Clear the misc interrupts (watermark) that were handled above,
	 * and that will be checked again below.
	 * Clear and check for copy-complete interrupts again, just in case
	 * more copy completions happened while the misc interrupts were being
	 * handled.
	 */
	CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
				   CE_WATERMARK_MASK |
				   HOST_IS_COPY_COMPLETE_MASK);
	/*
	 * Now that per-engine interrupts are cleared, verify that
	 * no recv interrupts arrive while processing send interrupts,
	 * and no recv or send interrupts happened while processing
	 * misc interrupts.Go back and check again.Keep checking until
	 * we find no more events to process.
	 */
	if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
		if (WLAN_IS_EPPING_ENABLED(mode) ||
		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			HIF_ERROR(
				"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				__func__, CE_state->dest_ring->nentries_mask,
				CE_state->dest_ring->sw_index,
				CE_DEST_RING_READ_IDX_GET(scn,
							  CE_state->ctrl_addr));
		}
	}
	if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
		if (WLAN_IS_EPPING_ENABLED(mode) ||
		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			HIF_ERROR(
				"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				__func__, CE_state->src_ring->nentries_mask,
				CE_state->src_ring->sw_index,
				CE_SRC_RING_READ_IDX_GET(scn,
							 CE_state->ctrl_addr));
		}
	}
	if (CE_state->misc_cbs) {
		CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
		if (CE_int_status & CE_WATERMARK_MASK) {
			if (CE_state->watermark_cb) {
				goto more_watermarks;
			}
		}
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);
	qdf_atomic_set(&CE_state->rx_pending, 0);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
	return CE_state->receive_count;
}
  1329. /*
  1330. * Handler for per-engine interrupts on ALL active CEs.
  1331. * This is used in cases where the system is sharing a
 * single interrupt for all CEs
  1333. */
  1334. void ce_per_engine_service_any(int irq, struct hif_softc *scn)
  1335. {
  1336. int CE_id;
  1337. uint32_t intr_summary;
  1338. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1339. return;
  1340. if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
  1341. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1342. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1343. if (qdf_atomic_read(&CE_state->rx_pending)) {
  1344. qdf_atomic_set(&CE_state->rx_pending, 0);
  1345. ce_per_engine_service(scn, CE_id);
  1346. }
  1347. }
  1348. Q_TARGET_ACCESS_END(scn);
  1349. return;
  1350. }
  1351. intr_summary = CE_INTERRUPT_SUMMARY(scn);
  1352. for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
  1353. if (intr_summary & (1 << CE_id)) {
  1354. intr_summary &= ~(1 << CE_id);
  1355. } else {
  1356. continue; /* no intr pending on this CE */
  1357. }
  1358. ce_per_engine_service(scn, CE_id);
  1359. }
  1360. Q_TARGET_ACCESS_END(scn);
  1361. }
  1362. /*
  1363. * Adjust interrupts for the copy complete handler.
  1364. * If it's needed for either send or recv, then unmask
  1365. * this interrupt; otherwise, mask it.
  1366. *
  1367. * Called with target_lock held.
  1368. */
  1369. static void
  1370. ce_per_engine_handler_adjust(struct CE_state *CE_state,
  1371. int disable_copy_compl_intr)
  1372. {
  1373. uint32_t ctrl_addr = CE_state->ctrl_addr;
  1374. struct hif_softc *scn = CE_state->scn;
  1375. CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
  1376. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1377. return;
  1378. if ((!disable_copy_compl_intr) &&
  1379. (CE_state->send_cb || CE_state->recv_cb)) {
  1380. CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
  1381. } else {
  1382. CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
  1383. }
  1384. if (CE_state->watermark_cb) {
  1385. CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
  1386. } else {
  1387. CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
  1388. }
  1389. Q_TARGET_ACCESS_END(scn);
  1390. }
  1391. /*Iterate the CE_state list and disable the compl interrupt
  1392. * if it has been registered already.
  1393. */
  1394. void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
  1395. {
  1396. int CE_id;
  1397. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1398. return;
  1399. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1400. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1401. uint32_t ctrl_addr = CE_state->ctrl_addr;
  1402. /* if the interrupt is currently enabled, disable it */
  1403. if (!CE_state->disable_copy_compl_intr
  1404. && (CE_state->send_cb || CE_state->recv_cb)) {
  1405. CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
  1406. }
  1407. if (CE_state->watermark_cb) {
  1408. CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
  1409. }
  1410. }
  1411. Q_TARGET_ACCESS_END(scn);
  1412. }
  1413. void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
  1414. {
  1415. int CE_id;
  1416. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1417. return;
  1418. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1419. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1420. uint32_t ctrl_addr = CE_state->ctrl_addr;
  1421. /*
  1422. * If the CE is supposed to have copy complete interrupts
  1423. * enabled (i.e. there a callback registered, and the
  1424. * "disable" flag is not set), then re-enable the interrupt.
  1425. */
  1426. if (!CE_state->disable_copy_compl_intr
  1427. && (CE_state->send_cb || CE_state->recv_cb)) {
  1428. CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
  1429. }
  1430. if (CE_state->watermark_cb) {
  1431. CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
  1432. }
  1433. }
  1434. Q_TARGET_ACCESS_END(scn);
  1435. }
  1436. /**
  1437. * ce_send_cb_register(): register completion handler
  1438. * @copyeng: CE_state representing the ce we are adding the behavior to
  1439. * @fn_ptr: callback that the ce should use when processing tx completions
 * @disable_interrupts: whether the interrupts should be enabled or not.
  1441. *
  1442. * Caller should guarantee that no transactions are in progress before
  1443. * switching the callback function.
  1444. *
  1445. * Registers the send context before the fn pointer so that if the cb is valid
  1446. * the context should be valid.
  1447. *
  1448. * Beware that currently this function will enable completion interrupts.
  1449. */
  1450. void
  1451. ce_send_cb_register(struct CE_handle *copyeng,
  1452. ce_send_cb fn_ptr,
  1453. void *ce_send_context, int disable_interrupts)
  1454. {
  1455. struct CE_state *CE_state = (struct CE_state *)copyeng;
  1456. if (CE_state == NULL) {
  1457. pr_err("%s: Error CE state = NULL\n", __func__);
  1458. return;
  1459. }
  1460. CE_state->send_context = ce_send_context;
  1461. CE_state->send_cb = fn_ptr;
  1462. ce_per_engine_handler_adjust(CE_state, disable_interrupts);
  1463. }
  1464. /**
  1465. * ce_recv_cb_register(): register completion handler
  1466. * @copyeng: CE_state representing the ce we are adding the behavior to
  1467. * @fn_ptr: callback that the ce should use when processing rx completions
 * @disable_interrupts: whether the interrupts should be enabled or not.
  1469. *
  1470. * Registers the send context before the fn pointer so that if the cb is valid
  1471. * the context should be valid.
  1472. *
  1473. * Caller should guarantee that no transactions are in progress before
  1474. * switching the callback function.
  1475. */
  1476. void
  1477. ce_recv_cb_register(struct CE_handle *copyeng,
  1478. CE_recv_cb fn_ptr,
  1479. void *CE_recv_context, int disable_interrupts)
  1480. {
  1481. struct CE_state *CE_state = (struct CE_state *)copyeng;
  1482. if (CE_state == NULL) {
  1483. pr_err("%s: ERROR CE state = NULL\n", __func__);
  1484. return;
  1485. }
  1486. CE_state->recv_context = CE_recv_context;
  1487. CE_state->recv_cb = fn_ptr;
  1488. ce_per_engine_handler_adjust(CE_state, disable_interrupts);
  1489. }
  1490. /**
  1491. * ce_watermark_cb_register(): register completion handler
  1492. * @copyeng: CE_state representing the ce we are adding the behavior to
  1493. * @fn_ptr: callback that the ce should use when processing watermark events
  1494. *
  1495. * Caller should guarantee that no watermark events are being processed before
  1496. * switching the callback function.
  1497. */
  1498. void
  1499. ce_watermark_cb_register(struct CE_handle *copyeng,
  1500. CE_watermark_cb fn_ptr, void *CE_wm_context)
  1501. {
  1502. struct CE_state *CE_state = (struct CE_state *)copyeng;
  1503. CE_state->watermark_cb = fn_ptr;
  1504. CE_state->wm_context = CE_wm_context;
  1505. ce_per_engine_handler_adjust(CE_state, 0);
  1506. if (fn_ptr) {
  1507. CE_state->misc_cbs = 1;
  1508. }
  1509. }
  1510. #ifdef WLAN_FEATURE_FASTPATH
  1511. /**
  1512. * ce_pkt_dl_len_set() set the HTT packet download length
  1513. * @hif_sc: HIF context
  1514. * @pkt_download_len: download length
  1515. *
  1516. * Return: None
  1517. */
  1518. void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
  1519. {
  1520. struct hif_softc *sc = (struct hif_softc *)(hif_sc);
  1521. struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
  1522. qdf_assert_always(ce_state);
  1523. ce_state->download_len = pkt_download_len;
  1524. qdf_print("%s CE %d Pkt download length %d", __func__,
  1525. ce_state->id, ce_state->download_len);
  1526. }
  1527. #else
  1528. void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
  1529. {
  1530. }
  1531. #endif /* WLAN_FEATURE_FASTPATH */
  1532. bool ce_get_rx_pending(struct hif_softc *scn)
  1533. {
  1534. int CE_id;
  1535. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1536. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1537. if (qdf_atomic_read(&CE_state->rx_pending))
  1538. return true;
  1539. }
  1540. return false;
  1541. }
  1542. /**
  1543. * ce_check_rx_pending() - ce_check_rx_pending
  1544. * @scn: hif_softc
  1545. * @ce_id: ce_id
  1546. *
  1547. * Return: bool
  1548. */
  1549. bool ce_check_rx_pending(struct hif_softc *scn, int ce_id)
  1550. {
  1551. struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
  1552. if (qdf_atomic_read(&CE_state->rx_pending))
  1553. return true;
  1554. else
  1555. return false;
  1556. }
  1557. /**
  1558. * ce_enable_msi(): write the msi configuration to the target
  1559. * @scn: hif context
  1560. * @CE_id: which copy engine will be configured for msi interupts
  1561. * @msi_addr_lo: Hardware will write to this address to generate an interrupt
  1562. * @msi_addr_hi: Hardware will write to this address to generate an interrupt
  1563. * @msi_data: Hardware will write this data to generate an interrupt
  1564. *
  1565. * should be done in the initialization sequence so no locking would be needed
  1566. */
  1567. void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
  1568. uint32_t msi_addr_lo, uint32_t msi_addr_hi,
  1569. uint32_t msi_data)
  1570. {
  1571. #ifdef WLAN_ENABLE_QCA6180
  1572. struct CE_state *CE_state;
  1573. A_target_id_t targid;
  1574. u_int32_t ctrl_addr;
  1575. uint32_t tmp;
  1576. CE_state = scn->ce_id_to_state[CE_id];
  1577. if (!CE_state) {
  1578. HIF_ERROR("%s: error - CE_state = NULL", __func__);
  1579. return;
  1580. }
  1581. targid = TARGID(sc);
  1582. ctrl_addr = CE_state->ctrl_addr;
  1583. CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
  1584. CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
  1585. CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
  1586. tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
  1587. tmp |= (1 << CE_MSI_ENABLE_BIT);
  1588. CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
  1589. #endif
  1590. }
  1591. #ifdef IPA_OFFLOAD
  1592. /**
  1593. * ce_ipa_get_resource() - get uc resource on copyengine
  1594. * @ce: copyengine context
  1595. * @ce_sr_base_paddr: copyengine source ring base physical address
  1596. * @ce_sr_ring_size: copyengine source ring size
  1597. * @ce_reg_paddr: copyengine register physical address
  1598. *
  1599. * Copy engine should release resource to micro controller
  1600. * Micro controller needs
  1601. * - Copy engine source descriptor base address
  1602. * - Copy engine source descriptor size
  1603. * - PCI BAR address to access copy engine regiser
  1604. *
  1605. * Return: None
  1606. */
  1607. void ce_ipa_get_resource(struct CE_handle *ce,
  1608. qdf_dma_addr_t *ce_sr_base_paddr,
  1609. uint32_t *ce_sr_ring_size,
  1610. qdf_dma_addr_t *ce_reg_paddr)
  1611. {
  1612. struct CE_state *CE_state = (struct CE_state *)ce;
  1613. uint32_t ring_loop;
  1614. struct CE_src_desc *ce_desc;
  1615. qdf_dma_addr_t phy_mem_base;
  1616. struct hif_softc *scn = CE_state->scn;
  1617. if (CE_RUNNING != CE_state->state) {
  1618. *ce_sr_base_paddr = 0;
  1619. *ce_sr_ring_size = 0;
  1620. return;
  1621. }
  1622. /* Update default value for descriptor */
  1623. for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
  1624. ring_loop++) {
  1625. ce_desc = (struct CE_src_desc *)
  1626. ((char *)CE_state->src_ring->base_addr_owner_space +
  1627. ring_loop * (sizeof(struct CE_src_desc)));
  1628. CE_IPA_RING_INIT(ce_desc);
  1629. }
  1630. /* Get BAR address */
  1631. hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
  1632. *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
  1633. *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
  1634. sizeof(struct CE_src_desc));
  1635. *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
  1636. SR_WR_INDEX_ADDRESS;
  1637. return;
  1638. }
  1639. #endif /* IPA_OFFLOAD */