ce_service.c

  1. /*
  2. * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hif.h"
  19. #include "hif_io32.h"
  20. #include "ce_api.h"
  21. #include "ce_main.h"
  22. #include "ce_internal.h"
  23. #include "ce_reg.h"
  24. #include "qdf_lock.h"
  25. #include "regtable.h"
  26. #include "hif_main.h"
  27. #include "hif_debug.h"
  28. #include "hif_napi.h"
  29. #include "qdf_module.h"
  30. #ifdef IPA_OFFLOAD
  31. #ifdef QCA_WIFI_3_0
  32. #define CE_IPA_RING_INIT(ce_desc) \
  33. do { \
  34. ce_desc->gather = 0; \
  35. ce_desc->enable_11h = 0; \
  36. ce_desc->meta_data_low = 0; \
  37. ce_desc->packet_result_offset = 64; \
  38. ce_desc->toeplitz_hash_enable = 0; \
  39. ce_desc->addr_y_search_disable = 0; \
  40. ce_desc->addr_x_search_disable = 0; \
  41. ce_desc->misc_int_disable = 0; \
  42. ce_desc->target_int_disable = 0; \
  43. ce_desc->host_int_disable = 0; \
  44. ce_desc->dest_byte_swap = 0; \
  45. ce_desc->byte_swap = 0; \
  46. ce_desc->type = 2; \
  47. ce_desc->tx_classify = 1; \
  48. ce_desc->buffer_addr_hi = 0; \
  49. ce_desc->meta_data = 0; \
  50. ce_desc->nbytes = 128; \
  51. } while (0)
  52. #else
  53. #define CE_IPA_RING_INIT(ce_desc) \
  54. do { \
  55. ce_desc->byte_swap = 0; \
  56. ce_desc->nbytes = 60; \
  57. ce_desc->gather = 0; \
  58. } while (0)
  59. #endif /* QCA_WIFI_3_0 */
  60. #endif /* IPA_OFFLOAD */
  61. static int war1_allow_sleep;
  62. /* io32 write workaround */
  63. static int hif_ce_war1;
  64. /**
  65. * hif_ce_war_disable() - disable ce war globally
  66. */
  67. void hif_ce_war_disable(void)
  68. {
  69. hif_ce_war1 = 0;
  70. }
  71. /**
  72. * hif_ce_war_enable() - enable ce war globally
  73. */
  74. void hif_ce_war_enable(void)
  75. {
  76. hif_ce_war1 = 1;
  77. }
  78. /*
  79. * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
  80. * being defined here
  81. */
  82. #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
  83. #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
  84. #define CE_DEBUG_DATA_PER_ROW 16
  85. static const char *ce_event_type_to_str(enum hif_ce_event_type type);
  86. int get_next_record_index(qdf_atomic_t *table_index, int array_size)
  87. {
  88. int record_index = qdf_atomic_inc_return(table_index);
  89. if (record_index == array_size)
  90. qdf_atomic_sub(array_size, table_index);
  91. while (record_index >= array_size)
  92. record_index -= array_size;
  93. return record_index;
  94. }
  95. qdf_export_symbol(get_next_record_index);
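/*
 * Example (editor's sketch): get_next_record_index() hands out the next
 * slot in a fixed-size history ring, wrapping at array_size. A caller
 * such as hif_record_ce_desc_event() below uses it roughly as:
 *
 *	int idx = get_next_record_index(&ce_hist->history_index[ce_id],
 *					HIF_CE_HISTORY_MAX);
 *	event = &hist_ev[idx];
 *
 * The atomic increment keeps slot reservation safe against concurrent
 * recorders; the wrap is then normalized locally.
 */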
  96. #ifdef HIF_CE_DEBUG_DATA_BUF
  97. void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
  98. {
  99. uint8_t *data = NULL;
  100. if (!event->data) {
  101. hif_err_rl("No ce debug memory allocated");
  102. return;
  103. }
  104. if (event->memory && len > 0)
  105. data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
  106. event->actual_data_len = 0;
  107. qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
  108. if (data && len > 0) {
  109. qdf_mem_copy(event->data, data,
  110. ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
  111. len : CE_DEBUG_MAX_DATA_BUF_SIZE));
  112. event->actual_data_len = len;
  113. }
  114. }
  115. qdf_export_symbol(hif_ce_desc_data_record);
  116. void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
  117. {
  118. qdf_mem_zero(event,
  119. offsetof(struct hif_ce_desc_event, data));
  120. }
  121. qdf_export_symbol(hif_clear_ce_desc_debug_data);
  122. #else
  123. void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
  124. {
  125. qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
  126. }
  127. qdf_export_symbol(hif_clear_ce_desc_debug_data);
  128. #endif /* HIF_CE_DEBUG_DATA_BUF */
  129. #if defined(HIF_RECORD_PADDR)
  130. void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
  131. struct hif_ce_desc_event *event,
  132. qdf_nbuf_t memory)
  133. {
  134. if (memory) {
  135. event->dma_addr = QDF_NBUF_CB_PADDR(memory);
  136. event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
  137. scn->qdf_dev,
  138. event->dma_addr);
  139. event->virt_to_phy =
  140. virt_to_phys(qdf_nbuf_data(memory));
  141. }
  142. }
  143. #endif /* HIF_RECORD_PADDR */
  144. /**
  145. * hif_record_ce_desc_event() - record ce descriptor events
  146. * @scn: hif_softc
  147. * @ce_id: which ce is the event occurring on
  148. * @type: what happened
  149. * @descriptor: pointer to the descriptor posted/completed
  150. * @memory: virtual address of buffer related to the descriptor
  151. * @index: index that the descriptor was/will be at.
  152. */
  153. void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
  154. enum hif_ce_event_type type,
  155. union ce_desc *descriptor,
  156. void *memory, int index,
  157. int len)
  158. {
  159. int record_index;
  160. struct hif_ce_desc_event *event;
  161. struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
  162. struct hif_ce_desc_event *hist_ev = NULL;
  163. if (ce_id >= CE_COUNT_MAX)
  164. return;
  165. hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
  169. if (!ce_hist->enable[ce_id])
  170. return;
  171. if (!hist_ev)
  172. return;
  173. record_index = get_next_record_index(
  174. &ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
  175. event = &hist_ev[record_index];
  176. hif_clear_ce_desc_debug_data(event);
  177. event->type = type;
  178. event->time = qdf_get_log_timestamp();
  179. event->cpu_id = qdf_get_cpu();
  180. if (descriptor)
  181. qdf_mem_copy(&event->descriptor, descriptor,
  182. sizeof(union ce_desc));
  183. event->memory = memory;
  184. event->index = index;
  185. if (event->type == HIF_RX_DESC_POST ||
  186. event->type == HIF_RX_DESC_COMPLETION)
  187. hif_ce_desc_record_rx_paddr(scn, event, memory);
  188. if (ce_hist->data_enable[ce_id])
  189. hif_ce_desc_data_record(event, len);
  190. }
  191. qdf_export_symbol(hif_record_ce_desc_event);
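/*
 * Example (editor's sketch): a typical TX post records the descriptor,
 * the nbuf and the ring index, as done in ce_send_single() below:
 *
 *	hif_record_ce_desc_event(scn, ce_state->id, HIF_TX_DESC_POST,
 *				 (union ce_desc *)src_desc, msdu,
 *				 write_index, len);
 *
 * When history or data logging for this CE id is disabled the call
 * returns early, so it is safe to leave in fast paths.
 */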
  192. /**
  193. * ce_init_ce_desc_event_log() - initialize the ce event log
  194. * @ce_id: copy engine id for which we are initializing the log
  195. * @size: size of array to dedicate
  196. *
  197. * Currently the passed size is ignored in favor of a precompiled value.
  198. */
  199. void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
  200. {
  201. struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
  202. qdf_atomic_init(&ce_hist->history_index[ce_id]);
  203. qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
  204. }
  205. /**
  206. * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
  207. * @ce_id: copy engine id for which we are deinitializing the log
  208. *
  209. */
  210. inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
  211. {
  212. struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
  213. qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
  214. }
  215. #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
  216. void hif_record_ce_desc_event(struct hif_softc *scn,
  217. int ce_id, enum hif_ce_event_type type,
  218. union ce_desc *descriptor, void *memory,
  219. int index, int len)
  220. {
  221. }
  222. qdf_export_symbol(hif_record_ce_desc_event);
  223. inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
  224. int size)
  225. {
  226. }
  227. void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
  228. {
  229. }
  230. #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
  231. #ifdef NAPI_YIELD_BUDGET_BASED
  232. bool hif_ce_service_should_yield(struct hif_softc *scn,
  233. struct CE_state *ce_state)
  234. {
  235. bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count);
  236. /* Set receive_count to MAX_NUM_OF_RECEIVES when the count goes
  237. * beyond MAX_NUM_OF_RECEIVES, to avoid a NAPI bucket calculation
  238. * issue. This can happen in fast path handling because processing
  239. * is done in batches.
  240. */
  241. if (yield)
  242. ce_state->receive_count = MAX_NUM_OF_RECEIVES;
  243. return yield;
  244. }
  245. #else
  246. /**
  247. * hif_ce_service_should_yield() - return true if the service is hogging the cpu
  248. * @scn: hif context
  249. * @ce_state: context of the copy engine being serviced
  250. *
  251. * Return: true if the service should yield
  252. */
  253. bool hif_ce_service_should_yield(struct hif_softc *scn,
  254. struct CE_state *ce_state)
  255. {
  256. bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
  257. time_limit_reached = qdf_time_sched_clock() >
  258. ce_state->ce_service_yield_time ? 1 : 0;
  259. if (!time_limit_reached)
  260. rxpkt_thresh_reached = hif_max_num_receives_reached
  261. (scn, ce_state->receive_count);
  262. /* Set receive_count to MAX_NUM_OF_RECEIVES when the count goes
  263. * beyond MAX_NUM_OF_RECEIVES, to avoid a NAPI bucket calculation
  264. * issue. This can happen in fast path handling because processing
  265. * is done in batches.
  266. */
  267. if (rxpkt_thresh_reached)
  268. ce_state->receive_count = MAX_NUM_OF_RECEIVES;
  269. yield = time_limit_reached || rxpkt_thresh_reached;
  270. if (yield &&
  271. ce_state->htt_rx_data &&
  272. hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
  273. hif_napi_update_yield_stats(ce_state,
  274. time_limit_reached,
  275. rxpkt_thresh_reached);
  276. }
  277. return yield;
  278. }
  279. qdf_export_symbol(hif_ce_service_should_yield);
  280. #endif
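/*
 * Example (editor's sketch, placeholder helper names): a per-CE service
 * loop is expected to poll this helper between batches and bail out when
 * either the yield time or the receive threshold is hit:
 *
 *	while (more_work(ce_state)) {          // hypothetical helpers
 *		process_batch(ce_state);
 *		if (hif_ce_service_should_yield(scn, ce_state))
 *			break;
 *	}
 *
 * ce_service_yield_time itself is set up in ce_per_engine_service()
 * before the per-CE service routine runs.
 */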
  281. /*
  282. * Guts of ce_send, used by both ce_send and ce_sendlist_send.
  283. * The caller takes responsibility for any needed locking.
  284. */
  285. void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
  286. u32 ctrl_addr, unsigned int write_index)
  287. {
  288. if (hif_ce_war1) {
  289. void __iomem *indicator_addr;
  290. indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
  291. if (!war1_allow_sleep
  292. && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
  293. hif_write32_mb(scn, indicator_addr,
  294. (CDC_WAR_MAGIC_STR | write_index));
  295. } else {
  296. unsigned long irq_flags;
  297. local_irq_save(irq_flags);
  298. hif_write32_mb(scn, indicator_addr, 1);
  299. /*
  300. * PCIE write waits for ACK in IPQ8K, there is no
  301. * need to read back value.
  302. */
  303. (void)hif_read32_mb(scn, indicator_addr);
  304. /* conservative */
  305. (void)hif_read32_mb(scn, indicator_addr);
  306. CE_SRC_RING_WRITE_IDX_SET(scn,
  307. ctrl_addr, write_index);
  308. hif_write32_mb(scn, indicator_addr, 0);
  309. local_irq_restore(irq_flags);
  310. }
  311. } else {
  312. CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
  313. }
  314. }
  315. qdf_export_symbol(war_ce_src_ring_write_idx_set);
  316. QDF_STATUS
  317. ce_send(struct CE_handle *copyeng,
  318. void *per_transfer_context,
  319. qdf_dma_addr_t buffer,
  320. uint32_t nbytes,
  321. uint32_t transfer_id,
  322. uint32_t flags,
  323. uint32_t user_flag)
  324. {
  325. struct CE_state *CE_state = (struct CE_state *)copyeng;
  326. QDF_STATUS status;
  327. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
  328. qdf_spin_lock_bh(&CE_state->ce_index_lock);
  329. status = hif_state->ce_services->ce_send_nolock(copyeng,
  330. per_transfer_context, buffer, nbytes,
  331. transfer_id, flags, user_flag);
  332. qdf_spin_unlock_bh(&CE_state->ce_index_lock);
  333. return status;
  334. }
  335. qdf_export_symbol(ce_send);
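/*
 * Example (editor's sketch, placeholder names): a caller that already
 * has a DMA-mapped buffer can post it with:
 *
 *	status = ce_send(copyeng, my_context, buffer_paddr, nbytes,
 *			 transfer_id, 0, 0);   // flags = 0, user_flag = 0
 *	if (status != QDF_STATUS_SUCCESS)
 *		back off and retry later;
 *
 * ce_send() only takes ce_index_lock and delegates to the registered
 * ce_send_nolock service; multi-fragment transfers should use the
 * sendlist API below.
 */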
  336. unsigned int ce_sendlist_sizeof(void)
  337. {
  338. return sizeof(struct ce_sendlist);
  339. }
  340. void ce_sendlist_init(struct ce_sendlist *sendlist)
  341. {
  342. struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
  343. sl->num_items = 0;
  344. }
  345. QDF_STATUS
  346. ce_sendlist_buf_add(struct ce_sendlist *sendlist,
  347. qdf_dma_addr_t buffer,
  348. uint32_t nbytes,
  349. uint32_t flags,
  350. uint32_t user_flags)
  351. {
  352. struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
  353. unsigned int num_items = sl->num_items;
  354. struct ce_sendlist_item *item;
  355. if (num_items >= CE_SENDLIST_ITEMS_MAX) {
  356. QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
  357. return QDF_STATUS_E_RESOURCES;
  358. }
  359. item = &sl->item[num_items];
  360. item->send_type = CE_SIMPLE_BUFFER_TYPE;
  361. item->data = buffer;
  362. item->u.nbytes = nbytes;
  363. item->flags = flags;
  364. item->user_flags = user_flags;
  365. sl->num_items = num_items + 1;
  366. return QDF_STATUS_SUCCESS;
  367. }
  368. QDF_STATUS
  369. ce_sendlist_send(struct CE_handle *copyeng,
  370. void *per_transfer_context,
  371. struct ce_sendlist *sendlist, unsigned int transfer_id)
  372. {
  373. struct CE_state *CE_state = (struct CE_state *)copyeng;
  374. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
  375. return hif_state->ce_services->ce_sendlist_send(copyeng,
  376. per_transfer_context, sendlist, transfer_id);
  377. }
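/*
 * Example (editor's sketch, placeholder names): building and sending a
 * two-fragment transfer with the sendlist API:
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, frag0_paddr, frag0_len, flags, user_flags);
 *	ce_sendlist_buf_add(&sl, frag1_paddr, frag1_len, flags, user_flags);
 *	status = ce_sendlist_send(copyeng, per_transfer_context, &sl,
 *				  transfer_id);
 *
 * ce_sendlist_buf_add() returns QDF_STATUS_E_RESOURCES once
 * CE_SENDLIST_ITEMS_MAX items have been added.
 */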
  378. #ifndef AH_NEED_TX_DATA_SWAP
  379. #define AH_NEED_TX_DATA_SWAP 0
  380. #endif
  381. /**
  382. * ce_batch_send() - sends a batch of msdus at once
  383. * @ce_tx_hdl : pointer to CE handle
  384. * @msdu : list of msdus to be sent
  385. * @transfer_id : transfer id
  386. * @len : Downloaded length
  387. * @sendhead : sendhead
  388. *
  389. * Assumption: Called with an array of MSDUs
  390. * Function:
  391. * For each msdu in the array
  392. * 1. Send each msdu
  393. * 2. Increment the write index accordingly.
  394. *
  395. * Return: list of msdus not sent
  396. */
  397. qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
  398. uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
  399. {
  400. struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
  401. struct hif_softc *scn = ce_state->scn;
  402. struct CE_ring_state *src_ring = ce_state->src_ring;
  403. u_int32_t ctrl_addr = ce_state->ctrl_addr;
  404. /* A_target_id_t targid = TARGID(scn);*/
  405. uint32_t nentries_mask = src_ring->nentries_mask;
  406. uint32_t sw_index, write_index;
  407. struct CE_src_desc *src_desc_base =
  408. (struct CE_src_desc *)src_ring->base_addr_owner_space;
  409. uint32_t *src_desc;
  410. struct CE_src_desc lsrc_desc = {0};
  411. int deltacount = 0;
  412. qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
  413. DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
  414. sw_index = src_ring->sw_index;
  415. write_index = src_ring->write_index;
  416. deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
  417. while (msdu) {
  418. tempnext = qdf_nbuf_next(msdu);
  419. if (deltacount < 2) {
  420. if (sendhead)
  421. return msdu;
  422. hif_err("Out of descriptors");
  423. src_ring->write_index = write_index;
  424. war_ce_src_ring_write_idx_set(scn, ctrl_addr,
  425. write_index);
  426. sw_index = src_ring->sw_index;
  427. write_index = src_ring->write_index;
  428. deltacount = CE_RING_DELTA(nentries_mask, write_index,
  429. sw_index-1);
  430. if (!freelist) {
  431. freelist = msdu;
  432. hfreelist = msdu;
  433. } else {
  434. qdf_nbuf_set_next(freelist, msdu);
  435. freelist = msdu;
  436. }
  437. qdf_nbuf_set_next(msdu, NULL);
  438. msdu = tempnext;
  439. continue;
  440. }
  441. src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
  442. write_index);
  443. src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
  444. lsrc_desc.meta_data = transfer_id;
  445. if (len > msdu->len)
  446. len = msdu->len;
  447. lsrc_desc.nbytes = len;
  448. /* Data packet is a byte stream, so disable byte swap */
  449. lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
  450. lsrc_desc.gather = 0; /*For the last one, gather is not set*/
  451. src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
  452. src_ring->per_transfer_context[write_index] = msdu;
  453. write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
  454. if (sendhead)
  455. break;
  456. qdf_nbuf_set_next(msdu, NULL);
  457. msdu = tempnext;
  458. }
  459. src_ring->write_index = write_index;
  460. war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
  461. return hfreelist;
  462. }
  463. /**
  464. * ce_update_tx_ring() - Advance sw index.
  465. * @ce_tx_hdl : pointer to CE handle
  466. * @num_htt_cmpls : htt completions received.
  467. *
  468. * Function:
  469. * Increment the value of sw index of src ring
  470. * according to number of htt completions
  471. * received.
  472. *
  473. * Return: void
  474. */
  475. #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
  476. void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
  477. {
  478. struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
  479. struct CE_ring_state *src_ring = ce_state->src_ring;
  480. uint32_t nentries_mask = src_ring->nentries_mask;
  481. /*
  482. * Advance the s/w index:
  483. * This effectively simulates completing the CE ring descriptors
  484. */
  485. src_ring->sw_index =
  486. CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
  487. num_htt_cmpls);
  488. }
  489. #else
  490. void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
  491. {}
  492. #endif
  493. /**
  494. * ce_send_single() - sends a single msdu
  495. * @ce_tx_hdl : pointer to CE handle
  496. * @msdu : msdu to be sent
  497. * @transfer_id : transfer id
  498. * @len : Downloaded length
  499. *
  500. * Function:
  501. * 1. Send one msdu
  502. * 2. Increment the write index of the src ring accordingly.
  503. *
  504. * Return: QDF_STATUS: CE sent status
  505. */
  506. QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
  507. uint32_t transfer_id, u_int32_t len)
  508. {
  509. struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
  510. struct hif_softc *scn = ce_state->scn;
  511. struct CE_ring_state *src_ring = ce_state->src_ring;
  512. uint32_t ctrl_addr = ce_state->ctrl_addr;
  513. /*A_target_id_t targid = TARGID(scn);*/
  514. uint32_t nentries_mask = src_ring->nentries_mask;
  515. uint32_t sw_index, write_index;
  516. struct CE_src_desc *src_desc_base =
  517. (struct CE_src_desc *)src_ring->base_addr_owner_space;
  518. uint32_t *src_desc;
  519. struct CE_src_desc lsrc_desc = {0};
  520. enum hif_ce_event_type event_type;
  521. DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
  522. sw_index = src_ring->sw_index;
  523. write_index = src_ring->write_index;
  524. if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
  525. sw_index-1) < 1)) {
  526. hif_err("ce send fail %d %d %d", nentries_mask,
  527. write_index, sw_index);
  528. return QDF_STATUS_E_RESOURCES;
  529. }
  530. src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
  531. src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
  532. lsrc_desc.meta_data = transfer_id;
  533. lsrc_desc.nbytes = len;
  534. /* Data packet is a byte stream, so disable byte swap */
  535. lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
  536. lsrc_desc.gather = 0; /* For the last one, gather is not set */
  537. src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
  538. src_ring->per_transfer_context[write_index] = msdu;
  539. if (((struct CE_src_desc *)src_desc)->gather)
  540. event_type = HIF_TX_GATHER_DESC_POST;
  541. else if (qdf_unlikely(ce_state->state != CE_RUNNING))
  542. event_type = HIF_TX_DESC_SOFTWARE_POST;
  543. else
  544. event_type = HIF_TX_DESC_POST;
  545. hif_record_ce_desc_event(scn, ce_state->id, event_type,
  546. (union ce_desc *)src_desc, msdu,
  547. write_index, len);
  548. write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
  549. src_ring->write_index = write_index;
  550. war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
  551. return QDF_STATUS_SUCCESS;
  552. }
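/*
 * Example (editor's sketch, placeholder names): the data path hands one
 * msdu to the CE with the already-computed download length:
 *
 *	if (ce_send_single(ce_tx_hdl, msdu, transfer_id, download_len) !=
 *	    QDF_STATUS_SUCCESS)
 *		ring is full; the caller still owns msdu;
 *
 * On QDF_STATUS_E_RESOURCES no descriptor was consumed, so the caller
 * must not treat the msdu as transmitted.
 */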
  553. /**
  554. * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
  555. * @copyeng: copy engine handle
  556. * @per_recv_context: virtual address of the nbuf
  557. * @buffer: physical address of the nbuf
  558. *
  559. * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
  560. */
  561. QDF_STATUS
  562. ce_recv_buf_enqueue(struct CE_handle *copyeng,
  563. void *per_recv_context, qdf_dma_addr_t buffer)
  564. {
  565. struct CE_state *CE_state = (struct CE_state *)copyeng;
  566. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
  567. return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
  568. per_recv_context, buffer);
  569. }
  570. qdf_export_symbol(ce_recv_buf_enqueue);
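/*
 * Example (editor's sketch): posting a mapped rx nbuf back to the copy
 * engine, using the nbuf itself as the per-recv context:
 *
 *	status = ce_recv_buf_enqueue(copyeng, (void *)nbuf,
 *				     QDF_NBUF_CB_PADDR(nbuf));
 *
 * The same physical address is later reported back through
 * ce_completed_recv_next() together with the nbuf context.
 */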
  571. void
  572. ce_send_watermarks_set(struct CE_handle *copyeng,
  573. unsigned int low_alert_nentries,
  574. unsigned int high_alert_nentries)
  575. {
  576. struct CE_state *CE_state = (struct CE_state *)copyeng;
  577. uint32_t ctrl_addr = CE_state->ctrl_addr;
  578. struct hif_softc *scn = CE_state->scn;
  579. CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
  580. CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
  581. }
  582. void
  583. ce_recv_watermarks_set(struct CE_handle *copyeng,
  584. unsigned int low_alert_nentries,
  585. unsigned int high_alert_nentries)
  586. {
  587. struct CE_state *CE_state = (struct CE_state *)copyeng;
  588. uint32_t ctrl_addr = CE_state->ctrl_addr;
  589. struct hif_softc *scn = CE_state->scn;
  590. CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
  591. low_alert_nentries);
  592. CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
  593. high_alert_nentries);
  594. }
  595. unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
  596. {
  597. struct CE_state *CE_state = (struct CE_state *)copyeng;
  598. struct CE_ring_state *src_ring = CE_state->src_ring;
  599. unsigned int nentries_mask = src_ring->nentries_mask;
  600. unsigned int sw_index;
  601. unsigned int write_index;
  602. qdf_spin_lock(&CE_state->ce_index_lock);
  603. sw_index = src_ring->sw_index;
  604. write_index = src_ring->write_index;
  605. qdf_spin_unlock(&CE_state->ce_index_lock);
  606. return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
  607. }
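/*
 * Example (editor's sketch, placeholder names): a producer can size a
 * burst against the free space reported here before posting:
 *
 *	unsigned int room = ce_send_entries_avail(copyeng);
 *
 *	if (room < frags_needed)
 *		defer, or reap completions first;
 *
 * The snapshot is taken under ce_index_lock but can be stale by the time
 * the send is issued, so ce_send() can still fail.
 */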
  608. unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
  609. {
  610. struct CE_state *CE_state = (struct CE_state *)copyeng;
  611. struct CE_ring_state *dest_ring = CE_state->dest_ring;
  612. unsigned int nentries_mask = dest_ring->nentries_mask;
  613. unsigned int sw_index;
  614. unsigned int write_index;
  615. qdf_spin_lock(&CE_state->ce_index_lock);
  616. sw_index = dest_ring->sw_index;
  617. write_index = dest_ring->write_index;
  618. qdf_spin_unlock(&CE_state->ce_index_lock);
  619. return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
  620. }
  621. /*
  622. * Guts of ce_completed_recv_next.
  623. * The caller takes responsibility for any necessary locking.
  624. */
  625. QDF_STATUS
  626. ce_completed_recv_next(struct CE_handle *copyeng,
  627. void **per_CE_contextp,
  628. void **per_transfer_contextp,
  629. qdf_dma_addr_t *bufferp,
  630. unsigned int *nbytesp,
  631. unsigned int *transfer_idp, unsigned int *flagsp)
  632. {
  633. struct CE_state *CE_state = (struct CE_state *)copyeng;
  634. QDF_STATUS status;
  635. struct hif_softc *scn = CE_state->scn;
  636. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  637. struct ce_ops *ce_services;
  638. ce_services = hif_state->ce_services;
  639. qdf_spin_lock_bh(&CE_state->ce_index_lock);
  640. status =
  641. ce_services->ce_completed_recv_next_nolock(CE_state,
  642. per_CE_contextp, per_transfer_contextp, bufferp,
  643. nbytesp, transfer_idp, flagsp);
  644. qdf_spin_unlock_bh(&CE_state->ce_index_lock);
  645. return status;
  646. }
  647. QDF_STATUS
  648. ce_revoke_recv_next(struct CE_handle *copyeng,
  649. void **per_CE_contextp,
  650. void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
  651. {
  652. struct CE_state *CE_state = (struct CE_state *)copyeng;
  653. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
  654. return hif_state->ce_services->ce_revoke_recv_next(copyeng,
  655. per_CE_contextp, per_transfer_contextp, bufferp);
  656. }
  657. QDF_STATUS
  658. ce_cancel_send_next(struct CE_handle *copyeng,
  659. void **per_CE_contextp,
  660. void **per_transfer_contextp,
  661. qdf_dma_addr_t *bufferp,
  662. unsigned int *nbytesp,
  663. unsigned int *transfer_idp,
  664. uint32_t *toeplitz_hash_result)
  665. {
  666. struct CE_state *CE_state = (struct CE_state *)copyeng;
  667. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
  668. return hif_state->ce_services->ce_cancel_send_next
  669. (copyeng, per_CE_contextp, per_transfer_contextp,
  670. bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
  671. }
  672. qdf_export_symbol(ce_cancel_send_next);
  673. QDF_STATUS
  674. ce_completed_send_next(struct CE_handle *copyeng,
  675. void **per_CE_contextp,
  676. void **per_transfer_contextp,
  677. qdf_dma_addr_t *bufferp,
  678. unsigned int *nbytesp,
  679. unsigned int *transfer_idp,
  680. unsigned int *sw_idx,
  681. unsigned int *hw_idx,
  682. unsigned int *toeplitz_hash_result)
  683. {
  684. struct CE_state *CE_state = (struct CE_state *)copyeng;
  685. struct hif_softc *scn = CE_state->scn;
  686. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  687. struct ce_ops *ce_services;
  688. QDF_STATUS status;
  689. ce_services = hif_state->ce_services;
  690. qdf_spin_lock_bh(&CE_state->ce_index_lock);
  691. status =
  692. ce_services->ce_completed_send_next_nolock(CE_state,
  693. per_CE_contextp, per_transfer_contextp,
  694. bufferp, nbytesp, transfer_idp, sw_idx,
  695. hw_idx, toeplitz_hash_result);
  696. qdf_spin_unlock_bh(&CE_state->ce_index_lock);
  697. return status;
  698. }
  699. #ifdef ATH_11AC_TXCOMPACT
  700. /* CE engine descriptor reap
  701. * Similar to ce_per_engine_service; the only difference is that
  702. * ce_per_engine_service handles both receive and reaping of completed
  703. * descriptors, while this function only reaps Tx completion
  704. * descriptors. It is called from the threshold reap poll routine
  705. * hif_send_complete_check, so it must not contain any receive
  706. * functionality.
  707. */
  708. void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
  709. {
  710. void *CE_context;
  711. void *transfer_context;
  712. qdf_dma_addr_t buf;
  713. unsigned int nbytes;
  714. unsigned int id;
  715. unsigned int sw_idx, hw_idx;
  716. uint32_t toeplitz_hash_result;
  717. struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
  718. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  719. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  720. return;
  721. hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
  722. NULL, NULL, 0, 0);
  723. /* Since this function is called from both user context and
  724. * tasklet context, the spinlock has to disable bottom halves.
  725. * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
  726. * enabled in TX polling mode. If that is not the case, more
  727. * bottom-half spinlock changes are needed. Due to data path
  728. * performance concerns, after internal discussion we decided
  729. * to make a minimal change, i.e., only address the issue seen
  730. * in this function. The possible downside of this minimal
  731. * change is that, if some other function is later opened up to
  732. * user context as well, those cases will also need their
  733. * spin_lock calls changed to spin_lock_bh.
  734. */
  735. qdf_spin_lock_bh(&CE_state->ce_index_lock);
  736. if (CE_state->send_cb) {
  737. {
  738. struct ce_ops *ce_services = hif_state->ce_services;
  739. /* Pop completed send buffers and call the
  740. * registered send callback for each
  741. */
  742. while (ce_services->ce_completed_send_next_nolock
  743. (CE_state, &CE_context,
  744. &transfer_context, &buf,
  745. &nbytes, &id, &sw_idx, &hw_idx,
  746. &toeplitz_hash_result) ==
  747. QDF_STATUS_SUCCESS) {
  748. if (ce_id != CE_HTT_H2T_MSG) {
  749. qdf_spin_unlock_bh(
  750. &CE_state->ce_index_lock);
  751. CE_state->send_cb(
  752. (struct CE_handle *)
  753. CE_state, CE_context,
  754. transfer_context, buf,
  755. nbytes, id, sw_idx, hw_idx,
  756. toeplitz_hash_result);
  757. qdf_spin_lock_bh(
  758. &CE_state->ce_index_lock);
  759. } else {
  760. struct HIF_CE_pipe_info *pipe_info =
  761. (struct HIF_CE_pipe_info *)
  762. CE_context;
  763. qdf_spin_lock_bh(&pipe_info->
  764. completion_freeq_lock);
  765. pipe_info->num_sends_allowed++;
  766. qdf_spin_unlock_bh(&pipe_info->
  767. completion_freeq_lock);
  768. }
  769. }
  770. }
  771. }
  772. qdf_spin_unlock_bh(&CE_state->ce_index_lock);
  773. hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
  774. NULL, NULL, 0, 0);
  775. Q_TARGET_ACCESS_END(scn);
  776. }
  777. #endif /*ATH_11AC_TXCOMPACT */
  778. #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
  779. static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
  780. {
  781. // QDF_IS_EPPING_ENABLED is a pre-Lithium feature.
  782. // CE4 completion is enabled only on Lithium and later,
  783. // so there is no need to check for EPPING.
  784. return true;
  785. }
  786. #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
  787. static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
  788. {
  789. if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
  790. return true;
  791. else
  792. return false;
  793. }
  794. #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
  795. /*
  796. * ce_engine_service_reg:
  797. *
  798. * Called from ce_per_engine_service and goes through the regular interrupt
  799. * handling that does not involve the WLAN fast path feature.
  800. *
  801. * Returns void
  802. */
  803. void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
  804. {
  805. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  806. uint32_t ctrl_addr = CE_state->ctrl_addr;
  807. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  808. void *CE_context;
  809. void *transfer_context;
  810. qdf_dma_addr_t buf;
  811. unsigned int nbytes;
  812. unsigned int id;
  813. unsigned int flags;
  814. unsigned int more_comp_cnt = 0;
  815. unsigned int more_snd_comp_cnt = 0;
  816. unsigned int sw_idx, hw_idx;
  817. uint32_t toeplitz_hash_result;
  818. uint32_t mode = hif_get_conparam(scn);
  819. more_completions:
  820. if (CE_state->recv_cb) {
  821. /* Pop completed recv buffers and call
  822. * the registered recv callback for each
  823. */
  824. while (hif_state->ce_services->ce_completed_recv_next_nolock
  825. (CE_state, &CE_context, &transfer_context,
  826. &buf, &nbytes, &id, &flags) ==
  827. QDF_STATUS_SUCCESS) {
  828. qdf_spin_unlock(&CE_state->ce_index_lock);
  829. CE_state->recv_cb((struct CE_handle *)CE_state,
  830. CE_context, transfer_context, buf,
  831. nbytes, id, flags);
  832. qdf_spin_lock(&CE_state->ce_index_lock);
  833. /*
  834. * EV #112693 -
  835. * [Peregrine][ES1][WB342][Win8x86][Performance]
  836. * BSoD_0x133 occurred in VHT80 UDP_DL
  837. * Break out of the DPC by force if the number of loops in
  838. * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
  839. * to avoid spending too long in the DPC for each
  840. * interrupt. Schedule another DPC to avoid
  841. * data loss if a force-break action was taken.
  842. * This currently applies to Windows OS only;
  843. * Linux/macOS can extend it to their
  844. * platforms if necessary.
  845. */
  846. /* Break the receive processes by
  847. * force if force_break set up
  848. */
  849. if (qdf_unlikely(CE_state->force_break)) {
  850. qdf_atomic_set(&CE_state->rx_pending, 1);
  851. return;
  852. }
  853. }
  854. }
  855. /*
  856. * Attention: we may hit a potential infinite loop in the while
  857. * loop below during a send stress test.
  858. * Resolve it the same way as the receive case (refer to EV #112693).
  859. */
  860. if (CE_state->send_cb) {
  861. /* Pop completed send buffers and call
  862. * the registered send callback for each
  863. */
  864. #ifdef ATH_11AC_TXCOMPACT
  865. while (hif_state->ce_services->ce_completed_send_next_nolock
  866. (CE_state, &CE_context,
  867. &transfer_context, &buf, &nbytes,
  868. &id, &sw_idx, &hw_idx,
  869. &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
  870. if (check_ce_id_and_epping_enabled(CE_id, mode)) {
  871. qdf_spin_unlock(&CE_state->ce_index_lock);
  872. CE_state->send_cb((struct CE_handle *)CE_state,
  873. CE_context, transfer_context,
  874. buf, nbytes, id, sw_idx,
  875. hw_idx, toeplitz_hash_result);
  876. qdf_spin_lock(&CE_state->ce_index_lock);
  877. } else {
  878. struct HIF_CE_pipe_info *pipe_info =
  879. (struct HIF_CE_pipe_info *)CE_context;
  880. qdf_spin_lock_bh(&pipe_info->
  881. completion_freeq_lock);
  882. pipe_info->num_sends_allowed++;
  883. qdf_spin_unlock_bh(&pipe_info->
  884. completion_freeq_lock);
  885. }
  886. }
  887. #else /*ATH_11AC_TXCOMPACT */
  888. while (hif_state->ce_services->ce_completed_send_next_nolock
  889. (CE_state, &CE_context,
  890. &transfer_context, &buf, &nbytes,
  891. &id, &sw_idx, &hw_idx,
  892. &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
  893. qdf_spin_unlock(&CE_state->ce_index_lock);
  894. CE_state->send_cb((struct CE_handle *)CE_state,
  895. CE_context, transfer_context, buf,
  896. nbytes, id, sw_idx, hw_idx,
  897. toeplitz_hash_result);
  898. qdf_spin_lock(&CE_state->ce_index_lock);
  899. }
  900. #endif /*ATH_11AC_TXCOMPACT */
  901. }
  902. more_watermarks:
  903. if (CE_state->misc_cbs) {
  904. if (CE_state->watermark_cb &&
  905. hif_state->ce_services->watermark_int(CE_state,
  906. &flags)) {
  907. qdf_spin_unlock(&CE_state->ce_index_lock);
  908. /* Convert HW IS bits to software flags */
  909. CE_state->watermark_cb((struct CE_handle *)CE_state,
  910. CE_state->wm_context, flags);
  911. qdf_spin_lock(&CE_state->ce_index_lock);
  912. }
  913. }
  914. /*
  915. * Clear the misc interrupts (watermark) that were handled above,
  916. * and that will be checked again below.
  917. * Clear and check for copy-complete interrupts again, just in case
  918. * more copy completions happened while the misc interrupts were being
  919. * handled.
  920. */
  921. if (!ce_srng_based(scn)) {
  922. if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
  923. CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
  924. CE_WATERMARK_MASK |
  925. HOST_IS_COPY_COMPLETE_MASK);
  926. } else {
  927. qdf_atomic_set(&CE_state->rx_pending, 0);
  928. hif_err_rl("%s: target access is not allowed",
  929. __func__);
  930. return;
  931. }
  932. }
  933. /*
  934. * Now that per-engine interrupts are cleared, verify that
  935. * no recv interrupts arrive while processing send interrupts,
  936. * and no recv or send interrupts happened while processing
  937. * misc interrupts. Go back and check again. Keep checking until
  938. * we find no more events to process.
  939. */
  940. if (CE_state->recv_cb &&
  941. hif_state->ce_services->ce_recv_entries_done_nolock(scn,
  942. CE_state)) {
  943. if (QDF_IS_EPPING_ENABLED(mode) ||
  944. more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
  945. goto more_completions;
  946. } else {
  947. if (!ce_srng_based(scn)) {
  948. hif_err_rl(
  949. "Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
  950. CE_state->id,
  951. CE_state->dest_ring->nentries_mask,
  952. CE_state->dest_ring->sw_index,
  953. CE_DEST_RING_READ_IDX_GET(scn,
  954. CE_state->ctrl_addr));
  955. }
  956. }
  957. }
  958. if (CE_state->send_cb &&
  959. hif_state->ce_services->ce_send_entries_done_nolock(scn,
  960. CE_state)) {
  961. if (QDF_IS_EPPING_ENABLED(mode) ||
  962. more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
  963. goto more_completions;
  964. } else {
  965. if (!ce_srng_based(scn)) {
  966. hif_err_rl(
  967. "Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
  968. CE_state->id,
  969. CE_state->src_ring->nentries_mask,
  970. CE_state->src_ring->sw_index,
  971. CE_state->src_ring->hw_index,
  972. CE_state->src_ring->write_index,
  973. CE_SRC_RING_READ_IDX_GET(scn,
  974. CE_state->ctrl_addr));
  975. }
  976. }
  977. }
  978. if (CE_state->misc_cbs && CE_state->watermark_cb) {
  979. if (hif_state->ce_services->watermark_int(CE_state, &flags))
  980. goto more_watermarks;
  981. }
  982. qdf_atomic_set(&CE_state->rx_pending, 0);
  983. }
  984. /*
  985. * Guts of interrupt handler for per-engine interrupts on a particular CE.
  986. *
  987. * Invokes registered callbacks for recv_complete,
  988. * send_complete, and watermarks.
  989. *
  990. * Returns: number of messages processed
  991. */
  992. int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
  993. {
  994. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  995. if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
  996. return CE_state->receive_count;
  997. if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
  998. hif_err("[premature rc=0]");
  999. return 0; /* no work done */
  1000. }
  1001. /* Clear force_break flag and re-initialize receive_count to 0 */
  1002. CE_state->receive_count = 0;
  1003. CE_state->force_break = 0;
  1004. CE_state->ce_service_start_time = qdf_time_sched_clock();
  1005. CE_state->ce_service_yield_time =
  1006. CE_state->ce_service_start_time +
  1007. hif_get_ce_service_max_yield_time(
  1008. (struct hif_opaque_softc *)scn);
  1009. qdf_spin_lock(&CE_state->ce_index_lock);
  1010. CE_state->service(scn, CE_id);
  1011. qdf_spin_unlock(&CE_state->ce_index_lock);
  1012. if (Q_TARGET_ACCESS_END(scn) < 0)
  1013. hif_err("<--[premature rc=%d]", CE_state->receive_count);
  1014. return CE_state->receive_count;
  1015. }
  1016. qdf_export_symbol(ce_per_engine_service);
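/*
 * Example (editor's sketch): an interrupt handler or tasklet services a
 * single engine and uses the return value as the amount of rx work done,
 * e.g. for NAPI accounting:
 *
 *	int work_done = ce_per_engine_service(scn, ce_id);
 *
 * ce_per_engine_service_any() below shows the multi-CE variant driven by
 * the interrupt summary register.
 */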
  1017. /*
  1018. * Handler for per-engine interrupts on ALL active CEs.
  1019. * This is used in cases where the system is sharing a
  1020. * single interrupt for all CEs
  1021. */
  1022. void ce_per_engine_service_any(int irq, struct hif_softc *scn)
  1023. {
  1024. int CE_id;
  1025. uint32_t intr_summary;
  1026. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1027. return;
  1028. if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
  1029. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1030. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1031. if (qdf_atomic_read(&CE_state->rx_pending)) {
  1032. qdf_atomic_set(&CE_state->rx_pending, 0);
  1033. ce_per_engine_service(scn, CE_id);
  1034. }
  1035. }
  1036. Q_TARGET_ACCESS_END(scn);
  1037. return;
  1038. }
  1039. intr_summary = CE_INTERRUPT_SUMMARY(scn);
  1040. for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
  1041. if (intr_summary & (1 << CE_id))
  1042. intr_summary &= ~(1 << CE_id);
  1043. else
  1044. continue; /* no intr pending on this CE */
  1045. ce_per_engine_service(scn, CE_id);
  1046. }
  1047. Q_TARGET_ACCESS_END(scn);
  1048. }
  1049. /*Iterate the CE_state list and disable the compl interrupt
  1050. * if it has been registered already.
  1051. */
  1052. void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
  1053. {
  1054. int CE_id;
  1055. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1056. return;
  1057. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1058. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1059. uint32_t ctrl_addr = CE_state->ctrl_addr;
  1060. /* if the interrupt is currently enabled, disable it */
  1061. if (!CE_state->disable_copy_compl_intr
  1062. && (CE_state->send_cb || CE_state->recv_cb))
  1063. CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
  1064. if (CE_state->watermark_cb)
  1065. CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
  1066. }
  1067. Q_TARGET_ACCESS_END(scn);
  1068. }
  1069. void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
  1070. {
  1071. int CE_id;
  1072. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1073. return;
  1074. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1075. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1076. uint32_t ctrl_addr = CE_state->ctrl_addr;
  1077. /*
  1078. * If the CE is supposed to have copy complete interrupts
  1079. * enabled (i.e. there a callback registered, and the
  1080. * "disable" flag is not set), then re-enable the interrupt.
  1081. */
  1082. if (!CE_state->disable_copy_compl_intr
  1083. && (CE_state->send_cb || CE_state->recv_cb))
  1084. CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
  1085. if (CE_state->watermark_cb)
  1086. CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
  1087. }
  1088. Q_TARGET_ACCESS_END(scn);
  1089. }
  1090. /**
  1091. * ce_send_cb_register(): register completion handler
  1092. * @copyeng: CE_state representing the ce we are adding the behavior to
  1093. * @fn_ptr: callback that the ce should use when processing tx completions
  1094. * @disable_interrupts: whether interrupts should be disabled or not
  1095. *
  1096. * Caller should guarantee that no transactions are in progress before
  1097. * switching the callback function.
  1098. *
  1099. * Registers the send context before the fn pointer so that if the cb is valid
  1100. * the context should be valid.
  1101. *
  1102. * Beware that currently this function will enable completion interrupts.
  1103. */
  1104. void
  1105. ce_send_cb_register(struct CE_handle *copyeng,
  1106. ce_send_cb fn_ptr,
  1107. void *ce_send_context, int disable_interrupts)
  1108. {
  1109. struct CE_state *CE_state = (struct CE_state *)copyeng;
  1110. struct hif_softc *scn;
  1111. struct HIF_CE_state *hif_state;
  1112. if (!CE_state) {
  1113. hif_err("Error CE state = NULL");
  1114. return;
  1115. }
  1116. scn = CE_state->scn;
  1117. hif_state = HIF_GET_CE_STATE(scn);
  1118. if (!hif_state) {
  1119. hif_err("Error HIF state = NULL");
  1120. return;
  1121. }
  1122. CE_state->send_context = ce_send_context;
  1123. CE_state->send_cb = fn_ptr;
  1124. hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
  1125. disable_interrupts);
  1126. }
  1127. qdf_export_symbol(ce_send_cb_register);
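/*
 * Example (editor's sketch): registering a tx-completion handler. The
 * callback prototype below is inferred from the invocations in
 * ce_engine_service_reg() above and the local types used there; the
 * handler and context names are placeholders:
 *
 *	static void my_tx_done(struct CE_handle *ce, void *ce_ctx,
 *			       void *transfer_ctx, qdf_dma_addr_t buf,
 *			       unsigned int nbytes, unsigned int id,
 *			       unsigned int sw_idx, unsigned int hw_idx,
 *			       uint32_t toeplitz_hash_result)
 *	{
 *		release transfer_ctx, account nbytes, etc.;
 *	}
 *
 *	ce_send_cb_register(copyeng, my_tx_done, my_ctx, 0);
 */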
  1128. /**
  1129. * ce_recv_cb_register(): register completion handler
  1130. * @copyeng: CE_state representing the ce we are adding the behavior to
  1131. * @fn_ptr: callback that the ce should use when processing rx completions
  1132. * @disable_interrupts: whether interrupts should be disabled or not
  1133. *
  1134. * Registers the send context before the fn pointer so that if the cb is valid
  1135. * the context should be valid.
  1136. *
  1137. * Caller should guarantee that no transactions are in progress before
  1138. * switching the callback function.
  1139. */
  1140. void
  1141. ce_recv_cb_register(struct CE_handle *copyeng,
  1142. CE_recv_cb fn_ptr,
  1143. void *CE_recv_context, int disable_interrupts)
  1144. {
  1145. struct CE_state *CE_state = (struct CE_state *)copyeng;
  1146. struct hif_softc *scn;
  1147. struct HIF_CE_state *hif_state;
  1148. if (!CE_state) {
  1149. hif_err("ERROR CE state = NULL");
  1150. return;
  1151. }
  1152. scn = CE_state->scn;
  1153. hif_state = HIF_GET_CE_STATE(scn);
  1154. if (!hif_state) {
  1155. hif_err("Error HIF state = NULL");
  1156. return;
  1157. }
  1158. CE_state->recv_context = CE_recv_context;
  1159. CE_state->recv_cb = fn_ptr;
  1160. hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
  1161. disable_interrupts);
  1162. }
  1163. qdf_export_symbol(ce_recv_cb_register);
  1164. /**
  1165. * ce_watermark_cb_register(): register completion handler
  1166. * @copyeng: CE_state representing the ce we are adding the behavior to
  1167. * @fn_ptr: callback that the ce should use when processing watermark events
  1168. *
  1169. * Caller should guarantee that no watermark events are being processed before
  1170. * switching the callback function.
  1171. */
  1172. void
  1173. ce_watermark_cb_register(struct CE_handle *copyeng,
  1174. CE_watermark_cb fn_ptr, void *CE_wm_context)
  1175. {
  1176. struct CE_state *CE_state = (struct CE_state *)copyeng;
  1177. struct hif_softc *scn = CE_state->scn;
  1178. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1179. CE_state->watermark_cb = fn_ptr;
  1180. CE_state->wm_context = CE_wm_context;
  1181. hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
  1182. 0);
  1183. if (fn_ptr)
  1184. CE_state->misc_cbs = 1;
  1185. }
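/*
 * Example (editor's sketch, placeholder names): pairing a watermark
 * callback with the source-ring thresholds set via
 * ce_send_watermarks_set():
 *
 *	ce_watermark_cb_register(copyeng, my_wm_cb, my_wm_ctx);
 *	ce_send_watermarks_set(copyeng, low_nentries, high_nentries);
 *
 * The callback receives the CE handle, the wm_context registered here
 * and the decoded watermark flags (see the watermark handling in
 * ce_engine_service_reg()).
 */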
  1186. bool ce_get_rx_pending(struct hif_softc *scn)
  1187. {
  1188. int CE_id;
  1189. for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
  1190. struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
  1191. if (qdf_atomic_read(&CE_state->rx_pending))
  1192. return true;
  1193. }
  1194. return false;
  1195. }
  1196. /**
  1197. * ce_check_rx_pending() - ce_check_rx_pending
  1198. * @CE_state: context of the copy engine to check
  1199. *
  1200. * Return: true if the per-engine service
  1201. * didn't process all the rx descriptors.
  1202. */
  1203. bool ce_check_rx_pending(struct CE_state *CE_state)
  1204. {
  1205. if (qdf_atomic_read(&CE_state->rx_pending))
  1206. return true;
  1207. else
  1208. return false;
  1209. }
  1210. qdf_export_symbol(ce_check_rx_pending);
  1211. #ifdef IPA_OFFLOAD
  1212. #ifdef QCN7605_SUPPORT
  1213. static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
  1214. {
  1215. u_int32_t ctrl_addr = CE_state->ctrl_addr;
  1216. struct hif_softc *scn = CE_state->scn;
  1217. qdf_dma_addr_t wr_index_addr;
  1218. wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
  1219. return wr_index_addr;
  1220. }
  1221. #else
  1222. static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
  1223. {
  1224. struct hif_softc *scn = CE_state->scn;
  1225. qdf_dma_addr_t wr_index_addr;
  1226. wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
  1227. SR_WR_INDEX_ADDRESS;
  1228. return wr_index_addr;
  1229. }
  1230. #endif
  1231. /**
  1232. * ce_ipa_get_resource() - get uc resource on copyengine
  1233. * @ce: copyengine context
  1234. * @ce_sr: copyengine source ring resource info
  1235. * @ce_sr_ring_size: copyengine source ring size
  1236. * @ce_reg_paddr: copyengine register physical address
  1237. *
  1238. * The copy engine should release these resources to the microcontroller.
  1239. * The microcontroller needs:
  1240. * - Copy engine source descriptor base address
  1241. * - Copy engine source descriptor size
  1242. * - PCI BAR address to access the copy engine registers
  1243. *
  1244. * Return: None
  1245. */
  1246. void ce_ipa_get_resource(struct CE_handle *ce,
  1247. qdf_shared_mem_t **ce_sr,
  1248. uint32_t *ce_sr_ring_size,
  1249. qdf_dma_addr_t *ce_reg_paddr)
  1250. {
  1251. struct CE_state *CE_state = (struct CE_state *)ce;
  1252. uint32_t ring_loop;
  1253. struct CE_src_desc *ce_desc;
  1254. qdf_dma_addr_t phy_mem_base;
  1255. struct hif_softc *scn = CE_state->scn;
  1256. if (CE_UNUSED == CE_state->state) {
  1257. *qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
  1258. &CE_state->scn->ipa_ce_ring->mem_info) = 0;
  1259. *ce_sr_ring_size = 0;
  1260. return;
  1261. }
  1262. /* Update default value for descriptor */
  1263. for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
  1264. ring_loop++) {
  1265. ce_desc = (struct CE_src_desc *)
  1266. ((char *)CE_state->src_ring->base_addr_owner_space +
  1267. ring_loop * (sizeof(struct CE_src_desc)));
  1268. CE_IPA_RING_INIT(ce_desc);
  1269. }
  1270. /* Get BAR address */
  1271. hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
  1272. *ce_sr = CE_state->scn->ipa_ce_ring;
  1273. *ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
  1274. sizeof(struct CE_src_desc));
  1275. *ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
  1276. }
  1277. #endif /* IPA_OFFLOAD */
  1278. #ifdef HIF_CE_DEBUG_DATA_BUF
  1279. /**
  1280. * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
  1281. * @buf: buffer to copy to
  1282. * @pos: Current position till which the buf is filled
  1283. * @data: Data to be copied
  1284. * @data_len: Length of the data to be copied
  1285. */
static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
				       uint8_t *data, uint32_t data_len)
{
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
			CE_DEBUG_MAX_DATA_BUF_SIZE);

	if ((data_len > 0) && data) {
		if (data_len < 16) {
			hex_dump_to_buffer(data,
					   CE_DEBUG_DATA_PER_ROW,
					   16, 1, buf + pos,
					   (ssize_t)PAGE_SIZE - pos,
					   false);
			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
		} else {
			uint32_t rows = (data_len / 16) + 1;
			uint32_t row = 0;

			for (row = 0; row < rows; row++) {
				hex_dump_to_buffer(data + (row * 16),
						   CE_DEBUG_DATA_PER_ROW,
						   16, 1, buf + pos,
						   (ssize_t)PAGE_SIZE
						   - pos, false);
				pos +=
				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
				pos += snprintf(buf + pos, PAGE_SIZE - pos,
						"\n");
			}
		}
	}

	return pos;
}
#endif
/*
 * Note: for MCL builds, HIF_CONFIG_SLUB_DEBUG_ON also needs to be checked
 * in the conditional below.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
static const char *ce_event_type_to_str(enum hif_ce_event_type type)
{
	switch (type) {
	case HIF_RX_DESC_POST:
		return "HIF_RX_DESC_POST";
	case HIF_RX_DESC_COMPLETION:
		return "HIF_RX_DESC_COMPLETION";
	case HIF_TX_GATHER_DESC_POST:
		return "HIF_TX_GATHER_DESC_POST";
	case HIF_TX_DESC_POST:
		return "HIF_TX_DESC_POST";
	case HIF_TX_DESC_SOFTWARE_POST:
		return "HIF_TX_DESC_SOFTWARE_POST";
	case HIF_TX_DESC_COMPLETION:
		return "HIF_TX_DESC_COMPLETION";
	case FAST_RX_WRITE_INDEX_UPDATE:
		return "FAST_RX_WRITE_INDEX_UPDATE";
	case FAST_RX_SOFTWARE_INDEX_UPDATE:
		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_UPDATE:
		return "FAST_TX_WRITE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
	case FAST_TX_SOFTWARE_INDEX_UPDATE:
		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
	case RESUME_WRITE_INDEX_UPDATE:
		return "RESUME_WRITE_INDEX_UPDATE";
	case HIF_IRQ_EVENT:
		return "HIF_IRQ_EVENT";
	case HIF_CE_TASKLET_ENTRY:
		return "HIF_CE_TASKLET_ENTRY";
	case HIF_CE_TASKLET_RESCHEDULE:
		return "HIF_CE_TASKLET_RESCHEDULE";
	case HIF_CE_TASKLET_EXIT:
		return "HIF_CE_TASKLET_EXIT";
	case HIF_CE_REAP_ENTRY:
		return "HIF_CE_REAP_ENTRY";
	case HIF_CE_REAP_EXIT:
		return "HIF_CE_REAP_EXIT";
	case NAPI_SCHEDULE:
		return "NAPI_SCHEDULE";
	case NAPI_POLL_ENTER:
		return "NAPI_POLL_ENTER";
	case NAPI_COMPLETE:
		return "NAPI_COMPLETE";
	case NAPI_POLL_EXIT:
		return "NAPI_POLL_EXIT";
	case HIF_RX_NBUF_ALLOC_FAILURE:
		return "HIF_RX_NBUF_ALLOC_FAILURE";
	case HIF_RX_NBUF_MAP_FAILURE:
		return "HIF_RX_NBUF_MAP_FAILURE";
	case HIF_RX_NBUF_ENQUEUE_FAILURE:
		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
	default:
		return "invalid";
	}
}
/**
 * hif_dump_desc_event() - dump the selected CE descriptor history event
 * @scn: HIF context
 * @buf: buffer the event record is written into
 *
 * Dumps the event at the CE id and history index previously selected via
 * hif_input_desc_trace_buf_index().
 *
 * Return: number of bytes written to buf, or -EINVAL on error
 */
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
{
	struct hif_ce_desc_event *event;
	uint64_t secs, usecs;
	ssize_t len = 0;
	struct ce_desc_hist *ce_hist = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (ce_hist->hist_id >= CE_COUNT_MAX ||
	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];

	if (!hist_ev) {
		qdf_print("Low Memory");
		return -EINVAL;
	}

	event = &hist_ev[ce_hist->hist_index];

	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);

	len += snprintf(buf, PAGE_SIZE - len,
			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
			secs, usecs, ce_hist->hist_id,
			ce_event_type_to_str(event->type),
			event->index, event->memory);

#ifdef HIF_CE_DEBUG_DATA_BUF
	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
			event->actual_data_len);
#endif

	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");

	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
			   16, 1, buf + len,
			   (ssize_t)PAGE_SIZE - len, false);
	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

#ifdef HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[ce_hist->hist_id])
		len = hif_dump_desc_data_buf(buf, len, event->data,
					     (event->actual_data_len <
					      CE_DEBUG_MAX_DATA_BUF_SIZE) ?
					     event->actual_data_len :
					     CE_DEBUG_MAX_DATA_BUF_SIZE);
#endif /* HIF_CE_DEBUG_DATA_BUF */

	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");

	return len;
}
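/*
 * Illustrative sketch (not part of this driver): a sysfs/debugfs "show"
 * style handler could expose the selected history entry by passing its
 * page-sized output buffer to hif_dump_desc_event(). The hif_ctx_from_dev()
 * lookup below is hypothetical; a real hook would obtain the hif_softc
 * from its own driver context.
 *
 *	static ssize_t ce_desc_event_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *buf)
 *	{
 *		struct hif_softc *scn = hif_ctx_from_dev(dev);
 *
 *		return hif_dump_desc_event(scn, buf);
 *	}
 */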
/*
 * hif_input_desc_trace_buf_index() -
 * API to set the CE id and CE debug storage buffer index
 *
 * @scn: HIF context
 * @buf: data received from the user ("<ce_id> <index>")
 * @size: length of the input buffer
 *
 * Return: size on success, -EINVAL on failure
 */
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		qdf_nofl_err("%s: Invalid input buffer.", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
		   (unsigned int *)&ce_hist->hist_index) != 2) {
		qdf_nofl_err("%s: Invalid input value.", __func__);
		return -EINVAL;
	}

	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
	    (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	return size;
}
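/*
 * Illustrative sketch (not part of this driver): a "store" style hook that
 * forwards user input such as "2 10" (CE id 2, history index 10) to
 * hif_input_desc_trace_buf_index(). hif_ctx_from_dev() is again a
 * hypothetical lookup helper.
 *
 *	static ssize_t ce_desc_index_store(struct device *dev,
 *					   struct device_attribute *attr,
 *					   const char *buf, size_t count)
 *	{
 *		struct hif_softc *scn = hif_ctx_from_dev(dev);
 *
 *		return hif_input_desc_trace_buf_index(scn, buf, count);
 *	}
 */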
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

#ifdef HIF_CE_DEBUG_DATA_BUF
/*
 * hif_ce_en_desc_hist() -
 * API to enable/disable recording of CE descriptor data history
 *
 * @scn: HIF context
 * @buf: data received from the user ("<ce_id> <1/0>")
 * @size: length of the input buffer
 *
 * Starts or stops recording the CE descriptor data history for the given CE
 *
 * Return: size on success, -EINVAL on failure
 */
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;
	uint32_t cfg = 0;
	uint32_t ce_id = 0;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		qdf_nofl_err("%s: Invalid input buffer.", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
		   (unsigned int *)&cfg) != 2) {
		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
			     __func__);
		return -EINVAL;
	}

	if (ce_id >= CE_COUNT_MAX) {
		qdf_print("Invalid value CE Id");
		return -EINVAL;
	}

	/* cfg is unsigned, so only the upper bound needs to be checked */
	if (cfg > 1) {
		qdf_print("Invalid values: enter 0 or 1");
		return -EINVAL;
	}

	if (!ce_hist->hist_ev[ce_id])
		return -EINVAL;

	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
	if (cfg == 1) {
		if (ce_hist->data_enable[ce_id] == 1) {
			qdf_debug("Already Enabled");
		} else {
			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
							== QDF_STATUS_E_NOMEM) {
				ce_hist->data_enable[ce_id] = 0;
				qdf_err("%s:Memory Alloc failed", __func__);
			} else
				ce_hist->data_enable[ce_id] = 1;
		}
	} else if (cfg == 0) {
		if (ce_hist->data_enable[ce_id] == 0) {
			qdf_debug("Already Disabled");
		} else {
			ce_hist->data_enable[ce_id] = 0;
			free_mem_ce_debug_hist_data(scn, ce_id);
		}
	}
	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);

	return size;
}
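/*
 * Illustrative usage (not part of this driver): enable data capture for
 * CE 2 and later disable it again through this API, assuming scn is a
 * valid HIF context obtained by the caller.
 *
 *	hif_ce_en_desc_hist(scn, "2 1", strlen("2 1"));
 *	hif_ce_en_desc_hist(scn, "2 0", strlen("2 0"));
 */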
/*
 * hif_disp_ce_enable_desc_data_hist() -
 * API to display the per-CE value of data_enable
 *
 * @scn: HIF context
 * @buf: buffer to copy the data into
 *
 * Return: total length copied
 */
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
{
	ssize_t len = 0;
	uint32_t ce_id = 0;
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
				ce_id, ce_hist->data_enable[ce_id]);
	}

	return len;
}
#endif /* HIF_CE_DEBUG_DATA_BUF */
#ifdef OL_ATH_SMART_LOGGING
#define GUARD_SPACE 10
#define LOG_ID_SZ 4
/*
 * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
 * @src_ring: SRC ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE SRC ring descriptors and the buffers pointed to by them
 * into the given buf; skb_sz is the max buffer size to be copied
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
				    uint8_t *buf_cur, uint8_t *buf_init,
				    uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_src_desc *src_ring_base;
	uint32_t len, entry;
	struct CE_src_desc *src_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);

	if (available_buf < (len + GUARD_SPACE)) {
		buf_cur = buf_init;
	}

	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < src_ring->nentries; entry++) {
		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
		nbuf = src_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_src_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);

			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);

			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, src_desc,
				     sizeof(struct CE_src_desc));
			buf_cur += sizeof(struct CE_src_desc);

			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}

	return buf_cur;
}
/*
 * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
 * @dest_ring: DEST ring state
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Dumps all the CE DEST ring descriptors and the buffers pointed to by them
 * into the given buf; skb_sz is the max buffer size to be copied
 *
 * Return: Current pointer in ring buffer
 */
static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
				     uint8_t *buf_cur, uint8_t *buf_init,
				     uint32_t buf_sz, uint32_t skb_sz)
{
	struct CE_dest_desc *dest_ring_base;
	uint32_t len, entry;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t nbuf;
	uint32_t available_buf;

	dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	len = sizeof(struct CE_ring_state);
	available_buf = buf_sz - (buf_cur - buf_init);

	if (available_buf < (len + GUARD_SPACE)) {
		buf_cur = buf_init;
	}

	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
	buf_cur += sizeof(struct CE_ring_state);

	for (entry = 0; entry < dest_ring->nentries; entry++) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
		nbuf = dest_ring->per_transfer_context[entry];
		if (nbuf) {
			uint32_t skb_len = qdf_nbuf_len(nbuf);
			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);

			len = sizeof(struct CE_dest_desc) + skb_cp_len
				+ LOG_ID_SZ + sizeof(skb_cp_len);
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);

			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
					    skb_cp_len);

			if (skb_cp_len) {
				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
					     skb_cp_len);
				buf_cur += skb_cp_len;
			}
		} else {
			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
			available_buf = buf_sz - (buf_cur - buf_init);
			if (available_buf < (len + GUARD_SPACE)) {
				buf_cur = buf_init;
			}
			qdf_mem_copy(buf_cur, dest_desc,
				     sizeof(struct CE_dest_desc));
			buf_cur += sizeof(struct CE_dest_desc);

			available_buf = buf_sz - (buf_cur - buf_init);
			buf_cur += snprintf(buf_cur, available_buf, "NUL");
		}
	}

	return buf_cur;
}
/**
 * hif_log_dump_ce() - Copy the given CE's SRC/DEST ring to buf
 * @scn: HIF context
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and the buffers pointed to by them into the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz)
{
	struct CE_state *ce_state;
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;

	ce_state = scn->ce_id_to_state[ce];
	src_ring = ce_state->src_ring;
	dest_ring = ce_state->dest_ring;

	if (src_ring) {
		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
					      buf_init, buf_sz, skb_sz);
	} else if (dest_ring) {
		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
					       buf_init, buf_sz, skb_sz);
	}

	return buf_cur;
}
qdf_export_symbol(hif_log_dump_ce);
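/*
 * Illustrative caller sketch (not part of this driver): a smart-logging
 * collector might walk every copy engine and append each ring dump to a
 * pre-allocated log buffer. The log_buf, log_buf_sz and MAX_SKB_COPY names
 * are hypothetical placeholders.
 *
 *	uint8_t *cur = log_buf;
 *	uint32_t ce_id;
 *
 *	for (ce_id = 0; ce_id < scn->ce_count; ce_id++)
 *		cur = hif_log_dump_ce(scn, cur, log_buf, log_buf_sz,
 *				      ce_id, MAX_SKB_COPY);
 */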
#endif /* OL_ATH_SMART_LOGGING */