  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/kernel.h>
  7. #include <linux/delay.h>
  8. #include "ipa_i.h"
  9. #include "ipahal/ipahal.h"
  10. #include "ipahal/ipahal_hw_stats.h"
/*
 * IPA_CLIENT_BIT_32(client) - single-bit pipe mask for a client
 *
 * Evaluates to (1 << ep) where ep is the endpoint (pipe) index mapped to
 * @client, or 0 when the client has no valid mapping or its pipe index is
 * outside the 32-bit stats mask (>= IPA_STATS_MAX_PIPE_BIT).
 * NOTE: evaluates ipa3_get_ep_mapping(client) up to three times.
 */
#define IPA_CLIENT_BIT_32(client) \
((ipa3_get_ep_mapping(client) >= 0 && \
ipa3_get_ep_mapping(client) < IPA_STATS_MAX_PIPE_BIT) ? \
(1 << ipa3_get_ep_mapping(client)) : 0)
  15. int ipa_hw_stats_init(void)
  16. {
  17. int ret = 0, ep_index;
  18. struct ipa_teth_stats_endpoints *teth_stats_init;
  19. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
  20. return 0;
  21. /* initialize stats here */
  22. ipa3_ctx->hw_stats.enabled = true;
  23. teth_stats_init = kzalloc(sizeof(*teth_stats_init), GFP_KERNEL);
  24. if (!teth_stats_init) {
  25. IPAERR("mem allocated failed!\n");
  26. return -ENOMEM;
  27. }
  28. /* enable prod mask */
  29. if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
  30. teth_stats_init->prod_mask = (
  31. IPA_CLIENT_BIT_32(IPA_CLIENT_MHI_PRIME_TETH_PROD) |
  32. IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
  33. if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
  34. teth_stats_init->prod_mask |=
  35. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
  36. else
  37. teth_stats_init->prod_mask |=
  38. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD);
  39. if (IPA_CLIENT_BIT_32(IPA_CLIENT_MHI_PRIME_TETH_PROD)) {
  40. ep_index = ipa3_get_ep_mapping(
  41. IPA_CLIENT_MHI_PRIME_TETH_PROD);
  42. if (ep_index == -1) {
  43. IPAERR("Invalid client.\n");
  44. kfree(teth_stats_init);
  45. return -EINVAL;
  46. }
  47. teth_stats_init->dst_ep_mask[ep_index] =
  48. IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
  49. if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
  50. teth_stats_init->dst_ep_mask[ep_index] |=
  51. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
  52. else
  53. teth_stats_init->dst_ep_mask[ep_index] |=
  54. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
  55. }
  56. } else {
  57. teth_stats_init->prod_mask = (
  58. IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD) |
  59. IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
  60. if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
  61. teth_stats_init->prod_mask |=
  62. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
  63. else
  64. teth_stats_init->prod_mask |=
  65. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD);
  66. if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD)) {
  67. ep_index = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD);
  68. if (ep_index == -1) {
  69. IPAERR("Invalid client.\n");
  70. kfree(teth_stats_init);
  71. return -EINVAL;
  72. }
  73. teth_stats_init->dst_ep_mask[ep_index] =
  74. IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
  75. if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
  76. teth_stats_init->dst_ep_mask[ep_index] |=
  77. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
  78. else
  79. teth_stats_init->dst_ep_mask[ep_index] |=
  80. IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
  81. }
  82. }
  83. if (IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD)) {
  84. ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
  85. if (ep_index == -1) {
  86. IPAERR("Invalid client.\n");
  87. kfree(teth_stats_init);
  88. return -EINVAL;
  89. }
  90. /* enable addtional pipe monitoring for pcie modem */
  91. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1)
  92. teth_stats_init->dst_ep_mask[ep_index] =
  93. (IPA_CLIENT_BIT_32(
  94. IPA_CLIENT_Q6_WAN_CONS) |
  95. IPA_CLIENT_BIT_32(
  96. IPA_CLIENT_MHI_PRIME_TETH_CONS));
  97. else
  98. teth_stats_init->dst_ep_mask[ep_index] =
  99. IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
  100. }
  101. if (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD)) {
  102. ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
  103. if (ep_index == -1) {
  104. IPAERR("Invalid client.\n");
  105. kfree(teth_stats_init);
  106. return -EINVAL;
  107. }
  108. /* enable addtional pipe monitoring for pcie modem*/
  109. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1)
  110. teth_stats_init->dst_ep_mask[ep_index] =
  111. (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
  112. IPA_CLIENT_BIT_32(
  113. IPA_CLIENT_MHI_PRIME_TETH_CONS));
  114. else
  115. teth_stats_init->dst_ep_mask[ep_index] =
  116. IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
  117. }
  118. if (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD)) {
  119. ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
  120. if (ep_index == -1) {
  121. IPAERR("Invalid client.\n");
  122. kfree(teth_stats_init);
  123. return -EINVAL;
  124. }
  125. /* enable addtional pipe monitoring for pcie modem*/
  126. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1)
  127. teth_stats_init->dst_ep_mask[ep_index] =
  128. (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
  129. IPA_CLIENT_BIT_32(
  130. IPA_CLIENT_MHI_PRIME_TETH_CONS));
  131. else
  132. teth_stats_init->dst_ep_mask[ep_index] =
  133. IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
  134. }
  135. ret = ipa_init_teth_stats(teth_stats_init);
  136. if (ret != 0)
  137. IPAERR("init teth stats fails\n");
  138. kfree(teth_stats_init);
  139. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
  140. ret = ipa_init_flt_rt_stats();
  141. if (ret != 0)
  142. IPAERR("init flt rt stats fails\n");
  143. }
  144. return ret;
  145. }
/*
 * ipa_init_quota_stats() - initialize the HW quota statistics region
 * @pipe_bitmask: bitmap of pipes on which quota counting is enabled
 *
 * Resets the driver-side quota cache, generates the HAL init payload and
 * then, in a single atomic 3-descriptor command sequence, programs the
 * quota mask register, the quota base register and the SRAM stats region.
 *
 * Return: 0 on success (also when HW stats are disabled), negative errno
 * on failure.
 */
int ipa_init_quota_stats(u32 pipe_bitmask)
{
	struct ipahal_stats_init_pyld *pyld;
	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_register_write quota_base = {0};
	struct ipahal_imm_cmd_pyld *quota_base_pyld;
	struct ipahal_imm_cmd_register_write quota_mask = {0};
	struct ipahal_imm_cmd_pyld *quota_mask_pyld;
	struct ipa3_desc desc[3] = { {0} };
	dma_addr_t dma_address;
	int ret;
	if (!ipa3_ctx->hw_stats.enabled)
		return 0;
	/* reset driver's cache */
	memset(&ipa3_ctx->hw_stats.quota, 0, sizeof(ipa3_ctx->hw_stats.quota));
	ipa3_ctx->hw_stats.quota.init.enabled_bitmask = pipe_bitmask;
	IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);
	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA,
		&ipa3_ctx->hw_stats.quota.init, false);
	if (!pyld) {
		IPAERR("failed to generate pyld\n");
		return -EPERM;
	}
	/* the payload must fit into the SRAM partition reserved for quota */
	if (pyld->len > IPA_MEM_PART(stats_quota_size)) {
		IPAERR("SRAM partition too small: %d needed %d\n",
			IPA_MEM_PART(stats_quota_size), pyld->len);
		ret = -EPERM;
		goto destroy_init_pyld;
	}
	dma_address = dma_map_single(ipa3_ctx->pdev,
		pyld->data,
		pyld->len,
		DMA_TO_DEVICE);
	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
		IPAERR("failed to DMA map\n");
		ret = -EPERM;
		goto destroy_init_pyld;
	}
	/* setting the registers and init the stats pyld are done atomically */
	/* desc[0]: write the quota enable mask register */
	quota_mask.skip_pipeline_clear = false;
	quota_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	quota_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_MASK_n,
		ipa3_ctx->ee);
	quota_mask.value = pipe_bitmask;
	quota_mask.value_mask = ~0;
	quota_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&quota_mask, false);
	if (!quota_mask_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto unmap;
	}
	desc[0].opcode = quota_mask_pyld->opcode;
	desc[0].pyld = quota_mask_pyld->data;
	desc[0].len = quota_mask_pyld->len;
	desc[0].type = IPA_IMM_CMD_DESC;
	/* desc[1]: write the SRAM base offset of the quota stats region */
	quota_base.skip_pipeline_clear = false;
	quota_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	quota_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
		ipa3_ctx->ee);
	quota_base.value = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(stats_quota_ofst);
	quota_base.value_mask = ~0;
	quota_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&quota_base, false);
	if (!quota_base_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto destroy_quota_mask;
	}
	desc[1].opcode = quota_base_pyld->opcode;
	desc[1].pyld = quota_base_pyld->data;
	desc[1].len = quota_base_pyld->len;
	desc[1].type = IPA_IMM_CMD_DESC;
	/* desc[2]: DMA the init payload into the SRAM stats region */
	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	cmd.size = pyld->len;
	cmd.system_addr = dma_address;
	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(stats_quota_ofst);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct dma_shared_mem imm cmd\n");
		ret = -ENOMEM;
		goto destroy_quota_base;
	}
	desc[2].opcode = cmd_pyld->opcode;
	desc[2].pyld = cmd_pyld->data;
	desc[2].len = cmd_pyld->len;
	desc[2].type = IPA_IMM_CMD_DESC;
	ret = ipa3_send_cmd(3, desc);
	if (ret) {
		IPAERR("failed to send immediate command (error %d)\n", ret);
		goto destroy_imm;
	}
	ret = 0;
	/* unwind in reverse acquisition order; also the success path */
destroy_imm:
	ipahal_destroy_imm_cmd(cmd_pyld);
destroy_quota_base:
	ipahal_destroy_imm_cmd(quota_base_pyld);
destroy_quota_mask:
	ipahal_destroy_imm_cmd(quota_mask_pyld);
unmap:
	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
destroy_init_pyld:
	ipahal_destroy_stats_init_pyld(pyld);
	return ret;
}
  257. int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
  258. {
  259. int i;
  260. int ret;
  261. struct ipahal_stats_get_offset_quota get_offset = { { 0 } };
  262. struct ipahal_stats_offset offset = { 0 };
  263. struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
  264. struct ipahal_imm_cmd_pyld *cmd_pyld;
  265. struct ipa_mem_buffer mem;
  266. struct ipa3_desc desc = { 0 };
  267. struct ipahal_stats_quota_all *stats;
  268. if (!ipa3_ctx->hw_stats.enabled)
  269. return 0;
  270. get_offset.init = ipa3_ctx->hw_stats.quota.init;
  271. ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset,
  272. &offset);
  273. if (ret) {
  274. IPAERR("failed to get offset from hal %d\n", ret);
  275. return ret;
  276. }
  277. IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
  278. if (offset.size == 0)
  279. return 0;
  280. mem.size = offset.size;
  281. mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
  282. mem.size,
  283. &mem.phys_base,
  284. GFP_KERNEL);
  285. if (!mem.base) {
  286. IPAERR("fail to alloc DMA memory");
  287. return ret;
  288. }
  289. cmd.is_read = true;
  290. cmd.clear_after_read = true;
  291. cmd.skip_pipeline_clear = false;
  292. cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
  293. cmd.size = mem.size;
  294. cmd.system_addr = mem.phys_base;
  295. cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
  296. IPA_MEM_PART(stats_quota_ofst) + offset.offset;
  297. cmd_pyld = ipahal_construct_imm_cmd(
  298. IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
  299. if (!cmd_pyld) {
  300. IPAERR("failed to construct dma_shared_mem imm cmd\n");
  301. ret = -ENOMEM;
  302. goto free_dma_mem;
  303. }
  304. desc.opcode = cmd_pyld->opcode;
  305. desc.pyld = cmd_pyld->data;
  306. desc.len = cmd_pyld->len;
  307. desc.type = IPA_IMM_CMD_DESC;
  308. ret = ipa3_send_cmd(1, &desc);
  309. if (ret) {
  310. IPAERR("failed to send immediate command (error %d)\n", ret);
  311. goto destroy_imm;
  312. }
  313. stats = kzalloc(sizeof(*stats), GFP_KERNEL);
  314. if (!stats) {
  315. ret = -ENOMEM;
  316. goto destroy_imm;
  317. }
  318. ret = ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA,
  319. &ipa3_ctx->hw_stats.quota.init, mem.base, stats);
  320. if (ret) {
  321. IPAERR("failed to parse stats (error %d)\n", ret);
  322. goto free_stats;
  323. }
  324. /*
  325. * update driver cache.
  326. * the stats were read from hardware with clear_after_read meaning
  327. * hardware stats are 0 now
  328. */
  329. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  330. int ep_idx = ipa3_get_ep_mapping(i);
  331. if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
  332. continue;
  333. if (ipa3_ctx->ep[ep_idx].client != i)
  334. continue;
  335. ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_bytes +=
  336. stats->stats[ep_idx].num_ipv4_bytes;
  337. ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_pkts +=
  338. stats->stats[ep_idx].num_ipv4_pkts;
  339. ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_bytes +=
  340. stats->stats[ep_idx].num_ipv6_bytes;
  341. ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_pkts +=
  342. stats->stats[ep_idx].num_ipv6_pkts;
  343. }
  344. /* copy results to out parameter */
  345. if (out)
  346. *out = ipa3_ctx->hw_stats.quota.stats;
  347. ret = 0;
  348. free_stats:
  349. kfree(stats);
  350. destroy_imm:
  351. ipahal_destroy_imm_cmd(cmd_pyld);
  352. free_dma_mem:
  353. dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
  354. return ret;
  355. }
  356. int ipa_reset_quota_stats(enum ipa_client_type client)
  357. {
  358. int ret;
  359. struct ipa_quota_stats *stats;
  360. if (!ipa3_ctx->hw_stats.enabled)
  361. return 0;
  362. if (client >= IPA_CLIENT_MAX) {
  363. IPAERR("invalid client %d\n", client);
  364. return -EINVAL;
  365. }
  366. /* reading stats will reset them in hardware */
  367. ret = ipa_get_quota_stats(NULL);
  368. if (ret) {
  369. IPAERR("ipa_get_quota_stats failed %d\n", ret);
  370. return ret;
  371. }
  372. /* reset driver's cache */
  373. stats = &ipa3_ctx->hw_stats.quota.stats.client[client];
  374. memset(stats, 0, sizeof(*stats));
  375. return 0;
  376. }
  377. int ipa_reset_all_quota_stats(void)
  378. {
  379. int ret;
  380. struct ipa_quota_stats_all *stats;
  381. if (!ipa3_ctx->hw_stats.enabled)
  382. return 0;
  383. /* reading stats will reset them in hardware */
  384. ret = ipa_get_quota_stats(NULL);
  385. if (ret) {
  386. IPAERR("ipa_get_quota_stats failed %d\n", ret);
  387. return ret;
  388. }
  389. /* reset driver's cache */
  390. stats = &ipa3_ctx->hw_stats.quota.stats;
  391. memset(stats, 0, sizeof(*stats));
  392. return 0;
  393. }
/*
 * ipa_init_teth_stats() - initialize the HW tethering statistics region
 * @in: producer mask plus, per producer pipe, the consumer pipe mask to
 *      monitor; every enabled producer must have a non-empty cons mask
 *
 * Resets the driver-side tethering cache, generates the HAL init payload
 * and then, in a single atomic 3-descriptor command sequence, programs
 * the tethering mask register, the tethering base register and the SRAM
 * stats region.
 *
 * Return: 0 on success (also when HW stats are disabled), negative errno
 * on failure.
 */
int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
{
	struct ipahal_stats_init_pyld *pyld;
	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_register_write teth_base = {0};
	struct ipahal_imm_cmd_pyld *teth_base_pyld;
	struct ipahal_imm_cmd_register_write teth_mask = { 0 };
	struct ipahal_imm_cmd_pyld *teth_mask_pyld;
	struct ipa3_desc desc[3] = { {0} };
	dma_addr_t dma_address;
	int ret;
	int i;
	if (!ipa3_ctx->hw_stats.enabled)
		return 0;
	if (!in || !in->prod_mask) {
		IPAERR("invalid params\n");
		return -EINVAL;
	}
	/* every enabled producer pipe must name at least one consumer */
	for (i = 0; i < IPA_STATS_MAX_PIPE_BIT; i++) {
		if ((in->prod_mask & (1 << i)) && !in->dst_ep_mask[i]) {
			IPAERR("prod %d doesn't have cons\n", i);
			return -EINVAL;
		}
	}
	IPADBG_LOW("prod_mask=0x%x\n", in->prod_mask);
	/* reset driver's cache */
	memset(&ipa3_ctx->hw_stats.teth.init, 0,
		sizeof(ipa3_ctx->hw_stats.teth.init));
	for (i = 0; i < IPA_CLIENT_MAX; i++) {
		memset(&ipa3_ctx->hw_stats.teth.prod_stats_sum[i], 0,
			sizeof(ipa3_ctx->hw_stats.teth.prod_stats_sum[i]));
		memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0,
			sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i]));
	}
	ipa3_ctx->hw_stats.teth.init.prod_bitmask = in->prod_mask;
	memcpy(ipa3_ctx->hw_stats.teth.init.cons_bitmask, in->dst_ep_mask,
		sizeof(ipa3_ctx->hw_stats.teth.init.cons_bitmask));
	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_TETHERING,
		&ipa3_ctx->hw_stats.teth.init, false);
	if (!pyld) {
		IPAERR("failed to generate pyld\n");
		return -EPERM;
	}
	/* the payload must fit into the SRAM partition for tethering */
	if (pyld->len > IPA_MEM_PART(stats_tethering_size)) {
		IPAERR("SRAM partition too small: %d needed %d\n",
			IPA_MEM_PART(stats_tethering_size), pyld->len);
		ret = -EPERM;
		goto destroy_init_pyld;
	}
	dma_address = dma_map_single(ipa3_ctx->pdev,
		pyld->data,
		pyld->len,
		DMA_TO_DEVICE);
	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
		IPAERR("failed to DMA map\n");
		ret = -EPERM;
		goto destroy_init_pyld;
	}
	/* setting the registers and init the stats pyld are done atomically */
	/* desc[0]: write the tethering producer mask register */
	teth_mask.skip_pipeline_clear = false;
	teth_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	teth_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_MASK_n,
		ipa3_ctx->ee);
	teth_mask.value = in->prod_mask;
	teth_mask.value_mask = ~0;
	teth_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&teth_mask, false);
	if (!teth_mask_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto unmap;
	}
	desc[0].opcode = teth_mask_pyld->opcode;
	desc[0].pyld = teth_mask_pyld->data;
	desc[0].len = teth_mask_pyld->len;
	desc[0].type = IPA_IMM_CMD_DESC;
	/* desc[1]: write the SRAM base offset of the tethering region */
	teth_base.skip_pipeline_clear = false;
	teth_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	teth_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_BASE_n,
		ipa3_ctx->ee);
	teth_base.value = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(stats_tethering_ofst);
	teth_base.value_mask = ~0;
	teth_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&teth_base, false);
	if (!teth_base_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto destroy_teth_mask;
	}
	desc[1].opcode = teth_base_pyld->opcode;
	desc[1].pyld = teth_base_pyld->data;
	desc[1].len = teth_base_pyld->len;
	desc[1].type = IPA_IMM_CMD_DESC;
	/* desc[2]: DMA the init payload into the SRAM stats region */
	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	cmd.size = pyld->len;
	cmd.system_addr = dma_address;
	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(stats_tethering_ofst);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct dma_shared_mem imm cmd\n");
		ret = -ENOMEM;
		goto destroy_teth_base;
	}
	desc[2].opcode = cmd_pyld->opcode;
	desc[2].pyld = cmd_pyld->data;
	desc[2].len = cmd_pyld->len;
	desc[2].type = IPA_IMM_CMD_DESC;
	ret = ipa3_send_cmd(3, desc);
	if (ret) {
		IPAERR("failed to send immediate command (error %d)\n", ret);
		goto destroy_imm;
	}
	ret = 0;
	/* unwind in reverse acquisition order; also the success path */
destroy_imm:
	ipahal_destroy_imm_cmd(cmd_pyld);
destroy_teth_base:
	ipahal_destroy_imm_cmd(teth_base_pyld);
destroy_teth_mask:
	ipahal_destroy_imm_cmd(teth_mask_pyld);
unmap:
	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
destroy_init_pyld:
	ipahal_destroy_stats_init_pyld(pyld);
	return ret;
}
  525. int ipa_get_teth_stats(void)
  526. {
  527. int i, j;
  528. int ret;
  529. struct ipahal_stats_get_offset_tethering get_offset = { { 0 } };
  530. struct ipahal_stats_offset offset = {0};
  531. struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
  532. struct ipahal_imm_cmd_pyld *cmd_pyld;
  533. struct ipa_mem_buffer mem;
  534. struct ipa3_desc desc = { 0 };
  535. struct ipahal_stats_tethering_all *stats_all;
  536. struct ipa_hw_stats_teth *sw_stats = &ipa3_ctx->hw_stats.teth;
  537. struct ipahal_stats_tethering *stats;
  538. struct ipa_quota_stats *quota_stats;
  539. struct ipahal_stats_init_tethering *init =
  540. (struct ipahal_stats_init_tethering *)
  541. &ipa3_ctx->hw_stats.teth.init;
  542. if (!ipa3_ctx->hw_stats.enabled)
  543. return 0;
  544. get_offset.init = ipa3_ctx->hw_stats.teth.init;
  545. ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_TETHERING, &get_offset,
  546. &offset);
  547. if (ret) {
  548. IPAERR("failed to get offset from hal %d\n", ret);
  549. return ret;
  550. }
  551. IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
  552. if (offset.size == 0)
  553. return 0;
  554. mem.size = offset.size;
  555. mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
  556. mem.size,
  557. &mem.phys_base,
  558. GFP_KERNEL);
  559. if (!mem.base) {
  560. IPAERR("fail to alloc DMA memory\n");
  561. return ret;
  562. }
  563. cmd.is_read = true;
  564. cmd.clear_after_read = true;
  565. cmd.skip_pipeline_clear = false;
  566. cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
  567. cmd.size = mem.size;
  568. cmd.system_addr = mem.phys_base;
  569. cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
  570. IPA_MEM_PART(stats_tethering_ofst) + offset.offset;
  571. cmd_pyld = ipahal_construct_imm_cmd(
  572. IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
  573. if (!cmd_pyld) {
  574. IPAERR("failed to construct dma_shared_mem imm cmd\n");
  575. ret = -ENOMEM;
  576. goto free_dma_mem;
  577. }
  578. desc.opcode = cmd_pyld->opcode;
  579. desc.pyld = cmd_pyld->data;
  580. desc.len = cmd_pyld->len;
  581. desc.type = IPA_IMM_CMD_DESC;
  582. ret = ipa3_send_cmd(1, &desc);
  583. if (ret) {
  584. IPAERR("failed to send immediate command (error %d)\n", ret);
  585. goto destroy_imm;
  586. }
  587. stats_all = kzalloc(sizeof(*stats_all), GFP_KERNEL);
  588. if (!stats_all) {
  589. IPADBG("failed to alloc memory\n");
  590. ret = -ENOMEM;
  591. goto destroy_imm;
  592. }
  593. ret = ipahal_parse_stats(IPAHAL_HW_STATS_TETHERING,
  594. &ipa3_ctx->hw_stats.teth.init, mem.base, stats_all);
  595. if (ret) {
  596. IPAERR("failed to parse stats_all (error %d)\n", ret);
  597. goto free_stats;
  598. }
  599. /* reset prod_stats cache */
  600. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  601. memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0,
  602. sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i]));
  603. }
  604. /*
  605. * update driver cache.
  606. * the stats were read from hardware with clear_after_read meaning
  607. * hardware stats are 0 now
  608. */
  609. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  610. for (j = 0; j < IPA_CLIENT_MAX; j++) {
  611. int prod_idx = ipa3_get_ep_mapping(i);
  612. int cons_idx = ipa3_get_ep_mapping(j);
  613. if (prod_idx == -1 || prod_idx >= IPA3_MAX_NUM_PIPES)
  614. continue;
  615. if (cons_idx == -1 || cons_idx >= IPA3_MAX_NUM_PIPES)
  616. continue;
  617. /* save hw-query result */
  618. if ((init->prod_bitmask & (1 << prod_idx)) &&
  619. (init->cons_bitmask[prod_idx]
  620. & (1 << cons_idx))) {
  621. IPADBG_LOW("prod %d cons %d\n",
  622. prod_idx, cons_idx);
  623. stats = &stats_all->stats[prod_idx][cons_idx];
  624. IPADBG_LOW("num_ipv4_bytes %lld\n",
  625. stats->num_ipv4_bytes);
  626. IPADBG_LOW("num_ipv4_pkts %lld\n",
  627. stats->num_ipv4_pkts);
  628. IPADBG_LOW("num_ipv6_pkts %lld\n",
  629. stats->num_ipv6_pkts);
  630. IPADBG_LOW("num_ipv6_bytes %lld\n",
  631. stats->num_ipv6_bytes);
  632. /* update stats*/
  633. quota_stats =
  634. &sw_stats->prod_stats[i].client[j];
  635. quota_stats->num_ipv4_bytes =
  636. stats->num_ipv4_bytes;
  637. quota_stats->num_ipv4_pkts =
  638. stats->num_ipv4_pkts;
  639. quota_stats->num_ipv6_bytes =
  640. stats->num_ipv6_bytes;
  641. quota_stats->num_ipv6_pkts =
  642. stats->num_ipv6_pkts;
  643. /* Accumulated stats */
  644. quota_stats =
  645. &sw_stats->prod_stats_sum[i].client[j];
  646. quota_stats->num_ipv4_bytes +=
  647. stats->num_ipv4_bytes;
  648. quota_stats->num_ipv4_pkts +=
  649. stats->num_ipv4_pkts;
  650. quota_stats->num_ipv6_bytes +=
  651. stats->num_ipv6_bytes;
  652. quota_stats->num_ipv6_pkts +=
  653. stats->num_ipv6_pkts;
  654. }
  655. }
  656. }
  657. ret = 0;
  658. free_stats:
  659. kfree(stats_all);
  660. stats = NULL;
  661. destroy_imm:
  662. ipahal_destroy_imm_cmd(cmd_pyld);
  663. free_dma_mem:
  664. dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
  665. return ret;
  666. }
  667. int ipa_query_teth_stats(enum ipa_client_type prod,
  668. struct ipa_quota_stats_all *out, bool reset)
  669. {
  670. if (!IPA_CLIENT_IS_PROD(prod) || ipa3_get_ep_mapping(prod) == -1) {
  671. IPAERR("invalid prod %d\n", prod);
  672. return -EINVAL;
  673. }
  674. /* copy results to out parameter */
  675. if (reset)
  676. *out = ipa3_ctx->hw_stats.teth.prod_stats[prod];
  677. else
  678. *out = ipa3_ctx->hw_stats.teth.prod_stats_sum[prod];
  679. return 0;
  680. }
  681. int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons)
  682. {
  683. int ret;
  684. struct ipa_quota_stats *stats;
  685. if (!ipa3_ctx->hw_stats.enabled)
  686. return 0;
  687. if (!IPA_CLIENT_IS_PROD(prod) || !IPA_CLIENT_IS_CONS(cons)) {
  688. IPAERR("invalid prod %d or cons %d\n", prod, cons);
  689. return -EINVAL;
  690. }
  691. /* reading stats will reset them in hardware */
  692. ret = ipa_get_teth_stats();
  693. if (ret) {
  694. IPAERR("ipa_get_teth_stats failed %d\n", ret);
  695. return ret;
  696. }
  697. /* reset driver's cache */
  698. stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[prod].client[cons];
  699. memset(stats, 0, sizeof(*stats));
  700. return 0;
  701. }
  702. int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod)
  703. {
  704. int ret;
  705. int i;
  706. struct ipa_quota_stats *stats;
  707. if (!ipa3_ctx->hw_stats.enabled)
  708. return 0;
  709. if (!IPA_CLIENT_IS_PROD(prod)) {
  710. IPAERR("invalid prod %d\n", prod);
  711. return -EINVAL;
  712. }
  713. /* reading stats will reset them in hardware */
  714. ret = ipa_get_teth_stats();
  715. if (ret) {
  716. IPAERR("ipa_get_teth_stats failed %d\n", ret);
  717. return ret;
  718. }
  719. /* reset driver's cache */
  720. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  721. stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[prod].client[i];
  722. memset(stats, 0, sizeof(*stats));
  723. }
  724. return 0;
  725. }
  726. int ipa_reset_all_teth_stats(void)
  727. {
  728. int i;
  729. int ret;
  730. struct ipa_quota_stats_all *stats;
  731. if (!ipa3_ctx->hw_stats.enabled)
  732. return 0;
  733. /* reading stats will reset them in hardware */
  734. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  735. if (IPA_CLIENT_IS_PROD(i) && ipa3_get_ep_mapping(i) != -1) {
  736. ret = ipa_get_teth_stats();
  737. if (ret) {
  738. IPAERR("ipa_get_teth_stats failed %d\n", ret);
  739. return ret;
  740. }
  741. /* a single iteration will reset all hardware stats */
  742. break;
  743. }
  744. }
  745. /* reset driver's cache */
  746. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  747. stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[i];
  748. memset(stats, 0, sizeof(*stats));
  749. }
  750. return 0;
  751. }
/*
 * ipa_init_flt_rt_stats() - initialize filter/routing (FnR) HW counters
 *
 * Builds the FnR stats init image, DMA-maps it, and sends one atomic batch
 * of five immediate commands: four register writes pointing HW at the FnR
 * stats region in shared memory (filter v4/v6 and router v4/v6 base
 * registers), then a DMA_SHARED_MEM write that seeds the region itself.
 *
 * Return: 0 on success (or immediately when HW stats are disabled),
 * negative errno on failure.
 */
int ipa_init_flt_rt_stats(void)
{
	struct ipahal_stats_init_pyld *pyld;
	int smem_ofst, smem_size;
	int stats_base_flt_v4, stats_base_flt_v6;
	int stats_base_rt_v4, stats_base_rt_v6;
	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_register_write flt_v4_base = {0};
	struct ipahal_imm_cmd_pyld *flt_v4_base_pyld;
	struct ipahal_imm_cmd_register_write flt_v6_base = {0};
	struct ipahal_imm_cmd_pyld *flt_v6_base_pyld;
	struct ipahal_imm_cmd_register_write rt_v4_base = {0};
	struct ipahal_imm_cmd_pyld *rt_v4_base_pyld;
	struct ipahal_imm_cmd_register_write rt_v6_base = {0};
	struct ipahal_imm_cmd_pyld *rt_v6_base_pyld;
	struct ipa3_desc desc[5] = { {0} };
	dma_addr_t dma_address;
	int ret;

	if (!ipa3_ctx->hw_stats.enabled)
		return 0;

	smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
	smem_size = IPA_MEM_PART(stats_fnr_size);

	/* init image covers the full range of FnR counter indices */
	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR,
		(void *)(uintptr_t)(IPA_MAX_FLT_RT_CNT_INDEX), false);
	if (!pyld) {
		IPAERR("failed to generate pyld\n");
		return -EPERM;
	}
	if (pyld->len > smem_size) {
		IPAERR("SRAM partition too small: %d needed %d\n",
			smem_size, pyld->len);
		ret = -EPERM;
		goto destroy_init_pyld;
	}
	dma_address = dma_map_single(ipa3_ctx->pdev,
		pyld->data,
		pyld->len,
		DMA_TO_DEVICE);
	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
		IPAERR("failed to DMA map\n");
		ret = -EPERM;
		goto destroy_init_pyld;
	}
	stats_base_flt_v4 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE);
	stats_base_flt_v6 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE);
	stats_base_rt_v4 = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE);
	stats_base_rt_v6 = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE);

	/* setting the registers and init the stats pyld are done atomically */
	/* set IPA_STAT_FILTER_IPV4_BASE */
	flt_v4_base.skip_pipeline_clear = false;
	flt_v4_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	flt_v4_base.offset = stats_base_flt_v4;
	flt_v4_base.value = ipa3_ctx->smem_restricted_bytes +
		smem_ofst;
	flt_v4_base.value_mask = ~0;
	flt_v4_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&flt_v4_base, false);
	if (!flt_v4_base_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto unmap;
	}
	desc[0].opcode = flt_v4_base_pyld->opcode;
	desc[0].pyld = flt_v4_base_pyld->data;
	desc[0].len = flt_v4_base_pyld->len;
	desc[0].type = IPA_IMM_CMD_DESC;

	/* set IPA_STAT_FILTER_IPV6_BASE */
	flt_v6_base.skip_pipeline_clear = false;
	flt_v6_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	flt_v6_base.offset = stats_base_flt_v6;
	flt_v6_base.value = ipa3_ctx->smem_restricted_bytes +
		smem_ofst;
	flt_v6_base.value_mask = ~0;
	flt_v6_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&flt_v6_base, false);
	if (!flt_v6_base_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto destroy_flt_v4_base;
	}
	desc[1].opcode = flt_v6_base_pyld->opcode;
	desc[1].pyld = flt_v6_base_pyld->data;
	desc[1].len = flt_v6_base_pyld->len;
	desc[1].type = IPA_IMM_CMD_DESC;

	/* set IPA_STAT_ROUTER_IPV4_BASE */
	rt_v4_base.skip_pipeline_clear = false;
	rt_v4_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	rt_v4_base.offset = stats_base_rt_v4;
	rt_v4_base.value = ipa3_ctx->smem_restricted_bytes +
		smem_ofst;
	rt_v4_base.value_mask = ~0;
	rt_v4_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&rt_v4_base, false);
	if (!rt_v4_base_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto destroy_flt_v6_base;
	}
	desc[2].opcode = rt_v4_base_pyld->opcode;
	desc[2].pyld = rt_v4_base_pyld->data;
	desc[2].len = rt_v4_base_pyld->len;
	desc[2].type = IPA_IMM_CMD_DESC;

	/* set IPA_STAT_ROUTER_IPV6_BASE */
	rt_v6_base.skip_pipeline_clear = false;
	rt_v6_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	rt_v6_base.offset = stats_base_rt_v6;
	rt_v6_base.value = ipa3_ctx->smem_restricted_bytes +
		smem_ofst;
	rt_v6_base.value_mask = ~0;
	rt_v6_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&rt_v6_base, false);
	if (!rt_v6_base_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto destroy_rt_v4_base;
	}
	desc[3].opcode = rt_v6_base_pyld->opcode;
	desc[3].pyld = rt_v6_base_pyld->data;
	desc[3].len = rt_v6_base_pyld->len;
	desc[3].type = IPA_IMM_CMD_DESC;

	/* final command: write the init image into the SRAM stats region */
	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	cmd.size = pyld->len;
	cmd.system_addr = dma_address;
	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		smem_ofst;
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct dma_shared_mem imm cmd\n");
		ret = -ENOMEM;
		goto destroy_rt_v6_base;
	}
	desc[4].opcode = cmd_pyld->opcode;
	desc[4].pyld = cmd_pyld->data;
	desc[4].len = cmd_pyld->len;
	desc[4].type = IPA_IMM_CMD_DESC;

	ret = ipa3_send_cmd(5, desc);
	if (ret) {
		IPAERR("failed to send immediate command (error %d)\n", ret);
		goto destroy_imm;
	}

	ret = 0;

	/* unwind in reverse construction order; shared by success/error */
destroy_imm:
	ipahal_destroy_imm_cmd(cmd_pyld);
destroy_rt_v6_base:
	ipahal_destroy_imm_cmd(rt_v6_base_pyld);
destroy_rt_v4_base:
	ipahal_destroy_imm_cmd(rt_v4_base_pyld);
destroy_flt_v6_base:
	ipahal_destroy_imm_cmd(flt_v6_base_pyld);
destroy_flt_v4_base:
	ipahal_destroy_imm_cmd(flt_v4_base_pyld);
unmap:
	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
destroy_init_pyld:
	ipahal_destroy_stats_init_pyld(pyld);
	return ret;
}
  913. static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
  914. {
  915. int ret;
  916. int smem_ofst;
  917. bool clear = query->reset;
  918. struct ipahal_stats_get_offset_flt_rt_v4_5 *get_offset;
  919. struct ipahal_stats_offset offset = { 0 };
  920. struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
  921. struct ipahal_imm_cmd_pyld *cmd_pyld;
  922. struct ipa_mem_buffer mem;
  923. struct ipa3_desc desc = { 0 };
  924. get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
  925. if (!get_offset) {
  926. IPADBG("no mem\n");
  927. return -ENOMEM;
  928. }
  929. smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
  930. get_offset->start_id = query->start_id;
  931. get_offset->end_id = query->end_id;
  932. ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
  933. &offset);
  934. if (ret) {
  935. IPAERR("failed to get offset from hal %d\n", ret);
  936. goto free_offset;
  937. }
  938. IPADBG("offset = %d size = %d\n", offset.offset, offset.size);
  939. if (offset.size == 0) {
  940. ret = 0;
  941. goto free_offset;
  942. }
  943. mem.size = offset.size;
  944. mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
  945. mem.size,
  946. &mem.phys_base,
  947. GFP_KERNEL);
  948. if (!mem.base) {
  949. IPAERR("fail to alloc DMA memory\n");
  950. goto free_offset;
  951. }
  952. cmd.is_read = true;
  953. cmd.clear_after_read = clear;
  954. cmd.skip_pipeline_clear = false;
  955. cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
  956. cmd.size = mem.size;
  957. cmd.system_addr = mem.phys_base;
  958. cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
  959. smem_ofst + offset.offset;
  960. cmd_pyld = ipahal_construct_imm_cmd(
  961. IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
  962. if (!cmd_pyld) {
  963. IPAERR("failed to construct dma_shared_mem imm cmd\n");
  964. ret = -ENOMEM;
  965. goto free_dma_mem;
  966. }
  967. desc.opcode = cmd_pyld->opcode;
  968. desc.pyld = cmd_pyld->data;
  969. desc.len = cmd_pyld->len;
  970. desc.type = IPA_IMM_CMD_DESC;
  971. ret = ipa3_send_cmd(1, &desc);
  972. if (ret) {
  973. IPAERR("failed to send immediate command (error %d)\n", ret);
  974. goto destroy_imm;
  975. }
  976. ret = ipahal_parse_stats(IPAHAL_HW_STATS_FNR,
  977. NULL, mem.base, query);
  978. if (ret) {
  979. IPAERR("failed to parse stats (error %d)\n", ret);
  980. goto destroy_imm;
  981. }
  982. ret = 0;
  983. destroy_imm:
  984. ipahal_destroy_imm_cmd(cmd_pyld);
  985. free_dma_mem:
  986. dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
  987. free_offset:
  988. kfree(get_offset);
  989. return ret;
  990. }
  991. int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
  992. {
  993. if (!ipa3_ctx->hw_stats.enabled) {
  994. IPAERR("hw_stats is not enabled\n");
  995. return 0;
  996. }
  997. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
  998. IPAERR("FnR stats not supported in %d hw_type\n",
  999. ipa3_ctx->ipa_hw_type);
  1000. return 0;
  1001. }
  1002. if (query->start_id == 0 || query->end_id == 0) {
  1003. IPAERR("Invalid start_id/end_id, must be not 0\n");
  1004. IPAERR("start_id %d, end_id %d\n",
  1005. query->start_id, query->end_id);
  1006. return -EINVAL;
  1007. }
  1008. if (query->start_id > IPA_MAX_FLT_RT_CNT_INDEX) {
  1009. IPAERR("start_cnt_id %d out of range\n", query->start_id);
  1010. return -EINVAL;
  1011. }
  1012. if (query->end_id > IPA_MAX_FLT_RT_CNT_INDEX) {
  1013. IPAERR("end_cnt_id %d out of range\n", query->end_id);
  1014. return -EINVAL;
  1015. }
  1016. if (query->end_id < query->start_id) {
  1017. IPAERR("end_id %d < start_id %d\n",
  1018. query->end_id, query->start_id);
  1019. return -EINVAL;
  1020. }
  1021. if (query->stats_size > sizeof(struct ipa_flt_rt_stats)) {
  1022. IPAERR("stats_size %d > ipa_flt_rt_stats %d\n",
  1023. query->stats_size, sizeof(struct ipa_flt_rt_stats));
  1024. return -EINVAL;
  1025. }
  1026. return __ipa_get_flt_rt_stats(query);
  1027. }
  1028. static int __ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats)
  1029. {
  1030. int ret;
  1031. int smem_ofst;
  1032. struct ipahal_stats_get_offset_flt_rt_v4_5 *get_offset;
  1033. struct ipahal_stats_offset offset = { 0 };
  1034. struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
  1035. struct ipahal_imm_cmd_pyld *cmd_pyld;
  1036. struct ipa_mem_buffer mem;
  1037. struct ipa3_desc desc = { 0 };
  1038. get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
  1039. if (!get_offset) {
  1040. IPADBG("no mem\n");
  1041. return -ENOMEM;
  1042. }
  1043. smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
  1044. get_offset->start_id = index;
  1045. get_offset->end_id = index;
  1046. ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
  1047. &offset);
  1048. if (ret) {
  1049. IPAERR("failed to get offset from hal %d\n", ret);
  1050. goto free_offset;
  1051. }
  1052. IPADBG("offset = %d size = %d\n", offset.offset, offset.size);
  1053. if (offset.size == 0) {
  1054. ret = 0;
  1055. goto free_offset;
  1056. }
  1057. mem.size = offset.size;
  1058. mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
  1059. mem.size,
  1060. &mem.phys_base,
  1061. GFP_KERNEL);
  1062. if (!mem.base) {
  1063. IPAERR("fail to alloc DMA memory\n");
  1064. goto free_offset;
  1065. }
  1066. ipahal_set_flt_rt_sw_stats(mem.base, stats);
  1067. cmd.is_read = false;
  1068. cmd.skip_pipeline_clear = false;
  1069. cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
  1070. cmd.size = mem.size;
  1071. cmd.system_addr = mem.phys_base;
  1072. cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
  1073. smem_ofst + offset.offset;
  1074. cmd_pyld = ipahal_construct_imm_cmd(
  1075. IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
  1076. if (!cmd_pyld) {
  1077. IPAERR("failed to construct dma_shared_mem imm cmd\n");
  1078. ret = -ENOMEM;
  1079. goto free_dma_mem;
  1080. }
  1081. desc.opcode = cmd_pyld->opcode;
  1082. desc.pyld = cmd_pyld->data;
  1083. desc.len = cmd_pyld->len;
  1084. desc.type = IPA_IMM_CMD_DESC;
  1085. ret = ipa3_send_cmd(1, &desc);
  1086. if (ret) {
  1087. IPAERR("failed to send immediate command (error %d)\n", ret);
  1088. goto destroy_imm;
  1089. }
  1090. ret = 0;
  1091. destroy_imm:
  1092. ipahal_destroy_imm_cmd(cmd_pyld);
  1093. free_dma_mem:
  1094. dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
  1095. free_offset:
  1096. kfree(get_offset);
  1097. return ret;
  1098. }
  1099. int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats)
  1100. {
  1101. if (!ipa3_ctx->hw_stats.enabled) {
  1102. IPAERR("hw_stats is not enabled\n");
  1103. return 0;
  1104. }
  1105. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
  1106. IPAERR("FnR stats not supported in %d hw_type\n",
  1107. ipa3_ctx->ipa_hw_type);
  1108. return 0;
  1109. }
  1110. if (index > IPA_MAX_FLT_RT_CNT_INDEX) {
  1111. IPAERR("index %d out of range\n", index);
  1112. return -EINVAL;
  1113. }
  1114. if (index <= IPA_FLT_RT_HW_COUNTER) {
  1115. IPAERR("index %d invalid, only support sw counter set\n",
  1116. index);
  1117. return -EINVAL;
  1118. }
  1119. return __ipa_set_flt_rt_stats(index, stats);
  1120. }
/*
 * ipa_init_drop_stats() - initialize per-pipe drop counters in hardware
 * @pipe_bitmask: bitmask of pipes for which drop stats are collected
 *
 * Resets the driver's drop-stats cache, then sends one atomic batch of
 * three immediate commands: a register write enabling the pipe mask, a
 * register write pointing HW at the drop-stats SRAM region, and a
 * DMA_SHARED_MEM write seeding that region.
 *
 * Return: 0 on success (or immediately when HW stats are disabled),
 * negative errno on failure.
 */
int ipa_init_drop_stats(u32 pipe_bitmask)
{
	struct ipahal_stats_init_pyld *pyld;
	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_register_write drop_base = {0};
	struct ipahal_imm_cmd_pyld *drop_base_pyld;
	struct ipahal_imm_cmd_register_write drop_mask = {0};
	struct ipahal_imm_cmd_pyld *drop_mask_pyld;
	struct ipa3_desc desc[3] = { {0} };
	dma_addr_t dma_address;
	int ret;

	if (!ipa3_ctx->hw_stats.enabled)
		return 0;

	/* reset driver's cache */
	memset(&ipa3_ctx->hw_stats.drop, 0, sizeof(ipa3_ctx->hw_stats.drop));
	ipa3_ctx->hw_stats.drop.init.enabled_bitmask = pipe_bitmask;
	IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);

	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_DROP,
		&ipa3_ctx->hw_stats.drop.init, false);
	if (!pyld) {
		IPAERR("failed to generate pyld\n");
		return -EPERM;
	}
	if (pyld->len > IPA_MEM_PART(stats_drop_size)) {
		IPAERR("SRAM partition too small: %d needed %d\n",
			IPA_MEM_PART(stats_drop_size), pyld->len);
		ret = -EPERM;
		goto destroy_init_pyld;
	}
	dma_address = dma_map_single(ipa3_ctx->pdev,
		pyld->data,
		pyld->len,
		DMA_TO_DEVICE);
	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
		IPAERR("failed to DMA map\n");
		ret = -EPERM;
		goto destroy_init_pyld;
	}

	/* setting the registers and init the stats pyld are done atomically */
	drop_mask.skip_pipeline_clear = false;
	drop_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	drop_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_MASK_n,
		ipa3_ctx->ee);
	drop_mask.value = pipe_bitmask;
	drop_mask.value_mask = ~0;
	drop_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&drop_mask, false);
	if (!drop_mask_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto unmap;
	}
	desc[0].opcode = drop_mask_pyld->opcode;
	desc[0].pyld = drop_mask_pyld->data;
	desc[0].len = drop_mask_pyld->len;
	desc[0].type = IPA_IMM_CMD_DESC;

	/* point HW at the drop-stats region in shared memory */
	drop_base.skip_pipeline_clear = false;
	drop_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	drop_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_BASE_n,
		ipa3_ctx->ee);
	drop_base.value = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(stats_drop_ofst);
	drop_base.value_mask = ~0;
	drop_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&drop_base, false);
	if (!drop_base_pyld) {
		IPAERR("failed to construct register_write imm cmd\n");
		ret = -ENOMEM;
		goto destroy_drop_mask;
	}
	desc[1].opcode = drop_base_pyld->opcode;
	desc[1].pyld = drop_base_pyld->data;
	desc[1].len = drop_base_pyld->len;
	desc[1].type = IPA_IMM_CMD_DESC;

	/* seed the drop-stats region itself */
	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	cmd.size = pyld->len;
	cmd.system_addr = dma_address;
	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(stats_drop_ofst);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct dma_shared_mem imm cmd\n");
		ret = -ENOMEM;
		goto destroy_drop_base;
	}
	desc[2].opcode = cmd_pyld->opcode;
	desc[2].pyld = cmd_pyld->data;
	desc[2].len = cmd_pyld->len;
	desc[2].type = IPA_IMM_CMD_DESC;

	ret = ipa3_send_cmd(3, desc);
	if (ret) {
		IPAERR("failed to send immediate command (error %d)\n", ret);
		goto destroy_imm;
	}

	ret = 0;

	/* unwind in reverse construction order; shared by success/error */
destroy_imm:
	ipahal_destroy_imm_cmd(cmd_pyld);
destroy_drop_base:
	ipahal_destroy_imm_cmd(drop_base_pyld);
destroy_drop_mask:
	ipahal_destroy_imm_cmd(drop_mask_pyld);
unmap:
	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
destroy_init_pyld:
	ipahal_destroy_stats_init_pyld(pyld);
	return ret;
}
  1232. int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
  1233. {
  1234. int i;
  1235. int ret;
  1236. struct ipahal_stats_get_offset_drop get_offset = { { 0 } };
  1237. struct ipahal_stats_offset offset = { 0 };
  1238. struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
  1239. struct ipahal_imm_cmd_pyld *cmd_pyld;
  1240. struct ipa_mem_buffer mem;
  1241. struct ipa3_desc desc = { 0 };
  1242. struct ipahal_stats_drop_all *stats;
  1243. if (!ipa3_ctx->hw_stats.enabled)
  1244. return 0;
  1245. get_offset.init = ipa3_ctx->hw_stats.drop.init;
  1246. ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_DROP, &get_offset,
  1247. &offset);
  1248. if (ret) {
  1249. IPAERR("failed to get offset from hal %d\n", ret);
  1250. return ret;
  1251. }
  1252. IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
  1253. if (offset.size == 0)
  1254. return 0;
  1255. mem.size = offset.size;
  1256. mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
  1257. mem.size,
  1258. &mem.phys_base,
  1259. GFP_KERNEL);
  1260. if (!mem.base) {
  1261. IPAERR("fail to alloc DMA memory\n");
  1262. return ret;
  1263. }
  1264. cmd.is_read = true;
  1265. cmd.clear_after_read = true;
  1266. cmd.skip_pipeline_clear = false;
  1267. cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
  1268. cmd.size = mem.size;
  1269. cmd.system_addr = mem.phys_base;
  1270. cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
  1271. IPA_MEM_PART(stats_drop_ofst) + offset.offset;
  1272. cmd_pyld = ipahal_construct_imm_cmd(
  1273. IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
  1274. if (!cmd_pyld) {
  1275. IPAERR("failed to construct dma_shared_mem imm cmd\n");
  1276. ret = -ENOMEM;
  1277. goto free_dma_mem;
  1278. }
  1279. desc.opcode = cmd_pyld->opcode;
  1280. desc.pyld = cmd_pyld->data;
  1281. desc.len = cmd_pyld->len;
  1282. desc.type = IPA_IMM_CMD_DESC;
  1283. ret = ipa3_send_cmd(1, &desc);
  1284. if (ret) {
  1285. IPAERR("failed to send immediate command (error %d)\n", ret);
  1286. goto destroy_imm;
  1287. }
  1288. stats = kzalloc(sizeof(*stats), GFP_KERNEL);
  1289. if (!stats) {
  1290. ret = -ENOMEM;
  1291. goto destroy_imm;
  1292. }
  1293. ret = ipahal_parse_stats(IPAHAL_HW_STATS_DROP,
  1294. &ipa3_ctx->hw_stats.drop.init, mem.base, stats);
  1295. if (ret) {
  1296. IPAERR("failed to parse stats (error %d)\n", ret);
  1297. goto free_stats;
  1298. }
  1299. /*
  1300. * update driver cache.
  1301. * the stats were read from hardware with clear_after_read meaning
  1302. * hardware stats are 0 now
  1303. */
  1304. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  1305. int ep_idx = ipa3_get_ep_mapping(i);
  1306. if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
  1307. continue;
  1308. if (ipa3_ctx->ep[ep_idx].client != i)
  1309. continue;
  1310. ipa3_ctx->hw_stats.drop.stats.client[i].drop_byte_cnt +=
  1311. stats->stats[ep_idx].drop_byte_cnt;
  1312. ipa3_ctx->hw_stats.drop.stats.client[i].drop_packet_cnt +=
  1313. stats->stats[ep_idx].drop_packet_cnt;
  1314. }
  1315. if (!out) {
  1316. ret = 0;
  1317. goto free_stats;
  1318. }
  1319. /* copy results to out parameter */
  1320. *out = ipa3_ctx->hw_stats.drop.stats;
  1321. ret = 0;
  1322. free_stats:
  1323. kfree(stats);
  1324. destroy_imm:
  1325. ipahal_destroy_imm_cmd(cmd_pyld);
  1326. free_dma_mem:
  1327. dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
  1328. return ret;
  1329. }
  1330. int ipa_reset_drop_stats(enum ipa_client_type client)
  1331. {
  1332. int ret;
  1333. struct ipa_drop_stats *stats;
  1334. if (client >= IPA_CLIENT_MAX) {
  1335. IPAERR("invalid client %d\n", client);
  1336. return -EINVAL;
  1337. }
  1338. /* reading stats will reset them in hardware */
  1339. ret = ipa_get_drop_stats(NULL);
  1340. if (ret) {
  1341. IPAERR("ipa_get_drop_stats failed %d\n", ret);
  1342. return ret;
  1343. }
  1344. /* reset driver's cache */
  1345. stats = &ipa3_ctx->hw_stats.drop.stats.client[client];
  1346. memset(stats, 0, sizeof(*stats));
  1347. return 0;
  1348. }
  1349. int ipa_reset_all_drop_stats(void)
  1350. {
  1351. int ret;
  1352. struct ipa_drop_stats_all *stats;
  1353. if (!ipa3_ctx->hw_stats.enabled)
  1354. return 0;
  1355. /* reading stats will reset them in hardware */
  1356. ret = ipa_get_drop_stats(NULL);
  1357. if (ret) {
  1358. IPAERR("ipa_get_drop_stats failed %d\n", ret);
  1359. return ret;
  1360. }
  1361. /* reset driver's cache */
  1362. stats = &ipa3_ctx->hw_stats.drop.stats;
  1363. memset(stats, 0, sizeof(*stats));
  1364. return 0;
  1365. }
#ifndef CONFIG_DEBUG_FS
/* debugfs compiled out: provide a stub so callers still link */
int ipa_debugfs_init_stats(struct dentry *parent) { return 0; }
#else
/* upper bound for a single debugfs read/write transfer */
#define IPA_MAX_MSG_LEN 4096
/* shared scratch buffer for debugfs handlers; guarded by ipa3_ctx->lock */
static char dbg_buff[IPA_MAX_MSG_LEN];
  1371. static ssize_t ipa_debugfs_reset_quota_stats(struct file *file,
  1372. const char __user *ubuf, size_t count, loff_t *ppos)
  1373. {
  1374. s8 client = 0;
  1375. int ret;
  1376. mutex_lock(&ipa3_ctx->lock);
  1377. ret = kstrtos8_from_user(ubuf, count, 0, &client);
  1378. if (ret)
  1379. goto bail;
  1380. if (client == -1)
  1381. ipa_reset_all_quota_stats();
  1382. else
  1383. ipa_reset_quota_stats(client);
  1384. ret = count;
  1385. bail:
  1386. mutex_unlock(&ipa3_ctx->lock);
  1387. return ret;
  1388. }
/*
 * ipa_debugfs_print_quota_stats() - debugfs read handler for quota stats
 *
 * Queries the quota stats for all clients and renders, per enabled
 * non-test client, its byte and packet counters into dbg_buff.
 * Return: bytes copied to user space, or negative errno on failure.
 */
static ssize_t ipa_debugfs_print_quota_stats(struct file *file,
	char __user *ubuf, size_t count, loff_t *ppos)
{
	int nbytes = 0;
	struct ipa_quota_stats_all *out;
	int i;
	int res;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mutex_lock(&ipa3_ctx->lock);
	res = ipa_get_quota_stats(out);
	if (res) {
		mutex_unlock(&ipa3_ctx->lock);
		kfree(out);
		return res;
	}
	for (i = 0; i < IPA_CLIENT_MAX; i++) {
		int ep_idx = ipa3_get_ep_mapping(i);

		/* skip unmapped, test, and quota-disabled clients */
		if (ep_idx == -1)
			continue;
		if (IPA_CLIENT_IS_TEST(i))
			continue;
		if (!(ipa3_ctx->hw_stats.quota.init.enabled_bitmask &
			(1 << ep_idx)))
			continue;
		nbytes += scnprintf(dbg_buff + nbytes,
			IPA_MAX_MSG_LEN - nbytes,
			"%s:\n",
			ipa_clients_strings[i]);
		nbytes += scnprintf(dbg_buff + nbytes,
			IPA_MAX_MSG_LEN - nbytes,
			"num_ipv4_bytes=%llu\n",
			out->client[i].num_ipv4_bytes);
		nbytes += scnprintf(dbg_buff + nbytes,
			IPA_MAX_MSG_LEN - nbytes,
			"num_ipv6_bytes=%llu\n",
			out->client[i].num_ipv6_bytes);
		nbytes += scnprintf(dbg_buff + nbytes,
			IPA_MAX_MSG_LEN - nbytes,
			"num_ipv4_pkts=%u\n",
			out->client[i].num_ipv4_pkts);
		nbytes += scnprintf(dbg_buff + nbytes,
			IPA_MAX_MSG_LEN - nbytes,
			"num_ipv6_pkts=%u\n",
			out->client[i].num_ipv6_pkts);
		nbytes += scnprintf(dbg_buff + nbytes,
			IPA_MAX_MSG_LEN - nbytes,
			"\n");
	}
	mutex_unlock(&ipa3_ctx->lock);
	kfree(out);
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
  1443. static ssize_t ipa_debugfs_reset_tethering_stats(struct file *file,
  1444. const char __user *ubuf, size_t count, loff_t *ppos)
  1445. {
  1446. s8 client = 0;
  1447. int ret;
  1448. mutex_lock(&ipa3_ctx->lock);
  1449. ret = kstrtos8_from_user(ubuf, count, 0, &client);
  1450. if (ret)
  1451. goto bail;
  1452. if (client == -1)
  1453. ipa_reset_all_teth_stats();
  1454. else
  1455. ipa_reset_all_cons_teth_stats(client);
  1456. ret = count;
  1457. bail:
  1458. mutex_unlock(&ipa3_ctx->lock);
  1459. return ret;
  1460. }
  1461. static ssize_t ipa_debugfs_print_tethering_stats(struct file *file,
  1462. char __user *ubuf, size_t count, loff_t *ppos)
  1463. {
  1464. int nbytes = 0;
  1465. struct ipa_quota_stats_all *out;
  1466. int i, j;
  1467. int res;
  1468. out = kzalloc(sizeof(*out), GFP_KERNEL);
  1469. if (!out)
  1470. return -ENOMEM;
  1471. mutex_lock(&ipa3_ctx->lock);
  1472. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  1473. int ep_idx = ipa3_get_ep_mapping(i);
  1474. if (ep_idx == -1)
  1475. continue;
  1476. if (!IPA_CLIENT_IS_PROD(i))
  1477. continue;
  1478. if (IPA_CLIENT_IS_TEST(i))
  1479. continue;
  1480. if (!(ipa3_ctx->hw_stats.teth.init.prod_bitmask &
  1481. (1 << ep_idx)))
  1482. continue;
  1483. res = ipa_get_teth_stats();
  1484. if (res) {
  1485. mutex_unlock(&ipa3_ctx->lock);
  1486. kfree(out);
  1487. return res;
  1488. }
  1489. for (j = 0; j < IPA_CLIENT_MAX; j++) {
  1490. int cons_idx = ipa3_get_ep_mapping(j);
  1491. if (cons_idx == -1)
  1492. continue;
  1493. if (IPA_CLIENT_IS_TEST(j))
  1494. continue;
  1495. if (!(ipa3_ctx->hw_stats.teth.init.cons_bitmask[ep_idx]
  1496. & (1 << cons_idx)))
  1497. continue;
  1498. nbytes += scnprintf(dbg_buff + nbytes,
  1499. IPA_MAX_MSG_LEN - nbytes,
  1500. "%s->%s:\n",
  1501. ipa_clients_strings[i],
  1502. ipa_clients_strings[j]);
  1503. nbytes += scnprintf(dbg_buff + nbytes,
  1504. IPA_MAX_MSG_LEN - nbytes,
  1505. "num_ipv4_bytes=%llu\n",
  1506. out->client[j].num_ipv4_bytes);
  1507. nbytes += scnprintf(dbg_buff + nbytes,
  1508. IPA_MAX_MSG_LEN - nbytes,
  1509. "num_ipv6_bytes=%llu\n",
  1510. out->client[j].num_ipv6_bytes);
  1511. nbytes += scnprintf(dbg_buff + nbytes,
  1512. IPA_MAX_MSG_LEN - nbytes,
  1513. "num_ipv4_pkts=%u\n",
  1514. out->client[j].num_ipv4_pkts);
  1515. nbytes += scnprintf(dbg_buff + nbytes,
  1516. IPA_MAX_MSG_LEN - nbytes,
  1517. "num_ipv6_pkts=%u\n",
  1518. out->client[j].num_ipv6_pkts);
  1519. nbytes += scnprintf(dbg_buff + nbytes,
  1520. IPA_MAX_MSG_LEN - nbytes,
  1521. "\n");
  1522. }
  1523. }
  1524. mutex_unlock(&ipa3_ctx->lock);
  1525. kfree(out);
  1526. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  1527. }
  1528. static ssize_t ipa_debugfs_control_flt_rt_stats(struct file *file,
  1529. const char __user *ubuf, size_t count, loff_t *ppos)
  1530. {
  1531. struct ipa_ioc_flt_rt_query *query;
  1532. unsigned long missing;
  1533. int pyld_size = 0;
  1534. int ret;
  1535. query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query),
  1536. GFP_KERNEL);
  1537. if (!query)
  1538. return -ENOMEM;
  1539. query->stats_size = sizeof(struct ipa_flt_rt_stats);
  1540. pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
  1541. sizeof(struct ipa_flt_rt_stats);
  1542. query->stats = (uint64_t)kzalloc(pyld_size, GFP_KERNEL);
  1543. if (!query->stats) {
  1544. kfree(query);
  1545. return -ENOMEM;
  1546. }
  1547. mutex_lock(&ipa3_ctx->lock);
  1548. if (count >= sizeof(dbg_buff)) {
  1549. ret = -EFAULT;
  1550. goto bail;
  1551. }
  1552. missing = copy_from_user(dbg_buff, ubuf, count);
  1553. if (missing) {
  1554. ret = -EFAULT;
  1555. goto bail;
  1556. }
  1557. dbg_buff[count] = '\0';
  1558. if (strcmp(dbg_buff, "reset\n") == 0) {
  1559. query->reset = 1;
  1560. query->start_id = 1;
  1561. query->end_id = IPA_MAX_FLT_RT_CNT_INDEX;
  1562. ipa_get_flt_rt_stats(query);
  1563. } else {
  1564. IPAERR("unsupport flt_rt command\n");
  1565. }
  1566. ret = count;
  1567. bail:
  1568. kfree((void *)(uintptr_t)(query->stats));
  1569. kfree(query);
  1570. mutex_unlock(&ipa3_ctx->lock);
  1571. return ret;
  1572. }
  1573. static ssize_t ipa_debugfs_print_flt_rt_stats(struct file *file,
  1574. char __user *ubuf, size_t count, loff_t *ppos)
  1575. {
  1576. int nbytes = 0;
  1577. int i;
  1578. int res;
  1579. int pyld_size = 0;
  1580. struct ipa_ioc_flt_rt_query *query;
  1581. query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query),
  1582. GFP_KERNEL);
  1583. if (!query)
  1584. return -ENOMEM;
  1585. query->start_id = 1;
  1586. query->end_id = IPA_MAX_FLT_RT_CNT_INDEX;
  1587. query->reset = true;
  1588. query->stats_size = sizeof(struct ipa_flt_rt_stats);
  1589. pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
  1590. sizeof(struct ipa_flt_rt_stats);
  1591. query->stats = (uint64_t)kzalloc(pyld_size, GFP_KERNEL);
  1592. if (!query->stats) {
  1593. kfree(query);
  1594. return -ENOMEM;
  1595. }
  1596. mutex_lock(&ipa3_ctx->lock);
  1597. res = ipa_get_flt_rt_stats(query);
  1598. if (res) {
  1599. mutex_unlock(&ipa3_ctx->lock);
  1600. kfree((void *)(uintptr_t)(query->stats));
  1601. kfree(query);
  1602. return res;
  1603. }
  1604. for (i = 0; i < IPA_MAX_FLT_RT_CNT_INDEX; i++) {
  1605. nbytes += scnprintf(dbg_buff + nbytes,
  1606. IPA_MAX_MSG_LEN - nbytes,
  1607. "cnt_id: %d\n", i + 1);
  1608. nbytes += scnprintf(dbg_buff + nbytes,
  1609. IPA_MAX_MSG_LEN - nbytes,
  1610. "num_pkts: %d\n",
  1611. ((struct ipa_flt_rt_stats *)
  1612. query->stats)[i].num_pkts);
  1613. nbytes += scnprintf(dbg_buff + nbytes,
  1614. IPA_MAX_MSG_LEN - nbytes,
  1615. "num_pkts_hash: %d\n",
  1616. ((struct ipa_flt_rt_stats *)
  1617. query->stats)[i].num_pkts_hash);
  1618. nbytes += scnprintf(dbg_buff + nbytes,
  1619. IPA_MAX_MSG_LEN - nbytes,
  1620. "num_bytes: %lld\n",
  1621. ((struct ipa_flt_rt_stats *)
  1622. query->stats)[i].num_bytes);
  1623. nbytes += scnprintf(dbg_buff + nbytes,
  1624. IPA_MAX_MSG_LEN - nbytes,
  1625. "\n");
  1626. }
  1627. mutex_unlock(&ipa3_ctx->lock);
  1628. kfree((void *)(uintptr_t)(query->stats));
  1629. kfree(query);
  1630. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  1631. }
  1632. static ssize_t ipa_debugfs_reset_drop_stats(struct file *file,
  1633. const char __user *ubuf, size_t count, loff_t *ppos)
  1634. {
  1635. s8 client = 0;
  1636. int ret;
  1637. mutex_lock(&ipa3_ctx->lock);
  1638. ret = kstrtos8_from_user(ubuf, count, 0, &client);
  1639. if (ret)
  1640. goto bail;
  1641. if (client == -1)
  1642. ipa_reset_all_drop_stats();
  1643. else
  1644. ipa_reset_drop_stats(client);
  1645. ret = count;
  1646. bail:
  1647. mutex_unlock(&ipa3_ctx->lock);
  1648. return count;
  1649. }
  1650. static ssize_t ipa_debugfs_print_drop_stats(struct file *file,
  1651. char __user *ubuf, size_t count, loff_t *ppos)
  1652. {
  1653. int nbytes = 0;
  1654. struct ipa_drop_stats_all *out;
  1655. int i;
  1656. int res;
  1657. out = kzalloc(sizeof(*out), GFP_KERNEL);
  1658. if (!out)
  1659. return -ENOMEM;
  1660. mutex_lock(&ipa3_ctx->lock);
  1661. res = ipa_get_drop_stats(out);
  1662. if (res) {
  1663. mutex_unlock(&ipa3_ctx->lock);
  1664. kfree(out);
  1665. return res;
  1666. }
  1667. for (i = 0; i < IPA_CLIENT_MAX; i++) {
  1668. int ep_idx = ipa3_get_ep_mapping(i);
  1669. if (ep_idx == -1)
  1670. continue;
  1671. if (!IPA_CLIENT_IS_CONS(i))
  1672. continue;
  1673. if (IPA_CLIENT_IS_TEST(i))
  1674. continue;
  1675. if (!(ipa3_ctx->hw_stats.drop.init.enabled_bitmask &
  1676. (1 << ep_idx)))
  1677. continue;
  1678. nbytes += scnprintf(dbg_buff + nbytes,
  1679. IPA_MAX_MSG_LEN - nbytes,
  1680. "%s:\n",
  1681. ipa_clients_strings[i]);
  1682. nbytes += scnprintf(dbg_buff + nbytes,
  1683. IPA_MAX_MSG_LEN - nbytes,
  1684. "drop_byte_cnt=%u\n",
  1685. out->client[i].drop_byte_cnt);
  1686. nbytes += scnprintf(dbg_buff + nbytes,
  1687. IPA_MAX_MSG_LEN - nbytes,
  1688. "drop_packet_cnt=%u\n",
  1689. out->client[i].drop_packet_cnt);
  1690. nbytes += scnprintf(dbg_buff + nbytes,
  1691. IPA_MAX_MSG_LEN - nbytes,
  1692. "\n");
  1693. }
  1694. mutex_unlock(&ipa3_ctx->lock);
  1695. kfree(out);
  1696. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  1697. }
/* debugfs hooks for the "quota" node: read prints, write resets */
static const struct file_operations ipa3_quota_ops = {
.read = ipa_debugfs_print_quota_stats,
.write = ipa_debugfs_reset_quota_stats,
};
/* debugfs hooks for the "tethering" node: read prints, write resets */
static const struct file_operations ipa3_tethering_ops = {
.read = ipa_debugfs_print_tethering_stats,
.write = ipa_debugfs_reset_tethering_stats,
};
/* debugfs hooks for the "flt_rt" node: read prints, write takes commands */
static const struct file_operations ipa3_flt_rt_ops = {
.read = ipa_debugfs_print_flt_rt_stats,
.write = ipa_debugfs_control_flt_rt_stats,
};
/* debugfs hooks for the "drop" node: read prints, write resets */
static const struct file_operations ipa3_drop_ops = {
.read = ipa_debugfs_print_drop_stats,
.write = ipa_debugfs_reset_drop_stats,
};
  1714. int ipa_debugfs_init_stats(struct dentry *parent)
  1715. {
  1716. const mode_t read_write_mode = 0664;
  1717. struct dentry *file;
  1718. struct dentry *dent;
  1719. if (!ipa3_ctx->hw_stats.enabled)
  1720. return 0;
  1721. dent = debugfs_create_dir("hw_stats", parent);
  1722. if (IS_ERR_OR_NULL(dent)) {
  1723. IPAERR("fail to create folder in debug_fs\n");
  1724. return -EFAULT;
  1725. }
  1726. file = debugfs_create_file("quota", read_write_mode, dent, NULL,
  1727. &ipa3_quota_ops);
  1728. if (IS_ERR_OR_NULL(file)) {
  1729. IPAERR("fail to create file %s\n", "quota");
  1730. goto fail;
  1731. }
  1732. file = debugfs_create_file("drop", read_write_mode, dent, NULL,
  1733. &ipa3_drop_ops);
  1734. if (IS_ERR_OR_NULL(file)) {
  1735. IPAERR("fail to create file %s\n", "drop");
  1736. goto fail;
  1737. }
  1738. file = debugfs_create_file("tethering", read_write_mode, dent, NULL,
  1739. &ipa3_tethering_ops);
  1740. if (IS_ERR_OR_NULL(file)) {
  1741. IPAERR("fail to create file %s\n", "tethering");
  1742. goto fail;
  1743. }
  1744. file = debugfs_create_file("flt_rt", read_write_mode, dent, NULL,
  1745. &ipa3_flt_rt_ops);
  1746. if (IS_ERR_OR_NULL(file)) {
  1747. IPAERR("fail to create file flt_rt\n");
  1748. goto fail;
  1749. }
  1750. return 0;
  1751. fail:
  1752. debugfs_remove_recursive(dent);
  1753. return -EFAULT;
  1754. }
  1755. #endif