qdf_mem.c

  1. /*
  2. * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. /**
  20. * DOC: qdf_mem
  21. * This file provides OS dependent memory management APIs
  22. */
  23. #include "qdf_debugfs.h"
  24. #include "qdf_mem.h"
  25. #include "qdf_nbuf.h"
  26. #include "qdf_lock.h"
  27. #include "qdf_mc_timer.h"
  28. #include "qdf_module.h"
  29. #include <qdf_trace.h>
  30. #include "qdf_str.h"
  31. #include "qdf_talloc.h"
  32. #include <linux/debugfs.h>
  33. #include <linux/seq_file.h>
  34. #include <linux/string.h>
  35. #include <qdf_list.h>
  36. #ifdef CNSS_MEM_PRE_ALLOC
  37. #ifdef CONFIG_CNSS_OUT_OF_TREE
  38. #include "cnss_prealloc.h"
  39. #else
  40. #include <net/cnss_prealloc.h>
  41. #endif
  42. #endif
  43. /* cnss prealloc maintains various prealloc pools of 8 KB, 16 KB, 32 KB and
  44. * so on, and allocates buffers from these pools for the wlan driver. When the
  45. * wlan driver asks to free a buffer, cnss prealloc derives the slab_cache
  46. * from the virtual address via the page struct to identify the prealloc pool
  47. * id and put the buffer back into that pool. Kernel 5.17 removed slab_cache
  48. * from the page struct, so headroom is added to store the cache pointer at
  49. * the beginning of the allocated buffer for later prealloc pool id lookup.
  50. */
  51. #if defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE)
  52. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
  53. static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
  54. {
  55. return true;
  56. }
  57. #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  58. static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
  59. {
  60. return false;
  61. }
  62. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  63. #else /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
  64. static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
  65. {
  66. return false;
  67. }
  68. #endif /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
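/*
 * Illustrative sketch (not part of the driver): when the helper above returns
 * true, allocation requests are padded by sizeof(void *) and the pointer
 * handed to callers is advanced past that headroom:
 *
 *     [ sizeof(void *) headroom for slab cache pointer ][ caller's data ... ]
 *     ^ buffer from wcnss_prealloc_get()                 ^ pointer returned to caller
 *
 * The headroom gives cnss prealloc a place to keep the cache pointer so the
 * prealloc pool id can still be identified when the buffer is freed, as
 * described in the comment above.
 */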
  69. #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
  70. static bool mem_debug_disabled;
  71. qdf_declare_param(mem_debug_disabled, bool);
  72. #endif
  73. #ifdef MEMORY_DEBUG
  74. static bool is_initial_mem_debug_disabled;
  75. #endif
  76. /* Preprocessor Definitions and Constants */
  77. #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
  78. #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
  79. #define QDF_DEBUG_STRING_SIZE 512
  80. /**
  81. * struct __qdf_mem_stat - qdf memory statistics
  82. * @kmalloc: total kmalloc allocations
  83. * @dma: total dma allocations
  84. * @skb: total skb allocations
  85. * @skb_total: total skb allocations in host driver
  86. * @dp_tx_skb: total Tx skb allocations in datapath
  87. * @dp_rx_skb: total Rx skb allocations in datapath
  88. * @skb_mem_max: high watermark for skb allocations
  89. * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
  90. * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
  91. * @dp_tx_skb_count: DP Tx buffer count
  92. * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
  93. * @dp_rx_skb_count: DP Rx buffer count
  94. * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
  95. * @tx_descs_outstanding: Current pending Tx descs count
  96. * @tx_descs_max: High watermark for pending Tx descs count
  97. */
  98. static struct __qdf_mem_stat {
  99. qdf_atomic_t kmalloc;
  100. qdf_atomic_t dma;
  101. qdf_atomic_t skb;
  102. qdf_atomic_t skb_total;
  103. qdf_atomic_t dp_tx_skb;
  104. qdf_atomic_t dp_rx_skb;
  105. int32_t skb_mem_max;
  106. int32_t dp_tx_skb_mem_max;
  107. int32_t dp_rx_skb_mem_max;
  108. qdf_atomic_t dp_tx_skb_count;
  109. int32_t dp_tx_skb_count_max;
  110. qdf_atomic_t dp_rx_skb_count;
  111. int32_t dp_rx_skb_count_max;
  112. qdf_atomic_t tx_descs_outstanding;
  113. int32_t tx_descs_max;
  114. } qdf_mem_stat;
  115. #ifdef MEMORY_DEBUG
  116. #include "qdf_debug_domain.h"
  117. enum list_type {
  118. LIST_TYPE_MEM = 0,
  119. LIST_TYPE_DMA = 1,
  120. LIST_TYPE_NBUF = 2,
  121. LIST_TYPE_MAX,
  122. };
  123. /**
  124. * struct major_alloc_priv - private data registered to the debugfs entry
  125. * created to list major allocations
  126. * @type: type of the list to be parsed
  127. * @threshold: configured by the user by writing to the respective debugfs
  128. * entry. Used to list the functions which requested
  129. * memory/dma allocations more than @threshold number of times.
  130. */
  131. struct major_alloc_priv {
  132. enum list_type type;
  133. uint32_t threshold;
  134. };
  135. static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
  136. static qdf_spinlock_t qdf_mem_list_lock;
  137. static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
  138. static qdf_spinlock_t qdf_mem_dma_list_lock;
  139. static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
  140. {
  141. return &qdf_mem_domains[domain];
  142. }
  143. static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
  144. {
  145. return &qdf_mem_dma_domains[domain];
  146. }
  147. /**
  148. * struct qdf_mem_header - memory object to debug
  149. * @node: node to the list
  150. * @domain: the active memory domain at time of allocation
  151. * @freed: flag set during free, used to detect double frees
  152. * Use uint8_t so we can detect corruption
  153. * @func: name of the function the allocation was made from
  154. * @line: line number of the file the allocation was made from
  155. * @size: size of the allocation in bytes
  156. * @caller: Caller of the function for which memory is allocated
  157. * @header: a known value, used to detect out-of-bounds access
  158. * @time: timestamp at which allocation was made
  159. */
  160. struct qdf_mem_header {
  161. qdf_list_node_t node;
  162. enum qdf_debug_domain domain;
  163. uint8_t freed;
  164. char func[QDF_MEM_FUNC_NAME_SIZE];
  165. uint32_t line;
  166. uint32_t size;
  167. void *caller;
  168. uint64_t header;
  169. uint64_t time;
  170. };
  171. /* align the qdf_mem_header to 8 bytes */
  172. #define QDF_DMA_MEM_HEADER_ALIGN 8
  173. static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
  174. static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
  175. static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
  176. {
  177. return (struct qdf_mem_header *)ptr - 1;
  178. }
  179. /* make sure the header pointer is 8-byte aligned */
  180. static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
  181. qdf_size_t size)
  182. {
  183. return (struct qdf_mem_header *)
  184. qdf_roundup((size_t)((uint8_t *)ptr + size),
  185. QDF_DMA_MEM_HEADER_ALIGN);
  186. }
  187. static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
  188. {
  189. return (uint64_t *)((void *)(header + 1) + header->size);
  190. }
  191. static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
  192. {
  193. return (void *)(header + 1);
  194. }
  195. /* number of bytes needed for the qdf memory debug information */
  196. #define QDF_MEM_DEBUG_SIZE \
  197. (sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
  198. /* number of bytes needed for the qdf dma memory debug information */
  199. #define QDF_DMA_MEM_DEBUG_SIZE \
  200. (sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
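/*
 * Illustrative layout sketch (not part of the driver): with MEMORY_DEBUG
 * enabled, a debug allocation is wrapped as
 *
 *     [ struct qdf_mem_header ][ caller's data (header->size bytes) ][ WLAN_MEM_TRAILER ]
 *                               ^ pointer returned by qdf_mem_get_ptr()
 *
 * qdf_mem_get_header() steps back one header from the caller's pointer and
 * qdf_mem_get_trailer() skips header->size bytes past the data, which is why
 * QDF_MEM_DEBUG_SIZE adds sizeof(struct qdf_mem_header) plus the trailer.
 * For DMA allocations the header is instead placed after the buffer, rounded
 * up to QDF_DMA_MEM_HEADER_ALIGN (see qdf_mem_dma_get_header()).
 */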
  201. static void qdf_mem_trailer_init(struct qdf_mem_header *header)
  202. {
  203. QDF_BUG(header);
  204. if (!header)
  205. return;
  206. *qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
  207. }
  208. static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
  209. const char *func, uint32_t line, void *caller)
  210. {
  211. QDF_BUG(header);
  212. if (!header)
  213. return;
  214. header->domain = qdf_debug_domain_get();
  215. header->freed = false;
  216. qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
  217. header->line = line;
  218. header->size = size;
  219. header->caller = caller;
  220. header->header = WLAN_MEM_HEADER;
  221. header->time = qdf_get_log_timestamp();
  222. }
  223. enum qdf_mem_validation_bitmap {
  224. QDF_MEM_BAD_HEADER = 1 << 0,
  225. QDF_MEM_BAD_TRAILER = 1 << 1,
  226. QDF_MEM_BAD_SIZE = 1 << 2,
  227. QDF_MEM_DOUBLE_FREE = 1 << 3,
  228. QDF_MEM_BAD_FREED = 1 << 4,
  229. QDF_MEM_BAD_NODE = 1 << 5,
  230. QDF_MEM_BAD_DOMAIN = 1 << 6,
  231. QDF_MEM_WRONG_DOMAIN = 1 << 7,
  232. };
  233. static enum qdf_mem_validation_bitmap
  234. qdf_mem_trailer_validate(struct qdf_mem_header *header)
  235. {
  236. enum qdf_mem_validation_bitmap error_bitmap = 0;
  237. if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
  238. error_bitmap |= QDF_MEM_BAD_TRAILER;
  239. return error_bitmap;
  240. }
  241. static enum qdf_mem_validation_bitmap
  242. qdf_mem_header_validate(struct qdf_mem_header *header,
  243. enum qdf_debug_domain domain)
  244. {
  245. enum qdf_mem_validation_bitmap error_bitmap = 0;
  246. if (header->header != WLAN_MEM_HEADER)
  247. error_bitmap |= QDF_MEM_BAD_HEADER;
  248. if (header->size > QDF_MEM_MAX_MALLOC)
  249. error_bitmap |= QDF_MEM_BAD_SIZE;
  250. if (header->freed == true)
  251. error_bitmap |= QDF_MEM_DOUBLE_FREE;
  252. else if (header->freed)
  253. error_bitmap |= QDF_MEM_BAD_FREED;
  254. if (!qdf_list_node_in_any_list(&header->node))
  255. error_bitmap |= QDF_MEM_BAD_NODE;
  256. if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
  257. header->domain >= QDF_DEBUG_DOMAIN_COUNT)
  258. error_bitmap |= QDF_MEM_BAD_DOMAIN;
  259. else if (header->domain != domain)
  260. error_bitmap |= QDF_MEM_WRONG_DOMAIN;
  261. return error_bitmap;
  262. }
  263. static void
  264. qdf_mem_header_assert_valid(struct qdf_mem_header *header,
  265. enum qdf_debug_domain current_domain,
  266. enum qdf_mem_validation_bitmap error_bitmap,
  267. const char *func,
  268. uint32_t line)
  269. {
  270. if (!error_bitmap)
  271. return;
  272. if (error_bitmap & QDF_MEM_BAD_HEADER)
  273. qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
  274. header->header, WLAN_MEM_HEADER);
  275. if (error_bitmap & QDF_MEM_BAD_SIZE)
  276. qdf_err("Corrupted memory size %u (expected < %d)",
  277. header->size, QDF_MEM_MAX_MALLOC);
  278. if (error_bitmap & QDF_MEM_BAD_TRAILER)
  279. qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
  280. *qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
  281. if (error_bitmap & QDF_MEM_DOUBLE_FREE)
  282. qdf_err("Memory has previously been freed");
  283. if (error_bitmap & QDF_MEM_BAD_FREED)
  284. qdf_err("Corrupted memory freed flag 0x%x", header->freed);
  285. if (error_bitmap & QDF_MEM_BAD_NODE)
  286. qdf_err("Corrupted memory header node or double free");
  287. if (error_bitmap & QDF_MEM_BAD_DOMAIN)
  288. qdf_err("Corrupted memory domain 0x%x", header->domain);
  289. if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
  290. qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
  291. qdf_debug_domain_name(header->domain), header->domain,
  292. qdf_debug_domain_name(current_domain), current_domain);
  293. QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
  294. }
  295. /**
  296. * struct __qdf_mem_info - memory statistics
  297. * @func: the function which allocated memory
  298. * @line: the line at which allocation happened
  299. * @size: the size of allocation
  300. * @caller: Address of the caller function
  301. * @count: how many allocations of same type
  302. * @time: timestamp at which allocation happened
  303. */
  304. struct __qdf_mem_info {
  305. char func[QDF_MEM_FUNC_NAME_SIZE];
  306. uint32_t line;
  307. uint32_t size;
  308. void *caller;
  309. uint32_t count;
  310. uint64_t time;
  311. };
  312. /*
  313. * The table depth defines the de-duplication proximity scope.
  314. * A deeper table takes more time, so choose an optimum value.
  315. */
  316. #define QDF_MEM_STAT_TABLE_SIZE 8
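/*
 * Illustrative note (not part of the driver): qdf_mem_meta_table_insert()
 * below merges allocations that share func/line/size/caller into a single
 * row and increments its count; qdf_mem_domain_print() flushes the table to
 * the printer and clears it once all QDF_MEM_STAT_TABLE_SIZE rows are in
 * use, so de-duplication only happens within each flush window.
 */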
  317. /**
  318. * qdf_mem_debug_print_header() - memory debug header print logic
  319. * @print: the print adapter function
  320. * @print_priv: the private data to be consumed by @print
  321. * @threshold: the threshold value set by user to list top allocations
  322. *
  323. * Return: None
  324. */
  325. static void qdf_mem_debug_print_header(qdf_abstract_print print,
  326. void *print_priv,
  327. uint32_t threshold)
  328. {
  329. if (threshold)
  330. print(print_priv, "APIs requested allocations >= %u no of time",
  331. threshold);
  332. print(print_priv,
  333. "--------------------------------------------------------------");
  334. print(print_priv,
  335. " count size total filename caller timestamp");
  336. print(print_priv,
  337. "--------------------------------------------------------------");
  338. }
  339. /**
  340. * qdf_mem_meta_table_insert() - insert memory metadata into the given table
  341. * @table: the memory metadata table to insert into
  342. * @meta: the memory metadata to insert
  343. *
  344. * Return: true if the table is full after inserting, false otherwise
  345. */
  346. static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
  347. struct qdf_mem_header *meta)
  348. {
  349. int i;
  350. for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
  351. if (!table[i].count) {
  352. qdf_str_lcopy(table[i].func, meta->func,
  353. QDF_MEM_FUNC_NAME_SIZE);
  354. table[i].line = meta->line;
  355. table[i].size = meta->size;
  356. table[i].count = 1;
  357. table[i].caller = meta->caller;
  358. table[i].time = meta->time;
  359. break;
  360. }
  361. if (qdf_str_eq(table[i].func, meta->func) &&
  362. table[i].line == meta->line &&
  363. table[i].size == meta->size &&
  364. table[i].caller == meta->caller) {
  365. table[i].count++;
  366. break;
  367. }
  368. }
  369. /* return true if the table is now full */
  370. return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
  371. }
  372. /**
  373. * qdf_mem_domain_print() - output agnostic memory domain print logic
  374. * @domain: the memory domain to print
  375. * @print: the print adapter function
  376. * @print_priv: the private data to be consumed by @print
  377. * @threshold: the threshold value set by user to list top allocations
  378. * @mem_print: pointer to function which prints the memory allocation data
  379. *
  380. * Return: None
  381. */
  382. static void qdf_mem_domain_print(qdf_list_t *domain,
  383. qdf_abstract_print print,
  384. void *print_priv,
  385. uint32_t threshold,
  386. void (*mem_print)(struct __qdf_mem_info *,
  387. qdf_abstract_print,
  388. void *, uint32_t))
  389. {
  390. QDF_STATUS status;
  391. struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
  392. qdf_list_node_t *node;
  393. qdf_mem_zero(table, sizeof(table));
  394. qdf_mem_debug_print_header(print, print_priv, threshold);
  395. /* hold lock while inserting to avoid use-after-free of the metadata */
  396. qdf_spin_lock(&qdf_mem_list_lock);
  397. status = qdf_list_peek_front(domain, &node);
  398. while (QDF_IS_STATUS_SUCCESS(status)) {
  399. struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
  400. bool is_full = qdf_mem_meta_table_insert(table, meta);
  401. qdf_spin_unlock(&qdf_mem_list_lock);
  402. if (is_full) {
  403. (*mem_print)(table, print, print_priv, threshold);
  404. qdf_mem_zero(table, sizeof(table));
  405. }
  406. qdf_spin_lock(&qdf_mem_list_lock);
  407. status = qdf_list_peek_next(domain, node, &node);
  408. }
  409. qdf_spin_unlock(&qdf_mem_list_lock);
  410. (*mem_print)(table, print, print_priv, threshold);
  411. }
  412. /**
  413. * qdf_mem_meta_table_print() - memory metadata table print logic
  414. * @table: the memory metadata table to print
  415. * @print: the print adapter function
  416. * @print_priv: the private data to be consumed by @print
  417. * @threshold: the threshold value set by user to list top allocations
  418. *
  419. * Return: None
  420. */
  421. static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
  422. qdf_abstract_print print,
  423. void *print_priv,
  424. uint32_t threshold)
  425. {
  426. int i;
  427. char debug_str[QDF_DEBUG_STRING_SIZE];
  428. size_t len = 0;
  429. char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
  430. len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
  431. "%s", debug_prefix);
  432. for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
  433. if (!table[i].count)
  434. break;
  435. print(print_priv,
  436. "%6u x %5u = %7uB @ %s:%u %pS %llu",
  437. table[i].count,
  438. table[i].size,
  439. table[i].count * table[i].size,
  440. table[i].func,
  441. table[i].line, table[i].caller,
  442. table[i].time);
  443. len += qdf_scnprintf(debug_str + len,
  444. sizeof(debug_str) - len,
  445. " @ %s:%u %pS",
  446. table[i].func,
  447. table[i].line,
  448. table[i].caller);
  449. }
  450. print(print_priv, "%s", debug_str);
  451. }
  452. static int qdf_err_printer(void *priv, const char *fmt, ...)
  453. {
  454. va_list args;
  455. va_start(args, fmt);
  456. QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
  457. va_end(args);
  458. return 0;
  459. }
  460. #endif /* MEMORY_DEBUG */
  461. bool prealloc_disabled = 1;
  462. qdf_declare_param(prealloc_disabled, bool);
  463. qdf_export_symbol(prealloc_disabled);
  464. int qdf_mem_malloc_flags(void)
  465. {
  466. if (in_interrupt() || !preemptible() || rcu_preempt_depth())
  467. return GFP_ATOMIC;
  468. return GFP_KERNEL;
  469. }
  470. qdf_export_symbol(qdf_mem_malloc_flags);
  471. /**
  472. * qdf_prealloc_disabled_config_get() - Get the user configuration of
  473. * prealloc_disabled
  474. *
  475. * Return: value of prealloc_disabled qdf module argument
  476. */
  477. bool qdf_prealloc_disabled_config_get(void)
  478. {
  479. return prealloc_disabled;
  480. }
  481. qdf_export_symbol(qdf_prealloc_disabled_config_get);
  482. #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
  483. /**
  484. * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
  485. * @str_value: value of the module param
  486. *
  487. * This function will set qdf module param prealloc_disabled
  488. *
  489. * Return: QDF_STATUS_SUCCESS on Success
  490. */
  491. QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
  492. {
  493. QDF_STATUS status;
  494. status = qdf_bool_parse(str_value, &prealloc_disabled);
  495. return status;
  496. }
  497. #endif
  498. #if defined WLAN_DEBUGFS
  499. /* Debugfs root directory for qdf_mem */
  500. static struct dentry *qdf_mem_debugfs_root;
  501. #ifdef MEMORY_DEBUG
  502. static int seq_printf_printer(void *priv, const char *fmt, ...)
  503. {
  504. struct seq_file *file = priv;
  505. va_list args;
  506. va_start(args, fmt);
  507. seq_vprintf(file, fmt, args);
  508. seq_puts(file, "\n");
  509. va_end(args);
  510. return 0;
  511. }
  512. /**
  513. * qdf_print_major_alloc() - major allocations table print logic
  514. * @table: the memory metadata table to print
  515. * @print: the print adapter function
  516. * @print_priv: the private data to be consumed by @print
  517. * @threshold: the threshold value set by user to list top allocations
  518. *
  519. * Return: None
  520. */
  521. static void qdf_print_major_alloc(struct __qdf_mem_info *table,
  522. qdf_abstract_print print,
  523. void *print_priv,
  524. uint32_t threshold)
  525. {
  526. int i;
  527. for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
  528. if (!table[i].count)
  529. break;
  530. if (table[i].count >= threshold)
  531. print(print_priv,
  532. "%6u x %5u = %7uB @ %s:%u %pS %llu",
  533. table[i].count,
  534. table[i].size,
  535. table[i].count * table[i].size,
  536. table[i].func,
  537. table[i].line, table[i].caller,
  538. table[i].time);
  539. }
  540. }
  541. /**
  542. * qdf_mem_seq_start() - sequential callback to start
  543. * @seq: seq_file handle
  544. * @pos: The start position of the sequence
  545. *
  546. * Return: iterator pointer, or NULL if iteration is complete
  547. */
  548. static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
  549. {
  550. enum qdf_debug_domain domain = *pos;
  551. if (!qdf_debug_domain_valid(domain))
  552. return NULL;
  553. /* just use the current position as our iterator */
  554. return pos;
  555. }
  556. /**
  557. * qdf_mem_seq_next() - next sequential callback
  558. * @seq: seq_file handle
  559. * @v: the current iterator
  560. * @pos: the current position
  561. *
  562. * Get the next node and release previous node.
  563. *
  564. * Return: iterator pointer, or NULL if iteration is complete
  565. */
  566. static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  567. {
  568. ++*pos;
  569. return qdf_mem_seq_start(seq, pos);
  570. }
  571. /**
  572. * qdf_mem_seq_stop() - stop sequential callback
  573. * @seq: seq_file handle
  574. * @v: current iterator
  575. *
  576. * Return: None
  577. */
  578. static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
  579. /**
  580. * qdf_mem_seq_show() - print sequential callback
  581. * @seq: seq_file handle
  582. * @v: current iterator
  583. *
  584. * Return: 0 - success
  585. */
  586. static int qdf_mem_seq_show(struct seq_file *seq, void *v)
  587. {
  588. enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
  589. seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
  590. qdf_debug_domain_name(domain_id), domain_id);
  591. qdf_mem_domain_print(qdf_mem_list_get(domain_id),
  592. seq_printf_printer,
  593. seq,
  594. 0,
  595. qdf_mem_meta_table_print);
  596. return 0;
  597. }
  598. /* sequential file operation table */
  599. static const struct seq_operations qdf_mem_seq_ops = {
  600. .start = qdf_mem_seq_start,
  601. .next = qdf_mem_seq_next,
  602. .stop = qdf_mem_seq_stop,
  603. .show = qdf_mem_seq_show,
  604. };
  605. static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
  606. {
  607. return seq_open(file, &qdf_mem_seq_ops);
  608. }
  609. /**
  610. * qdf_major_alloc_show() - print sequential callback
  611. * @seq: seq_file handle
  612. * @v: current iterator
  613. *
  614. * Return: 0 - success
  615. */
  616. static int qdf_major_alloc_show(struct seq_file *seq, void *v)
  617. {
  618. enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
  619. struct major_alloc_priv *priv;
  620. qdf_list_t *list;
  621. priv = (struct major_alloc_priv *)seq->private;
  622. seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
  623. qdf_debug_domain_name(domain_id), domain_id);
  624. switch (priv->type) {
  625. case LIST_TYPE_MEM:
  626. list = qdf_mem_list_get(domain_id);
  627. break;
  628. case LIST_TYPE_DMA:
  629. list = qdf_mem_dma_list(domain_id);
  630. break;
  631. default:
  632. list = NULL;
  633. break;
  634. }
  635. if (list)
  636. qdf_mem_domain_print(list,
  637. seq_printf_printer,
  638. seq,
  639. priv->threshold,
  640. qdf_print_major_alloc);
  641. return 0;
  642. }
  643. /* sequential file operation table created to track major allocs */
  644. static const struct seq_operations qdf_major_allocs_seq_ops = {
  645. .start = qdf_mem_seq_start,
  646. .next = qdf_mem_seq_next,
  647. .stop = qdf_mem_seq_stop,
  648. .show = qdf_major_alloc_show,
  649. };
  650. static int qdf_major_allocs_open(struct inode *inode, struct file *file)
  651. {
  652. void *private = inode->i_private;
  653. struct seq_file *seq;
  654. int rc;
  655. rc = seq_open(file, &qdf_major_allocs_seq_ops);
  656. if (rc == 0) {
  657. seq = file->private_data;
  658. seq->private = private;
  659. }
  660. return rc;
  661. }
  662. static ssize_t qdf_major_alloc_set_threshold(struct file *file,
  663. const char __user *user_buf,
  664. size_t count,
  665. loff_t *pos)
  666. {
  667. char buf[32];
  668. ssize_t buf_size;
  669. uint32_t threshold;
  670. struct seq_file *seq = file->private_data;
  671. struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
  672. buf_size = min(count, (sizeof(buf) - 1));
  673. if (buf_size <= 0)
  674. return 0;
  675. if (copy_from_user(buf, user_buf, buf_size))
  676. return -EFAULT;
  677. buf[buf_size] = '\0';
  678. if (!kstrtou32(buf, 10, &threshold))
  679. priv->threshold = threshold;
  680. return buf_size;
  681. }
  682. /**
  683. * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
  684. * @threshold: the threshold value set by user to list top allocations
  685. * @print: the print adapter function
  686. * @print_priv: the private data to be consumed by @print
  687. * @mem_print: pointer to function which prints the memory allocation data
  688. *
  689. * Return: None
  690. */
  691. static void
  692. qdf_print_major_nbuf_allocs(uint32_t threshold,
  693. qdf_abstract_print print,
  694. void *print_priv,
  695. void (*mem_print)(struct __qdf_mem_info *,
  696. qdf_abstract_print,
  697. void *, uint32_t))
  698. {
  699. uint32_t nbuf_iter;
  700. unsigned long irq_flag = 0;
  701. QDF_NBUF_TRACK *p_node;
  702. struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
  703. struct qdf_mem_header meta;
  704. bool is_full;
  705. qdf_mem_zero(table, sizeof(table));
  706. qdf_mem_debug_print_header(print, print_priv, threshold);
  707. if (is_initial_mem_debug_disabled)
  708. return;
  709. qdf_rl_info("major nbuf print with threshold %u", threshold);
  710. for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
  711. nbuf_iter++) {
  712. qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
  713. p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
  714. while (p_node) {
  715. meta.line = p_node->line_num;
  716. meta.size = p_node->size;
  717. meta.caller = NULL;
  718. meta.time = p_node->time;
  719. qdf_str_lcopy(meta.func, p_node->func_name,
  720. QDF_MEM_FUNC_NAME_SIZE);
  721. is_full = qdf_mem_meta_table_insert(table, &meta);
  722. if (is_full) {
  723. (*mem_print)(table, print,
  724. print_priv, threshold);
  725. qdf_mem_zero(table, sizeof(table));
  726. }
  727. p_node = p_node->p_next;
  728. }
  729. qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
  730. }
  731. (*mem_print)(table, print, print_priv, threshold);
  732. qdf_rl_info("major nbuf print end");
  733. }
  734. /**
  735. * qdf_major_nbuf_alloc_show() - print sequential callback
  736. * @seq: seq_file handle
  737. * @v: current iterator
  738. *
  739. * Return: 0 - success
  740. */
  741. static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
  742. {
  743. struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
  744. if (!priv) {
  745. qdf_err("priv is null");
  746. return -EINVAL;
  747. }
  748. qdf_print_major_nbuf_allocs(priv->threshold,
  749. seq_printf_printer,
  750. seq,
  751. qdf_print_major_alloc);
  752. return 0;
  753. }
  754. /**
  755. * qdf_nbuf_seq_start() - sequential callback to start
  756. * @seq: seq_file handle
  757. * @pos: The start position of the sequence
  758. *
  759. * Return: iterator pointer, or NULL if iteration is complete
  760. */
  761. static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
  762. {
  763. enum qdf_debug_domain domain = *pos;
  764. if (domain > QDF_DEBUG_NBUF_DOMAIN)
  765. return NULL;
  766. return pos;
  767. }
  768. /**
  769. * qdf_nbuf_seq_next() - next sequential callback
  770. * @seq: seq_file handle
  771. * @v: the current iterator
  772. * @pos: the current position
  773. *
  774. * Get the next node and release previous node.
  775. *
  776. * Return: iterator pointer, or NULL if iteration is complete
  777. */
  778. static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  779. {
  780. ++*pos;
  781. return qdf_nbuf_seq_start(seq, pos);
  782. }
  783. /**
  784. * qdf_nbuf_seq_stop() - stop sequential callback
  785. * @seq: seq_file handle
  786. * @v: current iterator
  787. *
  788. * Return: None
  789. */
  790. static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
  791. /* sequential file operation table created to track major skb allocs */
  792. static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
  793. .start = qdf_nbuf_seq_start,
  794. .next = qdf_nbuf_seq_next,
  795. .stop = qdf_nbuf_seq_stop,
  796. .show = qdf_major_nbuf_alloc_show,
  797. };
  798. static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
  799. {
  800. void *private = inode->i_private;
  801. struct seq_file *seq;
  802. int rc;
  803. rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
  804. if (rc == 0) {
  805. seq = file->private_data;
  806. seq->private = private;
  807. }
  808. return rc;
  809. }
  810. static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
  811. const char __user *user_buf,
  812. size_t count,
  813. loff_t *pos)
  814. {
  815. char buf[32];
  816. ssize_t buf_size;
  817. uint32_t threshold;
  818. struct seq_file *seq = file->private_data;
  819. struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
  820. buf_size = min(count, (sizeof(buf) - 1));
  821. if (buf_size <= 0)
  822. return 0;
  823. if (copy_from_user(buf, user_buf, buf_size))
  824. return -EFAULT;
  825. buf[buf_size] = '\0';
  826. if (!kstrtou32(buf, 10, &threshold))
  827. priv->threshold = threshold;
  828. return buf_size;
  829. }
  830. /* file operation table for listing major allocs */
  831. static const struct file_operations fops_qdf_major_allocs = {
  832. .owner = THIS_MODULE,
  833. .open = qdf_major_allocs_open,
  834. .read = seq_read,
  835. .llseek = seq_lseek,
  836. .release = seq_release,
  837. .write = qdf_major_alloc_set_threshold,
  838. };
  839. /* debugfs file operation table */
  840. static const struct file_operations fops_qdf_mem_debugfs = {
  841. .owner = THIS_MODULE,
  842. .open = qdf_mem_debugfs_open,
  843. .read = seq_read,
  844. .llseek = seq_lseek,
  845. .release = seq_release,
  846. };
  847. /* file operation table for listing major allocs */
  848. static const struct file_operations fops_qdf_nbuf_major_allocs = {
  849. .owner = THIS_MODULE,
  850. .open = qdf_major_nbuf_allocs_open,
  851. .read = seq_read,
  852. .llseek = seq_lseek,
  853. .release = seq_release,
  854. .write = qdf_major_nbuf_alloc_set_threshold,
  855. };
  856. static struct major_alloc_priv mem_priv = {
  857. /* List type set to mem */
  858. LIST_TYPE_MEM,
  859. /* initial threshold to list APIs which allocate mem >= 50 times */
  860. 50
  861. };
  862. static struct major_alloc_priv dma_priv = {
  863. /* List type set to DMA */
  864. LIST_TYPE_DMA,
  865. /* initial threshold to list APIs which allocate dma >= 50 times */
  866. 50
  867. };
  868. static struct major_alloc_priv nbuf_priv = {
  869. /* List type set to NBUF */
  870. LIST_TYPE_NBUF,
  871. /* initial threshold to list APIs which allocate nbuf >= 50 times */
  872. 50
  873. };
  874. static QDF_STATUS qdf_mem_debug_debugfs_init(void)
  875. {
  876. if (is_initial_mem_debug_disabled)
  877. return QDF_STATUS_SUCCESS;
  878. if (!qdf_mem_debugfs_root)
  879. return QDF_STATUS_E_FAILURE;
  880. debugfs_create_file("list",
  881. S_IRUSR,
  882. qdf_mem_debugfs_root,
  883. NULL,
  884. &fops_qdf_mem_debugfs);
  885. debugfs_create_file("major_mem_allocs",
  886. 0600,
  887. qdf_mem_debugfs_root,
  888. &mem_priv,
  889. &fops_qdf_major_allocs);
  890. debugfs_create_file("major_dma_allocs",
  891. 0600,
  892. qdf_mem_debugfs_root,
  893. &dma_priv,
  894. &fops_qdf_major_allocs);
  895. debugfs_create_file("major_nbuf_allocs",
  896. 0600,
  897. qdf_mem_debugfs_root,
  898. &nbuf_priv,
  899. &fops_qdf_nbuf_major_allocs);
  900. return QDF_STATUS_SUCCESS;
  901. }
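/*
 * Usage sketch (assumed debugfs mount point, not part of the driver): the
 * "major_*_allocs" files created above accept a base-10 threshold on write
 * and, on read, list call sites whose live allocation count meets it, e.g.
 *
 *     echo 100 > /sys/kernel/debug/<qdf root>/mem/major_mem_allocs
 *     cat /sys/kernel/debug/<qdf root>/mem/major_mem_allocs
 *
 * where <qdf root> is the directory returned by qdf_debugfs_get_root().
 */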
  902. static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
  903. {
  904. return QDF_STATUS_SUCCESS;
  905. }
  906. #else /* MEMORY_DEBUG */
  907. static QDF_STATUS qdf_mem_debug_debugfs_init(void)
  908. {
  909. return QDF_STATUS_E_NOSUPPORT;
  910. }
  911. static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
  912. {
  913. return QDF_STATUS_E_NOSUPPORT;
  914. }
  915. #endif /* MEMORY_DEBUG */
  916. static void qdf_mem_debugfs_exit(void)
  917. {
  918. debugfs_remove_recursive(qdf_mem_debugfs_root);
  919. qdf_mem_debugfs_root = NULL;
  920. }
  921. static QDF_STATUS qdf_mem_debugfs_init(void)
  922. {
  923. struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
  924. if (!qdf_debugfs_root)
  925. return QDF_STATUS_E_FAILURE;
  926. qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
  927. if (!qdf_mem_debugfs_root)
  928. return QDF_STATUS_E_FAILURE;
  929. debugfs_create_atomic_t("kmalloc",
  930. S_IRUSR,
  931. qdf_mem_debugfs_root,
  932. &qdf_mem_stat.kmalloc);
  933. debugfs_create_atomic_t("dma",
  934. S_IRUSR,
  935. qdf_mem_debugfs_root,
  936. &qdf_mem_stat.dma);
  937. debugfs_create_atomic_t("skb",
  938. S_IRUSR,
  939. qdf_mem_debugfs_root,
  940. &qdf_mem_stat.skb);
  941. return QDF_STATUS_SUCCESS;
  942. }
  943. #else /* WLAN_DEBUGFS */
  944. static QDF_STATUS qdf_mem_debugfs_init(void)
  945. {
  946. return QDF_STATUS_E_NOSUPPORT;
  947. }
  948. static void qdf_mem_debugfs_exit(void) {}
  949. static QDF_STATUS qdf_mem_debug_debugfs_init(void)
  950. {
  951. return QDF_STATUS_E_NOSUPPORT;
  952. }
  953. static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
  954. {
  955. return QDF_STATUS_E_NOSUPPORT;
  956. }
  957. #endif /* WLAN_DEBUGFS */
  958. void qdf_mem_kmalloc_inc(qdf_size_t size)
  959. {
  960. qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
  961. }
  962. static void qdf_mem_dma_inc(qdf_size_t size)
  963. {
  964. qdf_atomic_add(size, &qdf_mem_stat.dma);
  965. }
  966. #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
  967. void qdf_mem_skb_inc(qdf_size_t size)
  968. {
  969. qdf_atomic_add(size, &qdf_mem_stat.skb);
  970. }
  971. void qdf_mem_skb_dec(qdf_size_t size)
  972. {
  973. qdf_atomic_sub(size, &qdf_mem_stat.skb);
  974. }
  975. void qdf_mem_skb_total_inc(qdf_size_t size)
  976. {
  977. int32_t skb_mem_max = 0;
  978. qdf_atomic_add(size, &qdf_mem_stat.skb_total);
  979. skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
  980. if (qdf_mem_stat.skb_mem_max < skb_mem_max)
  981. qdf_mem_stat.skb_mem_max = skb_mem_max;
  982. }
  983. void qdf_mem_skb_total_dec(qdf_size_t size)
  984. {
  985. qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
  986. }
  987. void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
  988. {
  989. int32_t curr_dp_tx_skb_mem_max = 0;
  990. qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
  991. curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
  992. if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
  993. qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
  994. }
  995. void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
  996. {
  997. qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
  998. }
  999. void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
  1000. {
  1001. int32_t curr_dp_rx_skb_mem_max = 0;
  1002. qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
  1003. curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
  1004. if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
  1005. qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
  1006. }
  1007. void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
  1008. {
  1009. qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
  1010. }
  1011. void qdf_mem_dp_tx_skb_cnt_inc(void)
  1012. {
  1013. int32_t curr_dp_tx_skb_count_max = 0;
  1014. qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
  1015. curr_dp_tx_skb_count_max =
  1016. qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
  1017. if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
  1018. qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
  1019. }
  1020. void qdf_mem_dp_tx_skb_cnt_dec(void)
  1021. {
  1022. qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
  1023. }
  1024. void qdf_mem_dp_rx_skb_cnt_inc(void)
  1025. {
  1026. int32_t curr_dp_rx_skb_count_max = 0;
  1027. qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
  1028. curr_dp_rx_skb_count_max =
  1029. qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
  1030. if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
  1031. qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
  1032. }
  1033. void qdf_mem_dp_rx_skb_cnt_dec(void)
  1034. {
  1035. qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
  1036. }
  1037. #endif
  1038. void qdf_mem_kmalloc_dec(qdf_size_t size)
  1039. {
  1040. qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
  1041. }
  1042. static inline void qdf_mem_dma_dec(qdf_size_t size)
  1043. {
  1044. qdf_atomic_sub(size, &qdf_mem_stat.dma);
  1045. }
  1046. /**
  1047. * __qdf_mempool_init() - Create and initialize memory pool
  1048. *
  1049. * @osdev: platform device object
  1050. * @pool_addr: address of the pool created
  1051. * @elem_cnt: no. of elements in pool
  1052. * @elem_size: size of each pool element in bytes
  1053. * @flags: flags
  1054. *
  1055. * Return: 0 on success, or a non-zero error code if the allocation failed
  1056. */
  1057. int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
  1058. int elem_cnt, size_t elem_size, u_int32_t flags)
  1059. {
  1060. __qdf_mempool_ctxt_t *new_pool = NULL;
  1061. u_int32_t align = L1_CACHE_BYTES;
  1062. unsigned long aligned_pool_mem;
  1063. int pool_id;
  1064. int i;
  1065. if (prealloc_disabled) {
  1066. /* TBD: We can maintain a list of pools in qdf_device_t
  1067. * to help debugging
  1068. * when pre-allocation is not enabled
  1069. */
  1070. new_pool = (__qdf_mempool_ctxt_t *)
  1071. kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
  1072. if (!new_pool)
  1073. return QDF_STATUS_E_NOMEM;
  1074. memset(new_pool, 0, sizeof(*new_pool));
  1075. /* TBD: define flags for zeroing buffers etc */
  1076. new_pool->flags = flags;
  1077. new_pool->elem_size = elem_size;
  1078. new_pool->max_elem = elem_cnt;
  1079. *pool_addr = new_pool;
  1080. return 0;
  1081. }
  1082. for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
  1083. if (!osdev->mem_pool[pool_id])
  1084. break;
  1085. }
  1086. if (pool_id == MAX_MEM_POOLS)
  1087. return -ENOMEM;
  1088. new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
  1089. kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
  1090. if (!new_pool)
  1091. return -ENOMEM;
  1092. memset(new_pool, 0, sizeof(*new_pool));
  1093. /* TBD: define flags for zeroing buffers etc */
  1094. new_pool->flags = flags;
  1095. new_pool->pool_id = pool_id;
  1096. /* Round up the element size to cacheline */
  1097. new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
  1098. new_pool->mem_size = elem_cnt * new_pool->elem_size +
  1099. ((align)?(align - 1):0);
  1100. new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
  1101. if (!new_pool->pool_mem) {
  1102. /* TBD: Check if we need get_free_pages above */
  1103. kfree(new_pool);
  1104. osdev->mem_pool[pool_id] = NULL;
  1105. return -ENOMEM;
  1106. }
  1107. spin_lock_init(&new_pool->lock);
  1108. /* Initialize free list */
  1109. aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
  1110. ((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
  1111. STAILQ_INIT(&new_pool->free_list);
  1112. for (i = 0; i < elem_cnt; i++)
  1113. STAILQ_INSERT_TAIL(&(new_pool->free_list),
  1114. (mempool_elem_t *)(aligned_pool_mem +
  1115. (new_pool->elem_size * i)), mempool_entry);
  1116. new_pool->free_cnt = elem_cnt;
  1117. *pool_addr = new_pool;
  1118. return 0;
  1119. }
  1120. qdf_export_symbol(__qdf_mempool_init);
  1121. /**
  1122. * __qdf_mempool_destroy() - Destroy memory pool
  1123. * @osdev: platform device object
  1124. * @pool: handle to the memory pool
  1125. *
  1126. * Returns: none
  1127. */
  1128. void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
  1129. {
  1130. int pool_id = 0;
  1131. if (!pool)
  1132. return;
  1133. if (prealloc_disabled) {
  1134. kfree(pool);
  1135. return;
  1136. }
  1137. pool_id = pool->pool_id;
  1138. /* TBD: Check if free count matches elem_cnt if debug is enabled */
  1139. kfree(pool->pool_mem);
  1140. kfree(pool);
  1141. osdev->mem_pool[pool_id] = NULL;
  1142. }
  1143. qdf_export_symbol(__qdf_mempool_destroy);
  1144. /**
  1145. * __qdf_mempool_alloc() - Allocate an element from the memory pool
  1146. *
  1147. * @osdev: platform device object
  1148. * @pool: handle to the memory pool
  1149. *
  1150. * Return: Pointer to the allocated element or NULL if the pool is empty
  1151. */
  1152. void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
  1153. {
  1154. void *buf = NULL;
  1155. if (!pool)
  1156. return NULL;
  1157. if (prealloc_disabled)
  1158. return qdf_mem_malloc(pool->elem_size);
  1159. spin_lock_bh(&pool->lock);
  1160. buf = STAILQ_FIRST(&pool->free_list);
  1161. if (buf) {
  1162. STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
  1163. pool->free_cnt--;
  1164. }
  1165. /* TBD: Update free count if debug is enabled */
  1166. spin_unlock_bh(&pool->lock);
  1167. return buf;
  1168. }
  1169. qdf_export_symbol(__qdf_mempool_alloc);
  1170. /**
  1171. * __qdf_mempool_free() - Free a memory pool element
  1172. * @osdev: Platform device object
  1173. * @pool: Handle to memory pool
  1174. * @buf: Element to be freed
  1175. *
  1176. * Returns: none
  1177. */
  1178. void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
  1179. {
  1180. if (!pool)
  1181. return;
  1182. if (prealloc_disabled)
  1183. return qdf_mem_free(buf);
  1184. spin_lock_bh(&pool->lock);
  1185. pool->free_cnt++;
  1186. STAILQ_INSERT_TAIL
  1187. (&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
  1188. spin_unlock_bh(&pool->lock);
  1189. }
  1190. qdf_export_symbol(__qdf_mempool_free);
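/*
 * Usage sketch (illustrative only; "osdev" and "struct foo" are placeholders,
 * not part of the driver): typical lifecycle of a pool built on the API above.
 *
 *     __qdf_mempool_t pool;
 *     void *elem;
 *
 *     if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *         return -ENOMEM;
 *
 *     elem = __qdf_mempool_alloc(osdev, pool);
 *     if (elem) {
 *         ... use elem ...
 *         __qdf_mempool_free(osdev, pool, elem);
 *     }
 *
 *     __qdf_mempool_destroy(osdev, pool);
 *
 * When prealloc_disabled is set, alloc/free fall back to qdf_mem_malloc()/
 * qdf_mem_free() and no backing pool memory is reserved up front.
 */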
  1191. #ifdef CNSS_MEM_PRE_ALLOC
  1192. static bool qdf_might_be_prealloc(void *ptr)
  1193. {
  1194. if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
  1195. return true;
  1196. else
  1197. return false;
  1198. }
  1199. /**
  1200. * qdf_mem_prealloc_get() - conditionally pre-allocate memory
  1201. * @size: the number of bytes to allocate
  1202. *
  1203. * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
  1204. * a chunk of pre-allocated memory. If size is less than or equal to
  1205. * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
  1206. *
  1207. * Return: NULL on failure, non-NULL on success
  1208. */
  1209. static void *qdf_mem_prealloc_get(size_t size)
  1210. {
  1211. void *ptr;
  1212. if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
  1213. return NULL;
  1214. ptr = wcnss_prealloc_get(size);
  1215. if (!ptr)
  1216. return NULL;
  1217. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1218. ptr += sizeof(void *);
  1219. memset(ptr, 0, size);
  1220. return ptr;
  1221. }
  1222. static inline bool qdf_mem_prealloc_put(void *ptr)
  1223. {
  1224. return wcnss_prealloc_put(ptr);
  1225. }
  1226. #else
  1227. static bool qdf_might_be_prealloc(void *ptr)
  1228. {
  1229. return false;
  1230. }
  1231. static inline void *qdf_mem_prealloc_get(size_t size)
  1232. {
  1233. return NULL;
  1234. }
  1235. static inline bool qdf_mem_prealloc_put(void *ptr)
  1236. {
  1237. return false;
  1238. }
  1239. #endif /* CNSS_MEM_PRE_ALLOC */
  1240. /* External Function implementation */
  1241. #ifdef MEMORY_DEBUG
  1242. /**
  1243. * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
  1244. *
  1245. * Return: value of mem_debug_disabled qdf module argument
  1246. */
  1247. #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
  1248. bool qdf_mem_debug_config_get(void)
  1249. {
  1250. /* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
  1251. return false;
  1252. }
  1253. #else
  1254. bool qdf_mem_debug_config_get(void)
  1255. {
  1256. return mem_debug_disabled;
  1257. }
  1258. #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
  1259. /**
  1260. * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
  1261. * @str_value: value of the module param
  1262. *
  1263. * This function will set the qdf module param mem_debug_disabled
  1264. *
  1265. * Return: QDF_STATUS_SUCCESS on Success
  1266. */
  1267. #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
  1268. QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
  1269. {
  1270. QDF_STATUS status;
  1271. status = qdf_bool_parse(str_value, &mem_debug_disabled);
  1272. return status;
  1273. }
  1274. #endif
  1275. /**
  1276. * qdf_mem_debug_init() - initialize qdf memory debug functionality
  1277. *
  1278. * Return: none
  1279. */
  1280. static void qdf_mem_debug_init(void)
  1281. {
  1282. int i;
  1283. is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
  1284. if (is_initial_mem_debug_disabled)
  1285. return;
1286. /* Initialize each domain list with a maximum size of 60000 */
  1287. for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
  1288. qdf_list_create(&qdf_mem_domains[i], 60000);
  1289. qdf_spinlock_create(&qdf_mem_list_lock);
  1290. /* dma */
  1291. for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
  1292. qdf_list_create(&qdf_mem_dma_domains[i], 0);
  1293. qdf_spinlock_create(&qdf_mem_dma_list_lock);
  1294. }
  1295. static uint32_t
  1296. qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
  1297. qdf_list_t *mem_list)
  1298. {
  1299. if (is_initial_mem_debug_disabled)
  1300. return 0;
  1301. if (qdf_list_empty(mem_list))
  1302. return 0;
  1303. qdf_err("Memory leaks detected in %s domain!",
  1304. qdf_debug_domain_name(domain));
  1305. qdf_mem_domain_print(mem_list,
  1306. qdf_err_printer,
  1307. NULL,
  1308. 0,
  1309. qdf_mem_meta_table_print);
  1310. return mem_list->count;
  1311. }
  1312. static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
  1313. {
  1314. uint32_t leak_count = 0;
  1315. int i;
  1316. if (is_initial_mem_debug_disabled)
  1317. return;
  1318. /* detect and print leaks */
  1319. for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
  1320. leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
  1321. if (leak_count)
  1322. QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
  1323. leak_count);
  1324. }
  1325. /**
  1326. * qdf_mem_debug_exit() - exit qdf memory debug functionality
  1327. *
  1328. * Return: none
  1329. */
  1330. static void qdf_mem_debug_exit(void)
  1331. {
  1332. int i;
  1333. if (is_initial_mem_debug_disabled)
  1334. return;
  1335. /* mem */
  1336. qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
  1337. for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
  1338. qdf_list_destroy(qdf_mem_list_get(i));
  1339. qdf_spinlock_destroy(&qdf_mem_list_lock);
  1340. /* dma */
  1341. qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
  1342. for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
  1343. qdf_list_destroy(&qdf_mem_dma_domains[i]);
  1344. qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
  1345. }
  1346. void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
  1347. void *caller, uint32_t flag)
  1348. {
  1349. QDF_STATUS status;
  1350. enum qdf_debug_domain current_domain = qdf_debug_domain_get();
  1351. qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
  1352. struct qdf_mem_header *header;
  1353. void *ptr;
  1354. unsigned long start, duration;
  1355. if (is_initial_mem_debug_disabled)
  1356. return __qdf_mem_malloc(size, func, line);
  1357. if (!size || size > QDF_MEM_MAX_MALLOC) {
  1358. qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
  1359. return NULL;
  1360. }
  1361. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1362. size += sizeof(void *);
  1363. ptr = qdf_mem_prealloc_get(size);
  1364. if (ptr)
  1365. return ptr;
  1366. if (!flag)
  1367. flag = qdf_mem_malloc_flags();
  1368. start = qdf_mc_timer_get_system_time();
  1369. header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
  1370. duration = qdf_mc_timer_get_system_time() - start;
  1371. if (duration > QDF_MEM_WARN_THRESHOLD)
  1372. qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
  1373. duration, size, func, line);
  1374. if (!header) {
  1375. qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
  1376. return NULL;
  1377. }
  1378. qdf_mem_header_init(header, size, func, line, caller);
  1379. qdf_mem_trailer_init(header);
  1380. ptr = qdf_mem_get_ptr(header);
  1381. qdf_spin_lock_irqsave(&qdf_mem_list_lock);
  1382. status = qdf_list_insert_front(mem_list, &header->node);
  1383. qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
  1384. if (QDF_IS_STATUS_ERROR(status))
  1385. qdf_err("Failed to insert memory header; status %d", status);
  1386. qdf_mem_kmalloc_inc(ksize(header));
  1387. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1388. ptr += sizeof(void *);
  1389. return ptr;
  1390. }
  1391. qdf_export_symbol(qdf_mem_malloc_debug);
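/*
 * Hypothetical caller-side sketch (not part of the driver): under
 * MEMORY_DEBUG, the public allocation macros are expected to expand to this
 * debug variant so that the call site and return address are recorded in the
 * tracking header.  The macro shape below is an assumption for illustration
 * only; the real definitions live in the qdf_mem.h headers, and QDF_RET_IP
 * here stands for the caller return address.
 *
 *	#define my_mem_malloc(size) \
 *		qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
 *
 *	buf = my_mem_malloc(128);
 *	...
 *	qdf_mem_free_debug(buf, __func__, __LINE__);
 */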
  1392. void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
  1393. uint32_t line, void *caller)
  1394. {
  1395. QDF_STATUS status;
  1396. enum qdf_debug_domain current_domain = qdf_debug_domain_get();
  1397. qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
  1398. struct qdf_mem_header *header;
  1399. void *ptr;
  1400. unsigned long start, duration;
  1401. if (is_initial_mem_debug_disabled)
  1402. return qdf_mem_malloc_atomic_debug_fl(size, func, line);
  1403. if (!size || size > QDF_MEM_MAX_MALLOC) {
  1404. qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
  1405. return NULL;
  1406. }
  1407. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1408. size += sizeof(void *);
  1409. ptr = qdf_mem_prealloc_get(size);
  1410. if (ptr)
  1411. return ptr;
  1412. start = qdf_mc_timer_get_system_time();
  1413. header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
  1414. duration = qdf_mc_timer_get_system_time() - start;
  1415. if (duration > QDF_MEM_WARN_THRESHOLD)
  1416. qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
  1417. duration, size, func, line);
  1418. if (!header) {
  1419. qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
  1420. return NULL;
  1421. }
  1422. qdf_mem_header_init(header, size, func, line, caller);
  1423. qdf_mem_trailer_init(header);
  1424. ptr = qdf_mem_get_ptr(header);
  1425. qdf_spin_lock_irqsave(&qdf_mem_list_lock);
  1426. status = qdf_list_insert_front(mem_list, &header->node);
  1427. qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
  1428. if (QDF_IS_STATUS_ERROR(status))
  1429. qdf_err("Failed to insert memory header; status %d", status);
  1430. qdf_mem_kmalloc_inc(ksize(header));
  1431. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1432. ptr += sizeof(void *);
  1433. return ptr;
  1434. }
  1435. qdf_export_symbol(qdf_mem_malloc_atomic_debug);
  1436. void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
  1437. uint32_t line)
  1438. {
  1439. void *ptr;
  1440. if (!size || size > QDF_MEM_MAX_MALLOC) {
  1441. qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
  1442. line);
  1443. return NULL;
  1444. }
  1445. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1446. size += sizeof(void *);
  1447. ptr = qdf_mem_prealloc_get(size);
  1448. if (ptr)
  1449. return ptr;
  1450. ptr = kzalloc(size, GFP_ATOMIC);
  1451. if (!ptr) {
  1452. qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
  1453. size, func, line);
  1454. return NULL;
  1455. }
  1456. qdf_mem_kmalloc_inc(ksize(ptr));
  1457. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1458. ptr += sizeof(void *);
  1459. return ptr;
  1460. }
  1461. qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
  1462. void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
  1463. {
  1464. enum qdf_debug_domain current_domain = qdf_debug_domain_get();
  1465. struct qdf_mem_header *header;
  1466. enum qdf_mem_validation_bitmap error_bitmap;
  1467. if (is_initial_mem_debug_disabled) {
  1468. __qdf_mem_free(ptr);
  1469. return;
  1470. }
  1471. /* freeing a null pointer is valid */
  1472. if (qdf_unlikely(!ptr))
  1473. return;
  1474. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1475. ptr = ptr - sizeof(void *);
  1476. if (qdf_mem_prealloc_put(ptr))
  1477. return;
  1478. if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
  1479. QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
  1480. ptr);
  1481. qdf_talloc_assert_no_children_fl(ptr, func, line);
  1482. qdf_spin_lock_irqsave(&qdf_mem_list_lock);
  1483. header = qdf_mem_get_header(ptr);
  1484. error_bitmap = qdf_mem_header_validate(header, current_domain);
  1485. error_bitmap |= qdf_mem_trailer_validate(header);
  1486. if (!error_bitmap) {
  1487. header->freed = true;
  1488. qdf_list_remove_node(qdf_mem_list_get(header->domain),
  1489. &header->node);
  1490. }
  1491. qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
  1492. qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
  1493. func, line);
  1494. qdf_mem_kmalloc_dec(ksize(header));
  1495. kfree(header);
  1496. }
  1497. qdf_export_symbol(qdf_mem_free_debug);
  1498. void qdf_mem_check_for_leaks(void)
  1499. {
  1500. enum qdf_debug_domain current_domain = qdf_debug_domain_get();
  1501. qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
  1502. qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
  1503. uint32_t leaks_count = 0;
  1504. if (is_initial_mem_debug_disabled)
  1505. return;
  1506. leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
  1507. leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
  1508. if (leaks_count)
  1509. QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
  1510. leaks_count);
  1511. }
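/*
 * Illustrative sketch (not part of the driver): qdf_mem_check_for_leaks() is
 * meant to be called once every allocation made in the currently active debug
 * domain should have been freed, e.g. in a hypothetical teardown path:
 *
 *	my_driver_close(...);          -- frees everything it allocated
 *	qdf_mem_check_for_leaks();     -- panics if the active domain still
 *	                                  holds tracked allocations
 *
 * The surrounding teardown function is an assumption for the example; only
 * the leak-check call itself is real.
 */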
  1512. /**
  1513. * qdf_mem_multi_pages_alloc_debug() - Debug version of
  1514. * qdf_mem_multi_pages_alloc
  1515. * @osdev: OS device handle pointer
  1516. * @pages: Multi page information storage
  1517. * @element_size: Each element size
1518. * @element_num: Total number of elements to be allocated
  1519. * @memctxt: Memory context
  1520. * @cacheable: Coherent memory or cacheable memory
  1521. * @func: Caller of this allocator
  1522. * @line: Line number of the caller
  1523. * @caller: Return address of the caller
  1524. *
1525. * This function allocates a large amount of memory across multiple pages.
1526. * Large contiguous allocations fail frequently, so instead of requesting
1527. * one large block in a single shot, allocate multiple non-contiguous pages
1528. * and combine them at the point of actual use.
  1529. *
  1530. * Return: None
  1531. */
  1532. void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
  1533. struct qdf_mem_multi_page_t *pages,
  1534. size_t element_size, uint32_t element_num,
  1535. qdf_dma_context_t memctxt, bool cacheable,
  1536. const char *func, uint32_t line,
  1537. void *caller)
  1538. {
  1539. uint16_t page_idx;
  1540. struct qdf_mem_dma_page_t *dma_pages;
  1541. void **cacheable_pages = NULL;
  1542. uint16_t i;
  1543. if (!pages->page_size)
  1544. pages->page_size = qdf_page_size;
  1545. pages->num_element_per_page = pages->page_size / element_size;
  1546. if (!pages->num_element_per_page) {
  1547. qdf_print("Invalid page %d or element size %d",
  1548. (int)pages->page_size, (int)element_size);
  1549. goto out_fail;
  1550. }
  1551. pages->num_pages = element_num / pages->num_element_per_page;
  1552. if (element_num % pages->num_element_per_page)
  1553. pages->num_pages++;
  1554. if (cacheable) {
  1555. /* Pages information storage */
  1556. pages->cacheable_pages = qdf_mem_malloc_debug(
  1557. pages->num_pages * sizeof(pages->cacheable_pages),
  1558. func, line, caller, 0);
  1559. if (!pages->cacheable_pages)
  1560. goto out_fail;
  1561. cacheable_pages = pages->cacheable_pages;
  1562. for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
  1563. cacheable_pages[page_idx] = qdf_mem_malloc_debug(
  1564. pages->page_size, func, line, caller, 0);
  1565. if (!cacheable_pages[page_idx])
  1566. goto page_alloc_fail;
  1567. }
  1568. pages->dma_pages = NULL;
  1569. } else {
  1570. pages->dma_pages = qdf_mem_malloc_debug(
  1571. pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
  1572. func, line, caller, 0);
  1573. if (!pages->dma_pages)
  1574. goto out_fail;
  1575. dma_pages = pages->dma_pages;
  1576. for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
  1577. dma_pages->page_v_addr_start =
  1578. qdf_mem_alloc_consistent_debug(
  1579. osdev, osdev->dev, pages->page_size,
  1580. &dma_pages->page_p_addr,
  1581. func, line, caller);
  1582. if (!dma_pages->page_v_addr_start) {
  1583. qdf_print("dmaable page alloc fail pi %d",
  1584. page_idx);
  1585. goto page_alloc_fail;
  1586. }
  1587. dma_pages->page_v_addr_end =
  1588. dma_pages->page_v_addr_start + pages->page_size;
  1589. dma_pages++;
  1590. }
  1591. pages->cacheable_pages = NULL;
  1592. }
  1593. return;
  1594. page_alloc_fail:
  1595. if (cacheable) {
  1596. for (i = 0; i < page_idx; i++)
  1597. qdf_mem_free_debug(pages->cacheable_pages[i],
  1598. func, line);
  1599. qdf_mem_free_debug(pages->cacheable_pages, func, line);
  1600. } else {
  1601. dma_pages = pages->dma_pages;
  1602. for (i = 0; i < page_idx; i++) {
  1603. qdf_mem_free_consistent_debug(
  1604. osdev, osdev->dev,
  1605. pages->page_size, dma_pages->page_v_addr_start,
  1606. dma_pages->page_p_addr, memctxt, func, line);
  1607. dma_pages++;
  1608. }
  1609. qdf_mem_free_debug(pages->dma_pages, func, line);
  1610. }
  1611. out_fail:
  1612. pages->cacheable_pages = NULL;
  1613. pages->dma_pages = NULL;
  1614. pages->num_pages = 0;
  1615. }
  1616. qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
  1617. /**
  1618. * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
  1619. * @osdev: OS device handle pointer
  1620. * @pages: Multi page information storage
  1621. * @memctxt: Memory context
  1622. * @cacheable: Coherent memory or cacheable memory
  1623. * @func: Caller of this allocator
  1624. * @line: Line number of the caller
  1625. *
1626. * This function frees memory that was allocated across multiple pages.
  1627. *
  1628. * Return: None
  1629. */
  1630. void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
  1631. struct qdf_mem_multi_page_t *pages,
  1632. qdf_dma_context_t memctxt, bool cacheable,
  1633. const char *func, uint32_t line)
  1634. {
  1635. unsigned int page_idx;
  1636. struct qdf_mem_dma_page_t *dma_pages;
  1637. if (!pages->page_size)
  1638. pages->page_size = qdf_page_size;
  1639. if (cacheable) {
  1640. for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
  1641. qdf_mem_free_debug(pages->cacheable_pages[page_idx],
  1642. func, line);
  1643. qdf_mem_free_debug(pages->cacheable_pages, func, line);
  1644. } else {
  1645. dma_pages = pages->dma_pages;
  1646. for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
  1647. qdf_mem_free_consistent_debug(
  1648. osdev, osdev->dev, pages->page_size,
  1649. dma_pages->page_v_addr_start,
  1650. dma_pages->page_p_addr, memctxt, func, line);
  1651. dma_pages++;
  1652. }
  1653. qdf_mem_free_debug(pages->dma_pages, func, line);
  1654. }
  1655. pages->cacheable_pages = NULL;
  1656. pages->dma_pages = NULL;
  1657. pages->num_pages = 0;
  1658. }
  1659. qdf_export_symbol(qdf_mem_multi_pages_free_debug);
  1660. #else
  1661. static void qdf_mem_debug_init(void) {}
  1662. static void qdf_mem_debug_exit(void) {}
  1663. void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
  1664. {
  1665. void *ptr;
  1666. if (!size || size > QDF_MEM_MAX_MALLOC) {
  1667. qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
  1668. line);
  1669. return NULL;
  1670. }
  1671. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1672. size += sizeof(void *);
  1673. ptr = qdf_mem_prealloc_get(size);
  1674. if (ptr)
  1675. return ptr;
  1676. ptr = kzalloc(size, GFP_ATOMIC);
  1677. if (!ptr) {
  1678. qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
  1679. size, func, line);
  1680. return NULL;
  1681. }
  1682. qdf_mem_kmalloc_inc(ksize(ptr));
  1683. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1684. ptr += sizeof(void *);
  1685. return ptr;
  1686. }
  1687. qdf_export_symbol(qdf_mem_malloc_atomic_fl);
  1688. /**
  1689. * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
  1690. * @osdev: OS device handle pointer
  1691. * @pages: Multi page information storage
  1692. * @element_size: Each element size
1693. * @element_num: Total number of elements to be allocated
  1694. * @memctxt: Memory context
  1695. * @cacheable: Coherent memory or cacheable memory
  1696. *
1697. * This function allocates a large amount of memory across multiple pages.
1698. * Large contiguous allocations fail frequently, so instead of requesting
1699. * one large block in a single shot, allocate multiple non-contiguous pages
1700. * and combine them at the point of actual use.
  1701. *
  1702. * Return: None
  1703. */
  1704. void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
  1705. struct qdf_mem_multi_page_t *pages,
  1706. size_t element_size, uint32_t element_num,
  1707. qdf_dma_context_t memctxt, bool cacheable)
  1708. {
  1709. uint16_t page_idx;
  1710. struct qdf_mem_dma_page_t *dma_pages;
  1711. void **cacheable_pages = NULL;
  1712. uint16_t i;
  1713. if (!pages->page_size)
  1714. pages->page_size = qdf_page_size;
  1715. pages->num_element_per_page = pages->page_size / element_size;
  1716. if (!pages->num_element_per_page) {
  1717. qdf_print("Invalid page %d or element size %d",
  1718. (int)pages->page_size, (int)element_size);
  1719. goto out_fail;
  1720. }
  1721. pages->num_pages = element_num / pages->num_element_per_page;
  1722. if (element_num % pages->num_element_per_page)
  1723. pages->num_pages++;
  1724. if (cacheable) {
  1725. /* Pages information storage */
  1726. pages->cacheable_pages = qdf_mem_malloc(
  1727. pages->num_pages * sizeof(pages->cacheable_pages));
  1728. if (!pages->cacheable_pages)
  1729. goto out_fail;
  1730. cacheable_pages = pages->cacheable_pages;
  1731. for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
  1732. cacheable_pages[page_idx] =
  1733. qdf_mem_malloc(pages->page_size);
  1734. if (!cacheable_pages[page_idx])
  1735. goto page_alloc_fail;
  1736. }
  1737. pages->dma_pages = NULL;
  1738. } else {
  1739. pages->dma_pages = qdf_mem_malloc(
  1740. pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
  1741. if (!pages->dma_pages)
  1742. goto out_fail;
  1743. dma_pages = pages->dma_pages;
  1744. for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
  1745. dma_pages->page_v_addr_start =
  1746. qdf_mem_alloc_consistent(osdev, osdev->dev,
  1747. pages->page_size,
  1748. &dma_pages->page_p_addr);
  1749. if (!dma_pages->page_v_addr_start) {
  1750. qdf_print("dmaable page alloc fail pi %d",
  1751. page_idx);
  1752. goto page_alloc_fail;
  1753. }
  1754. dma_pages->page_v_addr_end =
  1755. dma_pages->page_v_addr_start + pages->page_size;
  1756. dma_pages++;
  1757. }
  1758. pages->cacheable_pages = NULL;
  1759. }
  1760. return;
  1761. page_alloc_fail:
  1762. if (cacheable) {
  1763. for (i = 0; i < page_idx; i++)
  1764. qdf_mem_free(pages->cacheable_pages[i]);
  1765. qdf_mem_free(pages->cacheable_pages);
  1766. } else {
  1767. dma_pages = pages->dma_pages;
  1768. for (i = 0; i < page_idx; i++) {
  1769. qdf_mem_free_consistent(
  1770. osdev, osdev->dev, pages->page_size,
  1771. dma_pages->page_v_addr_start,
  1772. dma_pages->page_p_addr, memctxt);
  1773. dma_pages++;
  1774. }
  1775. qdf_mem_free(pages->dma_pages);
  1776. }
  1777. out_fail:
  1778. pages->cacheable_pages = NULL;
  1779. pages->dma_pages = NULL;
  1780. pages->num_pages = 0;
  1781. return;
  1782. }
  1783. qdf_export_symbol(qdf_mem_multi_pages_alloc);
  1784. /**
  1785. * qdf_mem_multi_pages_free() - free large size of kernel memory
  1786. * @osdev: OS device handle pointer
  1787. * @pages: Multi page information storage
  1788. * @memctxt: Memory context
  1789. * @cacheable: Coherent memory or cacheable memory
  1790. *
1791. * This function frees memory that was allocated across multiple pages.
  1792. *
  1793. * Return: None
  1794. */
  1795. void qdf_mem_multi_pages_free(qdf_device_t osdev,
  1796. struct qdf_mem_multi_page_t *pages,
  1797. qdf_dma_context_t memctxt, bool cacheable)
  1798. {
  1799. unsigned int page_idx;
  1800. struct qdf_mem_dma_page_t *dma_pages;
  1801. if (!pages->page_size)
  1802. pages->page_size = qdf_page_size;
  1803. if (cacheable) {
  1804. for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
  1805. qdf_mem_free(pages->cacheable_pages[page_idx]);
  1806. qdf_mem_free(pages->cacheable_pages);
  1807. } else {
  1808. dma_pages = pages->dma_pages;
  1809. for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
  1810. qdf_mem_free_consistent(
  1811. osdev, osdev->dev, pages->page_size,
  1812. dma_pages->page_v_addr_start,
  1813. dma_pages->page_p_addr, memctxt);
  1814. dma_pages++;
  1815. }
  1816. qdf_mem_free(pages->dma_pages);
  1817. }
  1818. pages->cacheable_pages = NULL;
  1819. pages->dma_pages = NULL;
  1820. pages->num_pages = 0;
  1821. return;
  1822. }
  1823. qdf_export_symbol(qdf_mem_multi_pages_free);
  1824. #endif
  1825. void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
  1826. bool cacheable)
  1827. {
  1828. unsigned int page_idx;
  1829. struct qdf_mem_dma_page_t *dma_pages;
  1830. if (!pages->page_size)
  1831. pages->page_size = qdf_page_size;
  1832. if (cacheable) {
  1833. for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
  1834. qdf_mem_zero(pages->cacheable_pages[page_idx],
  1835. pages->page_size);
  1836. } else {
  1837. dma_pages = pages->dma_pages;
  1838. for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
  1839. qdf_mem_zero(dma_pages->page_v_addr_start,
  1840. pages->page_size);
  1841. dma_pages++;
  1842. }
  1843. }
  1844. }
  1845. qdf_export_symbol(qdf_mem_multi_pages_zero);
  1846. void __qdf_mem_free(void *ptr)
  1847. {
  1848. if (!ptr)
  1849. return;
  1850. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1851. ptr = ptr - sizeof(void *);
  1852. if (qdf_might_be_prealloc(ptr)) {
  1853. if (qdf_mem_prealloc_put(ptr))
  1854. return;
  1855. }
  1856. qdf_mem_kmalloc_dec(ksize(ptr));
  1857. kfree(ptr);
  1858. }
  1859. qdf_export_symbol(__qdf_mem_free);
  1860. void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
  1861. {
  1862. void *ptr;
  1863. if (!size || size > QDF_MEM_MAX_MALLOC) {
  1864. qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
  1865. line);
  1866. return NULL;
  1867. }
  1868. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1869. size += sizeof(void *);
  1870. ptr = qdf_mem_prealloc_get(size);
  1871. if (ptr)
  1872. return ptr;
  1873. ptr = kzalloc(size, qdf_mem_malloc_flags());
  1874. if (!ptr)
  1875. return NULL;
  1876. qdf_mem_kmalloc_inc(ksize(ptr));
  1877. if (add_headroom_for_cnss_prealloc_cache_ptr())
  1878. ptr += sizeof(void *);
  1879. return ptr;
  1880. }
  1881. qdf_export_symbol(__qdf_mem_malloc);
  1882. #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
  1883. void __qdf_untracked_mem_free(void *ptr)
  1884. {
  1885. if (!ptr)
  1886. return;
  1887. kfree(ptr);
  1888. }
  1889. void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
  1890. {
  1891. void *ptr;
  1892. if (!size || size > QDF_MEM_MAX_MALLOC) {
  1893. qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
  1894. line);
  1895. return NULL;
  1896. }
  1897. ptr = kzalloc(size, qdf_mem_malloc_flags());
  1898. if (!ptr)
  1899. return NULL;
  1900. return ptr;
  1901. }
  1902. #endif
  1903. void *qdf_aligned_malloc_fl(uint32_t *size,
  1904. void **vaddr_unaligned,
  1905. qdf_dma_addr_t *paddr_unaligned,
  1906. qdf_dma_addr_t *paddr_aligned,
  1907. uint32_t align,
  1908. const char *func, uint32_t line)
  1909. {
  1910. void *vaddr_aligned;
  1911. uint32_t align_alloc_size;
  1912. *vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
  1913. line);
  1914. if (!*vaddr_unaligned) {
  1915. qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
  1916. return NULL;
  1917. }
  1918. *paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
1919. /* Re-allocate with additional bytes to align the base address only
1920. * if the allocation above returns an unaligned address. The reason
1921. * for trying an exact-size allocation first is that the OS allocates
1922. * blocks in power-of-2 pages and then frees the extra pages.
1923. * e.g., for a ring size of 1MB, the aligned allocation below would
1924. * request 1MB plus 7 bytes for alignment, which can cause a
1925. * 2MB block allocation, and that sometimes fails due to
1926. * memory fragmentation.
1927. */
  1928. if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
  1929. align_alloc_size = *size + align - 1;
  1930. qdf_mem_free(*vaddr_unaligned);
  1931. *vaddr_unaligned = qdf_mem_malloc_fl(
  1932. (qdf_size_t)align_alloc_size, func, line);
  1933. if (!*vaddr_unaligned) {
  1934. qdf_warn("Failed to alloc %uB @ %s:%d",
  1935. align_alloc_size, func, line);
  1936. return NULL;
  1937. }
  1938. *paddr_unaligned = qdf_mem_virt_to_phys(
  1939. *vaddr_unaligned);
  1940. *size = align_alloc_size;
  1941. }
  1942. *paddr_aligned = (qdf_dma_addr_t)qdf_align
  1943. ((unsigned long)(*paddr_unaligned), align);
  1944. vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
  1945. ((unsigned long)(*paddr_aligned) -
  1946. (unsigned long)(*paddr_unaligned)));
  1947. return vaddr_aligned;
  1948. }
  1949. qdf_export_symbol(qdf_aligned_malloc_fl);
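/*
 * Illustrative usage sketch for qdf_aligned_malloc_fl() (not part of the
 * driver).  The ring dimensions and the 8-byte alignment below are
 * assumptions made for the example.
 *
 *	uint32_t size = ring_entries * entry_size;
 *	void *vaddr_unaligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *	void *ring_vaddr;
 *
 *	ring_vaddr = qdf_aligned_malloc_fl(&size, &vaddr_unaligned,
 *					   &paddr_unaligned, &paddr_aligned,
 *					   8, __func__, __LINE__);
 *	if (!ring_vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *
 * On return, size may have grown to cover the alignment padding, and the
 * buffer must later be freed through vaddr_unaligned, not ring_vaddr.
 */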
  1950. #if defined(DP_UMAC_HW_RESET_SUPPORT) || defined(WLAN_SUPPORT_PPEDS)
  1951. /**
1952. * qdf_tx_desc_pool_free_bufs() - Walk the pool elements and call the registered cb
  1953. * @ctxt: Context to be passed to the cb
  1954. * @pages: Multi page information storage
  1955. * @elem_size: Each element size
  1956. * @elem_count: Total number of elements in the pool.
  1957. * @cacheable: Coherent memory or cacheable memory
  1958. * @cb: Callback to free the elements
  1959. * @elem_list: elem list for delayed free
  1960. *
1961. * Return: 0 on success, or error code
  1962. */
  1963. int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
  1964. uint32_t elem_size, uint32_t elem_count,
  1965. uint8_t cacheable, qdf_mem_release_cb cb,
  1966. void *elem_list)
  1967. {
  1968. uint16_t i, i_int;
  1969. void *page_info;
  1970. void *elem;
  1971. uint32_t num_elem = 0;
  1972. for (i = 0; i < pages->num_pages; i++) {
  1973. if (cacheable)
  1974. page_info = pages->cacheable_pages[i];
  1975. else
  1976. page_info = pages->dma_pages[i].page_v_addr_start;
  1977. if (!page_info)
  1978. return -ENOMEM;
  1979. elem = page_info;
  1980. for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
  1981. cb(ctxt, elem, elem_list);
  1982. elem = ((char *)elem + elem_size);
  1983. num_elem++;
  1984. /* Number of desc pool elements reached */
  1985. if (num_elem == (elem_count - 1))
  1986. break;
  1987. }
  1988. }
  1989. return 0;
  1990. }
  1991. qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
  1992. #endif
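/*
 * Illustrative callback sketch for qdf_tx_desc_pool_free_bufs() (not part of
 * the driver).  The callback signature is inferred from the cb(ctxt, elem,
 * elem_list) invocation above, and the descriptor handling inside it is an
 * assumption made for the example.
 *
 *	static void my_desc_release_cb(void *ctxt, void *elem, void *elem_list)
 *	{
 *		-- return the descriptor's resources to the owner in ctxt,
 *		-- or queue elem on elem_list for delayed freeing
 *	}
 *
 *	qdf_tx_desc_pool_free_bufs(soc, &pool->memory, pool->elem_size,
 *				   pool->elem_count, cacheable,
 *				   my_desc_release_cb, &free_list);
 */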
  1993. /**
  1994. * qdf_mem_multi_page_link() - Make links for multi page elements
  1995. * @osdev: OS device handle pointer
  1996. * @pages: Multi page information storage
  1997. * @elem_size: Single element size
1998. * @elem_count: Number of elements to be linked
1999. * @cacheable: Coherent memory or cacheable memory
2000. *
2001. * This function links the elements of a multi-page allocation into a list
2002. *
2003. * Return: 0 on success, -ENOMEM on failure
  2004. */
  2005. int qdf_mem_multi_page_link(qdf_device_t osdev,
  2006. struct qdf_mem_multi_page_t *pages,
  2007. uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
  2008. {
  2009. uint16_t i, i_int;
  2010. void *page_info;
  2011. void **c_elem = NULL;
  2012. uint32_t num_link = 0;
  2013. for (i = 0; i < pages->num_pages; i++) {
  2014. if (cacheable)
  2015. page_info = pages->cacheable_pages[i];
  2016. else
  2017. page_info = pages->dma_pages[i].page_v_addr_start;
  2018. if (!page_info)
  2019. return -ENOMEM;
  2020. c_elem = (void **)page_info;
  2021. for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
  2022. if (i_int == (pages->num_element_per_page - 1)) {
  2023. if ((i + 1) == pages->num_pages)
  2024. break;
  2025. if (cacheable)
  2026. *c_elem = pages->
  2027. cacheable_pages[i + 1];
  2028. else
  2029. *c_elem = pages->
  2030. dma_pages[i + 1].
  2031. page_v_addr_start;
  2032. num_link++;
  2033. break;
  2034. } else {
  2035. *c_elem =
  2036. (void *)(((char *)c_elem) + elem_size);
  2037. }
  2038. num_link++;
  2039. c_elem = (void **)*c_elem;
2040. /* Last link established, exit */
  2041. if (num_link == (elem_count - 1))
  2042. break;
  2043. }
  2044. }
  2045. if (c_elem)
  2046. *c_elem = NULL;
  2047. return 0;
  2048. }
  2049. qdf_export_symbol(qdf_mem_multi_page_link);
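/*
 * Illustrative multi-page descriptor pool sketch (not part of the driver),
 * combining the alloc, link and free helpers above.  The element size/count
 * and the cacheable choice are assumptions made for the example.
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, elem_size, elem_count,
 *				  memctxt, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	if (qdf_mem_multi_page_link(osdev, &pages, elem_size, elem_count,
 *				    true)) {
 *		qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *
 *	... walk the linked elements starting at pages.cacheable_pages[0] ...
 *
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */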
  2050. void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
  2051. {
  2052. /* special case where dst_addr or src_addr can be NULL */
  2053. if (!num_bytes)
  2054. return;
  2055. QDF_BUG(dst_addr);
  2056. QDF_BUG(src_addr);
  2057. if (!dst_addr || !src_addr)
  2058. return;
  2059. memcpy(dst_addr, src_addr, num_bytes);
  2060. }
  2061. qdf_export_symbol(qdf_mem_copy);
  2062. qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
  2063. {
  2064. qdf_shared_mem_t *shared_mem;
  2065. qdf_dma_addr_t dma_addr, paddr;
  2066. int ret;
  2067. shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
  2068. if (!shared_mem)
  2069. return NULL;
  2070. shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
  2071. size, qdf_mem_get_dma_addr_ptr(osdev,
  2072. &shared_mem->mem_info));
  2073. if (!shared_mem->vaddr) {
  2074. qdf_err("Unable to allocate DMA memory for shared resource");
  2075. qdf_mem_free(shared_mem);
  2076. return NULL;
  2077. }
  2078. qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
  2079. size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
  2080. qdf_mem_zero(shared_mem->vaddr, size);
  2081. dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
  2082. paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
  2083. qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
  2084. ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
  2085. shared_mem->vaddr, dma_addr, size);
  2086. if (ret) {
  2087. qdf_err("Unable to get DMA sgtable");
  2088. qdf_mem_free_consistent(osdev, osdev->dev,
  2089. shared_mem->mem_info.size,
  2090. shared_mem->vaddr,
  2091. dma_addr,
  2092. qdf_get_dma_mem_context(shared_mem,
  2093. memctx));
  2094. qdf_mem_free(shared_mem);
  2095. return NULL;
  2096. }
  2097. qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
  2098. return shared_mem;
  2099. }
  2100. qdf_export_symbol(qdf_mem_shared_mem_alloc);
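/*
 * Illustrative usage sketch for qdf_mem_shared_mem_alloc() (not part of the
 * driver).  The 4096-byte size and the consumer of the sgtable below are
 * assumptions made for the example.
 *
 *	qdf_shared_mem_t *shared;
 *
 *	shared = qdf_mem_shared_mem_alloc(osdev, 4096);
 *	if (!shared)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	-- shared->vaddr is the zeroed CPU mapping,
 *	-- shared->mem_info carries the DMA and physical addresses, and
 *	-- shared->sgtable can be handed to an SMMU/remote-subsystem mapping
 */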
  2101. /**
  2102. * qdf_mem_copy_toio() - copy memory
  2103. * @dst_addr: Pointer to destination memory location (to copy to)
  2104. * @src_addr: Pointer to source memory location (to copy from)
  2105. * @num_bytes: Number of bytes to copy.
  2106. *
  2107. * Return: none
  2108. */
  2109. void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
  2110. {
  2111. if (0 == num_bytes) {
  2112. /* special case where dst_addr or src_addr can be NULL */
  2113. return;
  2114. }
  2115. if ((!dst_addr) || (!src_addr)) {
  2116. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
  2117. "%s called with NULL parameter, source:%pK destination:%pK",
  2118. __func__, src_addr, dst_addr);
  2119. QDF_ASSERT(0);
  2120. return;
  2121. }
  2122. memcpy_toio(dst_addr, src_addr, num_bytes);
  2123. }
  2124. qdf_export_symbol(qdf_mem_copy_toio);
  2125. /**
  2126. * qdf_mem_set_io() - set (fill) memory with a specified byte value.
  2127. * @ptr: Pointer to memory that will be set
2128. * @num_bytes: Number of bytes to be set
2129. * @value: Byte value to set in memory
  2130. *
  2131. * Return: None
  2132. */
  2133. void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
  2134. {
  2135. if (!ptr) {
  2136. qdf_print("%s called with NULL parameter ptr", __func__);
  2137. return;
  2138. }
  2139. memset_io(ptr, value, num_bytes);
  2140. }
  2141. qdf_export_symbol(qdf_mem_set_io);
  2142. void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
  2143. {
  2144. QDF_BUG(ptr);
  2145. if (!ptr)
  2146. return;
  2147. memset(ptr, value, num_bytes);
  2148. }
  2149. qdf_export_symbol(qdf_mem_set);
  2150. void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
  2151. {
  2152. /* special case where dst_addr or src_addr can be NULL */
  2153. if (!num_bytes)
  2154. return;
  2155. QDF_BUG(dst_addr);
  2156. QDF_BUG(src_addr);
  2157. if (!dst_addr || !src_addr)
  2158. return;
  2159. memmove(dst_addr, src_addr, num_bytes);
  2160. }
  2161. qdf_export_symbol(qdf_mem_move);
  2162. int qdf_mem_cmp(const void *left, const void *right, size_t size)
  2163. {
  2164. QDF_BUG(left);
  2165. QDF_BUG(right);
  2166. return memcmp(left, right, size);
  2167. }
  2168. qdf_export_symbol(qdf_mem_cmp);
  2169. #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
  2170. /**
  2171. * qdf_mem_dma_alloc() - allocates memory for dma
  2172. * @osdev: OS device handle
  2173. * @dev: Pointer to device handle
  2174. * @size: Size to be allocated
  2175. * @phy_addr: Physical address
  2176. *
2177. * Return: pointer to the allocated memory, or NULL if the allocation fails
  2178. */
  2179. static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
  2180. qdf_size_t size,
  2181. qdf_dma_addr_t *phy_addr)
  2182. {
  2183. void *vaddr;
  2184. vaddr = qdf_mem_malloc(size);
  2185. *phy_addr = ((uintptr_t) vaddr);
  2186. /* using this type conversion to suppress "cast from pointer to integer
  2187. * of different size" warning on some platforms
  2188. */
  2189. BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
  2190. return vaddr;
  2191. }
  2192. #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
  2193. !defined(QCA_WIFI_QCN9000)
  2194. #define QCA8074_RAM_BASE 0x50000000
  2195. #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
  2196. void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
  2197. qdf_dma_addr_t *phy_addr)
  2198. {
  2199. void *vaddr = NULL;
  2200. int i;
  2201. *phy_addr = 0;
  2202. for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
  2203. vaddr = dma_alloc_coherent(dev, size, phy_addr,
  2204. qdf_mem_malloc_flags());
  2205. if (!vaddr) {
2206. qdf_err("%s failed, size: %zu!", __func__, size);
  2207. return NULL;
  2208. }
  2209. if (*phy_addr >= QCA8074_RAM_BASE)
  2210. return vaddr;
  2211. dma_free_coherent(dev, size, vaddr, *phy_addr);
  2212. }
  2213. return NULL;
  2214. }
  2215. #else
  2216. static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
  2217. qdf_size_t size, qdf_dma_addr_t *paddr)
  2218. {
  2219. return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
  2220. }
  2221. #endif
  2222. #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
  2223. static inline void
  2224. qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
  2225. {
  2226. qdf_mem_free(vaddr);
  2227. }
  2228. #else
  2229. static inline void
  2230. qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
  2231. {
  2232. dma_free_coherent(dev, size, vaddr, paddr);
  2233. }
  2234. #endif
  2235. #ifdef MEMORY_DEBUG
  2236. void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
  2237. qdf_size_t size, qdf_dma_addr_t *paddr,
  2238. const char *func, uint32_t line,
  2239. void *caller)
  2240. {
  2241. QDF_STATUS status;
  2242. enum qdf_debug_domain current_domain = qdf_debug_domain_get();
  2243. qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
  2244. struct qdf_mem_header *header;
  2245. void *vaddr;
  2246. if (is_initial_mem_debug_disabled)
  2247. return __qdf_mem_alloc_consistent(osdev, dev,
  2248. size, paddr,
  2249. func, line);
  2250. if (!size || size > QDF_MEM_MAX_MALLOC) {
  2251. qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
  2252. return NULL;
  2253. }
  2254. vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
  2255. paddr);
  2256. if (!vaddr) {
  2257. qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
  2258. return NULL;
  2259. }
  2260. header = qdf_mem_dma_get_header(vaddr, size);
2261. /* For DMA buffers we only add trailers; this call initializes
2262. * the header structure at the tail.
2263. * Prefixing the header to the DMA buffer causes SMMU faults, so
2264. * do not prefix the header to DMA buffers.
2265. */
  2266. qdf_mem_header_init(header, size, func, line, caller);
  2267. qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
  2268. status = qdf_list_insert_front(mem_list, &header->node);
  2269. qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
  2270. if (QDF_IS_STATUS_ERROR(status))
  2271. qdf_err("Failed to insert memory header; status %d", status);
  2272. qdf_mem_dma_inc(size);
  2273. return vaddr;
  2274. }
  2275. qdf_export_symbol(qdf_mem_alloc_consistent_debug);
  2276. void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
  2277. qdf_size_t size, void *vaddr,
  2278. qdf_dma_addr_t paddr,
  2279. qdf_dma_context_t memctx,
  2280. const char *func, uint32_t line)
  2281. {
  2282. enum qdf_debug_domain domain = qdf_debug_domain_get();
  2283. struct qdf_mem_header *header;
  2284. enum qdf_mem_validation_bitmap error_bitmap;
  2285. if (is_initial_mem_debug_disabled) {
  2286. __qdf_mem_free_consistent(
  2287. osdev, dev,
  2288. size, vaddr,
  2289. paddr, memctx);
  2290. return;
  2291. }
  2292. /* freeing a null pointer is valid */
  2293. if (qdf_unlikely(!vaddr))
  2294. return;
  2295. qdf_talloc_assert_no_children_fl(vaddr, func, line);
  2296. qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2297. /* For DMA buffers we only add trailers; this call retrieves
2298. * the header structure at the tail.
2299. * Prefixing the header to the DMA buffer causes SMMU faults, so
2300. * do not prefix the header to DMA buffers.
2301. */
  2302. header = qdf_mem_dma_get_header(vaddr, size);
  2303. error_bitmap = qdf_mem_header_validate(header, domain);
  2304. if (!error_bitmap) {
  2305. header->freed = true;
  2306. qdf_list_remove_node(qdf_mem_dma_list(header->domain),
  2307. &header->node);
  2308. }
  2309. qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
  2310. qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
  2311. qdf_mem_dma_dec(header->size);
  2312. qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
  2313. }
  2314. qdf_export_symbol(qdf_mem_free_consistent_debug);
  2315. #endif /* MEMORY_DEBUG */
  2316. void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
  2317. qdf_size_t size, void *vaddr,
  2318. qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
  2319. {
  2320. qdf_mem_dma_dec(size);
  2321. qdf_mem_dma_free(dev, size, vaddr, paddr);
  2322. }
  2323. qdf_export_symbol(__qdf_mem_free_consistent);
  2324. void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
  2325. qdf_size_t size, qdf_dma_addr_t *paddr,
  2326. const char *func, uint32_t line)
  2327. {
  2328. void *vaddr;
  2329. if (!size || size > QDF_MEM_MAX_MALLOC) {
  2330. qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
  2331. size, func, line);
  2332. return NULL;
  2333. }
  2334. vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
  2335. if (vaddr)
  2336. qdf_mem_dma_inc(size);
  2337. return vaddr;
  2338. }
  2339. qdf_export_symbol(__qdf_mem_alloc_consistent);
  2340. void *qdf_aligned_mem_alloc_consistent_fl(
  2341. qdf_device_t osdev, uint32_t *size,
  2342. void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
  2343. qdf_dma_addr_t *paddr_aligned, uint32_t align,
  2344. const char *func, uint32_t line)
  2345. {
  2346. void *vaddr_aligned;
  2347. uint32_t align_alloc_size;
  2348. *vaddr_unaligned = qdf_mem_alloc_consistent(
  2349. osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
  2350. if (!*vaddr_unaligned) {
  2351. qdf_warn("Failed to alloc %uB @ %s:%d",
  2352. *size, func, line);
  2353. return NULL;
  2354. }
2355. /* Re-allocate with additional bytes to align the base address only
2356. * if the allocation above returns an unaligned address. The reason
2357. * for trying an exact-size allocation first is that the OS allocates
2358. * blocks in power-of-2 pages and then frees the extra pages.
2359. * e.g., for a ring size of 1MB, the aligned allocation below would
2360. * request 1MB plus 7 bytes for alignment, which can cause a
2361. * 2MB block allocation, and that sometimes fails due to
2362. * memory fragmentation.
2363. */
  2364. if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
  2365. align_alloc_size = *size + align - 1;
  2366. qdf_mem_free_consistent(osdev, osdev->dev, *size,
  2367. *vaddr_unaligned,
  2368. *paddr_unaligned, 0);
  2369. *vaddr_unaligned = qdf_mem_alloc_consistent(
  2370. osdev, osdev->dev, align_alloc_size,
  2371. paddr_unaligned);
  2372. if (!*vaddr_unaligned) {
  2373. qdf_warn("Failed to alloc %uB @ %s:%d",
  2374. align_alloc_size, func, line);
  2375. return NULL;
  2376. }
  2377. *size = align_alloc_size;
  2378. }
  2379. *paddr_aligned = (qdf_dma_addr_t)qdf_align(
  2380. (unsigned long)(*paddr_unaligned), align);
  2381. vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
  2382. ((unsigned long)(*paddr_aligned) -
  2383. (unsigned long)(*paddr_unaligned)));
  2384. return vaddr_aligned;
  2385. }
  2386. qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
  2387. /**
  2388. * qdf_mem_dma_sync_single_for_device() - assign memory to device
  2389. * @osdev: OS device handle
  2390. * @bus_addr: dma address to give to the device
  2391. * @size: Size of the memory block
  2392. * @direction: direction data will be DMAed
  2393. *
  2394. * Assign memory to the remote device.
2395. * The cache lines are flushed to RAM or invalidated as needed.
  2396. *
  2397. * Return: none
  2398. */
  2399. void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
  2400. qdf_dma_addr_t bus_addr,
  2401. qdf_size_t size,
  2402. enum dma_data_direction direction)
  2403. {
  2404. dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
  2405. }
  2406. qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
  2407. /**
  2408. * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
  2409. * @osdev: OS device handle
  2410. * @bus_addr: dma address to give to the cpu
  2411. * @size: Size of the memory block
  2412. * @direction: direction data will be DMAed
  2413. *
  2414. * Assign memory to the CPU.
  2415. *
  2416. * Return: none
  2417. */
  2418. void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
  2419. qdf_dma_addr_t bus_addr,
  2420. qdf_size_t size,
  2421. enum dma_data_direction direction)
  2422. {
  2423. dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
  2424. }
  2425. qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
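/*
 * Illustrative streaming-DMA ownership sketch (not part of the driver).  The
 * buffer address and length below are assumptions made for the example; the
 * two sync helpers above bracket the device's access window.
 *
 *	-- CPU fills the buffer, then hands it to the device:
 *	qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, buf_len,
 *					   DMA_TO_DEVICE);
 *	-- ... hardware consumes the buffer ...
 *
 *	-- Before the CPU reads data the device wrote:
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, buf_len,
 *					DMA_FROM_DEVICE);
 */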
  2426. void qdf_mem_init(void)
  2427. {
  2428. qdf_mem_debug_init();
  2429. qdf_net_buf_debug_init();
  2430. qdf_frag_debug_init();
  2431. qdf_mem_debugfs_init();
  2432. qdf_mem_debug_debugfs_init();
  2433. }
  2434. qdf_export_symbol(qdf_mem_init);
  2435. void qdf_mem_exit(void)
  2436. {
  2437. qdf_mem_debug_debugfs_exit();
  2438. qdf_mem_debugfs_exit();
  2439. qdf_frag_debug_exit();
  2440. qdf_net_buf_debug_exit();
  2441. qdf_mem_debug_exit();
  2442. }
  2443. qdf_export_symbol(qdf_mem_exit);
  2444. /**
  2445. * qdf_ether_addr_copy() - copy an Ethernet address
  2446. *
  2447. * @dst_addr: A six-byte array Ethernet address destination
  2448. * @src_addr: A six-byte array Ethernet address source
  2449. *
  2450. * Please note: dst & src must both be aligned to u16.
  2451. *
  2452. * Return: none
  2453. */
  2454. void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
  2455. {
  2456. if ((!dst_addr) || (!src_addr)) {
  2457. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
  2458. "%s called with NULL parameter, source:%pK destination:%pK",
  2459. __func__, src_addr, dst_addr);
  2460. QDF_ASSERT(0);
  2461. return;
  2462. }
  2463. ether_addr_copy(dst_addr, src_addr);
  2464. }
  2465. qdf_export_symbol(qdf_ether_addr_copy);
  2466. int32_t qdf_dma_mem_stats_read(void)
  2467. {
  2468. return qdf_atomic_read(&qdf_mem_stat.dma);
  2469. }
  2470. qdf_export_symbol(qdf_dma_mem_stats_read);
  2471. int32_t qdf_heap_mem_stats_read(void)
  2472. {
  2473. return qdf_atomic_read(&qdf_mem_stat.kmalloc);
  2474. }
  2475. qdf_export_symbol(qdf_heap_mem_stats_read);
  2476. int32_t qdf_skb_mem_stats_read(void)
  2477. {
  2478. return qdf_atomic_read(&qdf_mem_stat.skb);
  2479. }
  2480. qdf_export_symbol(qdf_skb_mem_stats_read);
  2481. int32_t qdf_skb_total_mem_stats_read(void)
  2482. {
  2483. return qdf_atomic_read(&qdf_mem_stat.skb_total);
  2484. }
  2485. qdf_export_symbol(qdf_skb_total_mem_stats_read);
  2486. int32_t qdf_skb_max_mem_stats_read(void)
  2487. {
  2488. return qdf_mem_stat.skb_mem_max;
  2489. }
  2490. qdf_export_symbol(qdf_skb_max_mem_stats_read);
  2491. int32_t qdf_dp_tx_skb_mem_stats_read(void)
  2492. {
  2493. return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
  2494. }
  2495. qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
  2496. int32_t qdf_dp_rx_skb_mem_stats_read(void)
  2497. {
  2498. return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
  2499. }
  2500. qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
  2501. int32_t qdf_mem_dp_tx_skb_cnt_read(void)
  2502. {
  2503. return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
  2504. }
  2505. qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
  2506. int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
  2507. {
  2508. return qdf_mem_stat.dp_tx_skb_count_max;
  2509. }
  2510. qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
  2511. int32_t qdf_mem_dp_rx_skb_cnt_read(void)
  2512. {
  2513. return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
  2514. }
  2515. qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
  2516. int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
  2517. {
  2518. return qdf_mem_stat.dp_rx_skb_count_max;
  2519. }
  2520. qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
  2521. int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
  2522. {
  2523. return qdf_mem_stat.dp_tx_skb_mem_max;
  2524. }
  2525. qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
  2526. int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
  2527. {
  2528. return qdf_mem_stat.dp_rx_skb_mem_max;
  2529. }
  2530. qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
  2531. int32_t qdf_mem_tx_desc_cnt_read(void)
  2532. {
  2533. return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
  2534. }
  2535. qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
  2536. int32_t qdf_mem_tx_desc_max_read(void)
  2537. {
  2538. return qdf_mem_stat.tx_descs_max;
  2539. }
  2540. qdf_export_symbol(qdf_mem_tx_desc_max_read);
  2541. void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
  2542. int32_t tx_descs_max)
  2543. {
  2544. qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
  2545. qdf_mem_stat.tx_descs_max = tx_descs_max;
  2546. }
  2547. qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
  2548. void qdf_mem_stats_init(void)
  2549. {
  2550. qdf_mem_stat.skb_mem_max = 0;
  2551. qdf_mem_stat.dp_tx_skb_mem_max = 0;
  2552. qdf_mem_stat.dp_rx_skb_mem_max = 0;
  2553. qdf_mem_stat.dp_tx_skb_count_max = 0;
  2554. qdf_mem_stat.dp_rx_skb_count_max = 0;
  2555. qdf_mem_stat.tx_descs_max = 0;
  2556. }
  2557. qdf_export_symbol(qdf_mem_stats_init);
  2558. void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
  2559. {
  2560. void *ptr;
  2561. if (!size) {
  2562. qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
  2563. return NULL;
  2564. }
  2565. ptr = vzalloc(size);
  2566. return ptr;
  2567. }
  2568. qdf_export_symbol(__qdf_mem_valloc);
  2569. void __qdf_mem_vfree(void *ptr)
  2570. {
  2571. if (qdf_unlikely(!ptr))
  2572. return;
  2573. vfree(ptr);
  2574. }
  2575. qdf_export_symbol(__qdf_mem_vfree);
  2576. #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
  2577. int
  2578. qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
  2579. enum qdf_iommu_attr attr, void *data)
  2580. {
  2581. return __qdf_iommu_domain_get_attr(domain, attr, data);
  2582. }
  2583. qdf_export_symbol(qdf_iommu_domain_get_attr);
  2584. #endif
  2585. #ifdef ENHANCED_OS_ABSTRACTION
  2586. void qdf_update_mem_map_table(qdf_device_t osdev,
  2587. qdf_mem_info_t *mem_info,
  2588. qdf_dma_addr_t dma_addr,
  2589. uint32_t mem_size)
  2590. {
  2591. if (!mem_info) {
  2592. qdf_nofl_err("%s: NULL mem_info", __func__);
  2593. return;
  2594. }
  2595. __qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
  2596. }
  2597. qdf_export_symbol(qdf_update_mem_map_table);
  2598. qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
  2599. qdf_dma_addr_t dma_addr)
  2600. {
  2601. return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
  2602. }
  2603. qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
  2604. #endif
  2605. #ifdef QCA_KMEM_CACHE_SUPPORT
  2606. qdf_kmem_cache_t
  2607. __qdf_kmem_cache_create(const char *cache_name,
  2608. qdf_size_t size)
  2609. {
  2610. struct kmem_cache *cache;
  2611. cache = kmem_cache_create(cache_name, size,
  2612. 0, 0, NULL);
  2613. if (!cache)
  2614. return NULL;
  2615. return cache;
  2616. }
  2617. qdf_export_symbol(__qdf_kmem_cache_create);
  2618. void
  2619. __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
  2620. {
  2621. kmem_cache_destroy(cache);
  2622. }
  2623. qdf_export_symbol(__qdf_kmem_cache_destroy);
  2624. void*
  2625. __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
  2626. {
  2627. int flags = GFP_KERNEL;
  2628. if (in_interrupt() || irqs_disabled() || in_atomic())
  2629. flags = GFP_ATOMIC;
  2630. return kmem_cache_alloc(cache, flags);
  2631. }
  2632. qdf_export_symbol(__qdf_kmem_cache_alloc);
  2633. void
  2634. __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
  2635. {
  2636. kmem_cache_free(cache, node);
  2637. }
  2638. qdf_export_symbol(__qdf_kmem_cache_free);
  2639. #else
  2640. qdf_kmem_cache_t
  2641. __qdf_kmem_cache_create(const char *cache_name,
  2642. qdf_size_t size)
  2643. {
  2644. return NULL;
  2645. }
  2646. void
  2647. __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
  2648. {
  2649. }
  2650. void *
  2651. __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
  2652. {
  2653. return NULL;
  2654. }
  2655. void
  2656. __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
  2657. {
  2658. }
  2659. #endif
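/*
 * Illustrative kmem-cache lifecycle sketch (not part of the driver), built on
 * the QCA_KMEM_CACHE_SUPPORT wrappers above.  The cache name and object type
 * are assumptions made for the example; when the feature is compiled out, the
 * create/alloc wrappers simply return NULL, so callers must handle that.
 *
 *	qdf_kmem_cache_t cache;
 *	struct my_node *node;
 *
 *	cache = __qdf_kmem_cache_create("my_node_cache",
 *					sizeof(struct my_node));
 *	node = cache ? __qdf_kmem_cache_alloc(cache) : NULL;
 *	if (!node)
 *		goto fail;
 *
 *	... use node ...
 *
 *	__qdf_kmem_cache_free(cache, node);
 *	__qdf_kmem_cache_destroy(cache);
 */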