12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189 |
- /*
- * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
- /**
- * DOC: qdf_mem
- * This file provides OS dependent memory management APIs
- */
- #include "qdf_debugfs.h"
- #include "qdf_mem.h"
- #include "qdf_nbuf.h"
- #include "qdf_lock.h"
- #include "qdf_mc_timer.h"
- #include "qdf_module.h"
- #include <qdf_trace.h>
- #include "qdf_str.h"
- #include "qdf_talloc.h"
- #include <linux/debugfs.h>
- #include <linux/seq_file.h>
- #include <linux/string.h>
- #include <qdf_list.h>
- #ifdef CNSS_MEM_PRE_ALLOC
- #ifdef CONFIG_CNSS_OUT_OF_TREE
- #include "cnss_prealloc.h"
- #else
- #include <net/cnss_prealloc.h>
- #endif
- #endif
/* cnss prealloc maintains various prealloc pools of 8Kb, 16Kb, 32Kb and so
 * on and allocates buffer from the pool for wlan driver. When wlan driver
 * requests to free the memory buffer then cnss prealloc derives slab_cache
 * from virtual memory via page struct to identify prealloc pool id to put
 * back memory buffer into the pool. Kernel 5.17 removed slab_cache from page
 * struct. So add headroom to store cache pointer at the beginning of
 * allocated memory buffer to use it later in identifying prealloc pool id.
 */
#if defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
/* Kernel >= 5.17: page->slab_cache is gone, so headroom is required to
 * remember which prealloc pool a buffer came from.
 */
static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
{
	return true;
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
/* Older kernels can still derive the slab cache from the page struct */
static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
{
	return false;
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
#else /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
/* Prealloc disabled or in-tree cnss: no headroom needed */
static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
{
	return false;
}
#endif /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
#if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
/* module parameter: allows disabling memory-debug tracking at load time */
static bool mem_debug_disabled;
qdf_declare_param(mem_debug_disabled, bool);
#endif

#ifdef MEMORY_DEBUG
/* presumably a snapshot of mem_debug_disabled taken at init —
 * TODO confirm where it is assigned (outside this chunk)
 */
static bool is_initial_mem_debug_disabled;
#endif

/* Preprocessor Definitions and Constants */

/* largest single allocation the debug framework considers sane; sizes
 * above this are flagged as corruption (see qdf_mem_header_validate)
 */
#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */

/* warning threshold in milliseconds — usage not visible in this chunk */
#define QDF_MEM_WARN_THRESHOLD 300 /* ms */

/* scratch-buffer size for building leak-report strings */
#define QDF_DEBUG_STRING_SIZE 512
/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc: total kmalloc allocations
 * @dma: total dma allocations
 * @skb: total skb allocations
 * @skb_total: total skb allocations in host driver
 * @dp_tx_skb: total Tx skb allocations in datapath
 * @dp_rx_skb: total Rx skb allocations in datapath
 * @skb_mem_max: high watermark for skb allocations
 * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
 * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
 * @dp_tx_skb_count: DP Tx buffer count
 * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
 * @dp_rx_skb_count: DP Rx buffer count
 * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
 * @tx_descs_outstanding: Current pending Tx descs count
 * @tx_descs_max: High watermark for pending Tx descs count
 *
 * Counters are atomics; the *_max watermarks are plain int32_t
 * (presumably only updated under a single writer — confirm at call sites).
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
	qdf_atomic_t skb_total;
	qdf_atomic_t dp_tx_skb;
	qdf_atomic_t dp_rx_skb;
	int32_t skb_mem_max;
	int32_t dp_tx_skb_mem_max;
	int32_t dp_rx_skb_mem_max;
	qdf_atomic_t dp_tx_skb_count;
	int32_t dp_tx_skb_count_max;
	qdf_atomic_t dp_rx_skb_count;
	int32_t dp_rx_skb_count_max;
	qdf_atomic_t tx_descs_outstanding;
	int32_t tx_descs_max;
} qdf_mem_stat;
- #ifdef MEMORY_DEBUG
- #include "qdf_debug_domain.h"
/* identifies which allocation-tracking list a debugfs entry walks */
enum list_type {
	LIST_TYPE_MEM = 0,
	LIST_TYPE_DMA = 1,
	LIST_TYPE_NBUF = 2,
	LIST_TYPE_MAX,
};
/**
 * struct major_alloc_priv - private data registered to the debugfs entry
 * created to list the major allocations
 * @type: type of the list to be parsed
 * @threshold: configured by user by overwriting the respective debugfs
 *             sys entry. This is to list the functions which requested
 *             memory/dma allocations more than threshold number of times.
 */
struct major_alloc_priv {
	enum list_type type;
	uint32_t threshold;
};
/* per-domain lists of outstanding host allocations, guarded by
 * qdf_mem_list_lock
 */
static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

/* per-domain lists of outstanding DMA allocations, guarded by
 * qdf_mem_dma_list_lock
 */
static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;
/**
 * qdf_mem_list_get() - get the host-allocation tracking list for a domain
 * @domain: the debug domain whose list to return
 *
 * Return: pointer to the domain's host-allocation list
 */
static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}
/**
 * qdf_mem_dma_list() - get the DMA-allocation tracking list for a domain
 * @domain: the debug domain whose list to return
 *
 * Return: pointer to the domain's DMA-allocation list
 */
static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}
/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @func: name of the function the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 *
 * This header is placed immediately before (host) or after (DMA) the
 * caller-visible buffer; see qdf_mem_get_header()/qdf_mem_dma_get_header().
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};
/* align the qdf_mem_header to 8 bytes */
#define QDF_DMA_MEM_HEADER_ALIGN 8

/* sentinel patterns written around each tracked allocation; validated on
 * free to detect out-of-bounds writes
 */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
/**
 * qdf_mem_get_header() - get the debug header of a host allocation
 * @ptr: the pointer handed out to the caller; the header lives just before it
 *
 * Return: pointer to the struct qdf_mem_header preceding @ptr
 */
static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}
/* make sure the header pointer is 8bytes aligned */
/**
 * qdf_mem_dma_get_header() - get the debug header of a DMA allocation
 * @ptr: start of the caller-visible DMA buffer
 * @size: size of the caller-visible buffer in bytes
 *
 * For DMA memory the header is stored AFTER the buffer (so the buffer keeps
 * the alignment the DMA API returned), rounded up to
 * QDF_DMA_MEM_HEADER_ALIGN.
 *
 * Return: pointer to the struct qdf_mem_header following @ptr + @size
 */
static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
							    qdf_size_t size)
{
	return (struct qdf_mem_header *)
		qdf_roundup((size_t)((uint8_t *)ptr + size),
			    QDF_DMA_MEM_HEADER_ALIGN);
}
- static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
- {
- return (uint64_t *)((void *)(header + 1) + header->size);
- }
/**
 * qdf_mem_get_ptr() - get the caller-visible pointer for a debug header
 * @header: the allocation's debug header
 *
 * Return: pointer to the memory region handed out to the caller
 */
static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}
/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information;
 * the extra QDF_DMA_MEM_HEADER_ALIGN covers alignment padding before the
 * trailing header (see qdf_mem_dma_get_header())
 */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
/**
 * qdf_mem_trailer_init() - write the trailer sentinel for an allocation
 * @header: the allocation's debug header
 *
 * Return: None
 */
static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;

	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}
/**
 * qdf_mem_header_init() - populate an allocation's debug header
 * @header: the header to initialize
 * @size: caller-visible allocation size in bytes
 * @func: name of the allocating function
 * @line: source line of the allocation
 * @caller: return address of the allocating caller
 *
 * Return: None
 */
static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *func, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	header->domain = qdf_debug_domain_get();
	header->freed = false;
	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
	header->line = line;
	header->size = size;
	header->caller = caller;
	/* magic sentinel; checked on free to detect corruption */
	header->header = WLAN_MEM_HEADER;
	header->time = qdf_get_log_timestamp();
}
/* bitmap of everything that can be wrong with a tracked allocation */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};
- static enum qdf_mem_validation_bitmap
- qdf_mem_trailer_validate(struct qdf_mem_header *header)
- {
- enum qdf_mem_validation_bitmap error_bitmap = 0;
- if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
- error_bitmap |= QDF_MEM_BAD_TRAILER;
- return error_bitmap;
- }
/**
 * qdf_mem_header_validate() - validate an allocation's debug header
 * @header: the header to validate
 * @domain: the debug domain the header is expected to belong to
 *
 * Return: bitmap of detected validation errors; 0 if the header is intact
 */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed is a uint8_t: exactly 'true' means double free, any other
	 * non-zero value means the flag itself was corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_list_node_in_any_list(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}
/**
 * qdf_mem_header_assert_valid() - log validation errors and panic
 * @header: the memory header that failed validation
 * @current_domain: the currently active debug domain
 * @error_bitmap: validation errors reported for @header
 * @func: caller name, for the panic message
 * @line: caller line, for the panic message
 *
 * No-op when @error_bitmap is clear; otherwise logs every detected problem
 * and then panics via QDF_MEMDEBUG_PANIC().
 *
 * Return: None (does not return when @error_bitmap is non-zero)
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *func,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
}
/**
 * struct __qdf_mem_info - memory statistics
 * @func: the function which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type
 * @time: timestamp at which allocation happened
 *
 * One de-duplicated row of the leak-report table; entries with equal
 * func/line/size/caller are merged by bumping @count.
 */
struct __qdf_mem_info {
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};
/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time, so choose any optimum value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8
/**
 * qdf_mem_debug_print_header() - memory debug header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @threshold: the threshold value set by user to list top allocations
 *
 * Emits the column banner for the allocation table; when @threshold is
 * non-zero, a filter note is printed first.
 *
 * Return: None
 */
static void qdf_mem_debug_print_header(qdf_abstract_print print,
				       void *print_priv,
				       uint32_t threshold)
{
	if (threshold)
		print(print_priv, "APIs requested allocations >= %u no of time",
		      threshold);
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv,
	      " count    size     total    filename     caller    timestamp");
	print(print_priv,
	      "--------------------------------------------------------------");
}
- /**
- * qdf_mem_meta_table_insert() - insert memory metadata into the given table
- * @table: the memory metadata table to insert into
- * @meta: the memory metadata to insert
- *
- * Return: true if the table is full after inserting, false otherwise
- */
- static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
- struct qdf_mem_header *meta)
- {
- int i;
- for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
- if (!table[i].count) {
- qdf_str_lcopy(table[i].func, meta->func,
- QDF_MEM_FUNC_NAME_SIZE);
- table[i].line = meta->line;
- table[i].size = meta->size;
- table[i].count = 1;
- table[i].caller = meta->caller;
- table[i].time = meta->time;
- break;
- }
- if (qdf_str_eq(table[i].func, meta->func) &&
- table[i].line == meta->line &&
- table[i].size == meta->size &&
- table[i].caller == meta->caller) {
- table[i].count++;
- break;
- }
- }
- /* return true if the table is now full */
- return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
- }
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @threshold: the threshold value set by user to list top allocations
 * @mem_print: pointer to function which prints the memory allocation data
 *
 * Walks every outstanding allocation in @domain, de-duplicating metadata
 * into a fixed-size table; each time the table fills it is flushed through
 * @mem_print and reset, with one final flush for the remainder.
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv,
				 uint32_t threshold,
				 void (*mem_print)(struct __qdf_mem_info *,
						   qdf_abstract_print,
						   void *, uint32_t))
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_debug_print_header(print, print_priv, threshold);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* the table holds copies, so the lock can be dropped while
		 * printing; NOTE(review): @node itself is re-used after the
		 * lock is re-taken — confirm allocations are quiesced during
		 * a dump
		 */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			(*mem_print)(table, print, print_priv, threshold);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush any partially-filled table */
	(*mem_print)(table, print, print_priv, threshold);
}
/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @threshold: the threshold value set by user to list top allocations
 *
 * Prints one line per de-duplicated entry and also accumulates all entries
 * into a single "WLAN_BUG_RCA" string emitted at the end.
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv,
				     uint32_t threshold)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* entries are filled front-to-back; first empty ends table */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].func,
		      table[i].line, table[i].caller,
		      table[i].time);

		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].func,
				     table[i].line,
				     table[i].caller);
	}

	print(print_priv, "%s", debug_str);
}
/**
 * qdf_err_printer() - qdf_abstract_print adapter logging at error level
 * @priv: print context (unused)
 * @fmt: printf-style format string
 *
 * Return: 0 (always)
 */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}
- #endif /* MEMORY_DEBUG */
- bool prealloc_disabled = 1;
- qdf_declare_param(prealloc_disabled, bool);
- qdf_export_symbol(prealloc_disabled);
- int qdf_mem_malloc_flags(void)
- {
- if (in_interrupt() || !preemptible() || rcu_preempt_depth())
- return GFP_ATOMIC;
- return GFP_KERNEL;
- }
- qdf_export_symbol(qdf_mem_malloc_flags);
/**
 * qdf_prealloc_disabled_config_get() - Get the user configuration of
 * prealloc_disabled
 *
 * Return: value of prealloc_disabled qdf module argument
 */
bool qdf_prealloc_disabled_config_get(void)
{
	return prealloc_disabled;
}

qdf_export_symbol(qdf_prealloc_disabled_config_get);
#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
 * @str_value: value of the module param
 *
 * This function will set qdf module param prealloc_disabled
 *
 * Return: QDF_STATUS_SUCCESS on Success
 */
QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
{
	/* propagate the parse status directly; no temporary needed */
	return qdf_bool_parse(str_value, &prealloc_disabled);
}
#endif
#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;
#ifdef MEMORY_DEBUG

/**
 * seq_printf_printer() - qdf_abstract_print adapter writing to a seq_file
 * @priv: the struct seq_file to print into
 * @fmt: printf-style format string
 *
 * Appends a trailing newline after each call.
 *
 * Return: 0 (always)
 */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}
/**
 * qdf_print_major_alloc() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @threshold: the threshold value set by user to list top allocations
 *
 * Like qdf_mem_meta_table_print() but only emits entries whose count is at
 * least @threshold.
 *
 * Return: None
 */
static void qdf_print_major_alloc(struct __qdf_mem_info *table,
				  qdf_abstract_print print,
				  void *print_priv,
				  uint32_t threshold)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* entries are filled front-to-back; first empty ends table */
		if (!table[i].count)
			break;
		if (table[i].count >= threshold)
			print(print_priv,
			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
			      table[i].count,
			      table[i].size,
			      table[i].count * table[i].size,
			      table[i].func,
			      table[i].line, table[i].caller,
			      table[i].time);
	}
}
/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	enum qdf_debug_domain domain = *pos;

	/* stop once every debug domain has been emitted */
	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}
- /**
- * qdf_mem_seq_next() - next sequential callback
- * @seq: seq_file handle
- * @v: the current iterator
- * @pos: the current position
- *
- * Get the next node and release previous node.
- *
- * Return: iterator pointer, or NULL if iteration is complete
- */
- static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
- {
- ++*pos;
- return qdf_mem_seq_start(seq, pos);
- }
/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Nothing to release; the iterator is just the position cursor.
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Dumps every outstanding host allocation of the domain selected by @v.
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer,
			     seq,
			     0,
			     qdf_mem_meta_table_print);

	return 0;
}
/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};
/**
 * qdf_mem_debugfs_open() - debugfs open handler for the allocation dump
 * @inode: inode of the debugfs entry
 * @file: file being opened
 *
 * Return: 0 on success, negative errno from seq_open() on failure
 */
static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}
- /**
- * qdf_major_alloc_show() - print sequential callback
- * @seq: seq_file handle
- * @v: current iterator
- *
- * Return: 0 - success
- */
- static int qdf_major_alloc_show(struct seq_file *seq, void *v)
- {
- enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
- struct major_alloc_priv *priv;
- qdf_list_t *list;
- priv = (struct major_alloc_priv *)seq->private;
- seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
- qdf_debug_domain_name(domain_id), domain_id);
- switch (priv->type) {
- case LIST_TYPE_MEM:
- list = qdf_mem_list_get(domain_id);
- break;
- case LIST_TYPE_DMA:
- list = qdf_mem_dma_list(domain_id);
- break;
- default:
- list = NULL;
- break;
- }
- if (list)
- qdf_mem_domain_print(list,
- seq_printf_printer,
- seq,
- priv->threshold,
- qdf_print_major_alloc);
- return 0;
- }
/* seq_file operation table created to track major (mem/dma) allocs;
 * reuses the generic domain iterator, only the show callback differs
 */
static const struct seq_operations qdf_major_allocs_seq_ops = {
	.start = qdf_mem_seq_start,
	.next = qdf_mem_seq_next,
	.stop = qdf_mem_seq_stop,
	.show = qdf_major_alloc_show,
};
- static int qdf_major_allocs_open(struct inode *inode, struct file *file)
- {
- void *private = inode->i_private;
- struct seq_file *seq;
- int rc;
- rc = seq_open(file, &qdf_major_allocs_seq_ops);
- if (rc == 0) {
- seq = file->private_data;
- seq->private = private;
- }
- return rc;
- }
- static ssize_t qdf_major_alloc_set_threshold(struct file *file,
- const char __user *user_buf,
- size_t count,
- loff_t *pos)
- {
- char buf[32];
- ssize_t buf_size;
- uint32_t threshold;
- struct seq_file *seq = file->private_data;
- struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
- buf_size = min(count, (sizeof(buf) - 1));
- if (buf_size <= 0)
- return 0;
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = '\0';
- if (!kstrtou32(buf, 10, &threshold))
- priv->threshold = threshold;
- return buf_size;
- }
- /**
- * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
- * @threshold: the threshold value set by uset to list top allocations
- * @print: the print adapter function
- * @print_priv: the private data to be consumed by @print
- * @mem_print: pointer to function which prints the memory allocation data
- *
- * Return: None
- */
- static void
- qdf_print_major_nbuf_allocs(uint32_t threshold,
- qdf_abstract_print print,
- void *print_priv,
- void (*mem_print)(struct __qdf_mem_info *,
- qdf_abstract_print,
- void *, uint32_t))
- {
- uint32_t nbuf_iter;
- unsigned long irq_flag = 0;
- QDF_NBUF_TRACK *p_node;
- struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
- struct qdf_mem_header meta;
- bool is_full;
- qdf_mem_zero(table, sizeof(table));
- qdf_mem_debug_print_header(print, print_priv, threshold);
- if (is_initial_mem_debug_disabled)
- return;
- qdf_rl_info("major nbuf print with threshold %u", threshold);
- for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
- nbuf_iter++) {
- qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
- p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
- while (p_node) {
- meta.line = p_node->line_num;
- meta.size = p_node->size;
- meta.caller = NULL;
- meta.time = p_node->time;
- qdf_str_lcopy(meta.func, p_node->func_name,
- QDF_MEM_FUNC_NAME_SIZE);
- is_full = qdf_mem_meta_table_insert(table, &meta);
- if (is_full) {
- (*mem_print)(table, print,
- print_priv, threshold);
- qdf_mem_zero(table, sizeof(table));
- }
- p_node = p_node->p_next;
- }
- qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
- }
- (*mem_print)(table, print, print_priv, threshold);
- qdf_rl_info("major nbuf print end");
- }
- /**
- * qdf_major_nbuf_alloc_show() - print sequential callback
- * @seq: seq_file handle
- * @v: current iterator
- *
- * Return: 0 - success
- */
- static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
- {
- struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
- if (!priv) {
- qdf_err("priv is null");
- return -EINVAL;
- }
- qdf_print_major_nbuf_allocs(priv->threshold,
- seq_printf_printer,
- seq,
- qdf_print_major_alloc);
- return 0;
- }
- /**
- * qdf_nbuf_seq_start() - sequential callback to start
- * @seq: seq_file handle
- * @pos: The start position of the sequence
- *
- * Return: iterator pointer, or NULL if iteration is complete
- */
- static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
- {
- enum qdf_debug_domain domain = *pos;
- if (domain > QDF_DEBUG_NBUF_DOMAIN)
- return NULL;
- return pos;
- }
- /**
- * qdf_nbuf_seq_next() - next sequential callback
- * @seq: seq_file handle
- * @v: the current iterator
- * @pos: the current position
- *
- * Get the next node and release previous node.
- *
- * Return: iterator pointer, or NULL if iteration is complete
- */
- static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
- {
- ++*pos;
- return qdf_nbuf_seq_start(seq, pos);
- }
- /**
- * qdf_nbuf_seq_stop() - stop sequential callback
- * @seq: seq_file handle
- * @v: current iterator
- *
- * Return: None
- */
- static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
- /* sequential file operation table created to track major skb allocs */
- static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
- .start = qdf_nbuf_seq_start,
- .next = qdf_nbuf_seq_next,
- .stop = qdf_nbuf_seq_stop,
- .show = qdf_major_nbuf_alloc_show,
- };
- static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
- {
- void *private = inode->i_private;
- struct seq_file *seq;
- int rc;
- rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
- if (rc == 0) {
- seq = file->private_data;
- seq->private = private;
- }
- return rc;
- }
- static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
- const char __user *user_buf,
- size_t count,
- loff_t *pos)
- {
- char buf[32];
- ssize_t buf_size;
- uint32_t threshold;
- struct seq_file *seq = file->private_data;
- struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
- buf_size = min(count, (sizeof(buf) - 1));
- if (buf_size <= 0)
- return 0;
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = '\0';
- if (!kstrtou32(buf, 10, &threshold))
- priv->threshold = threshold;
- return buf_size;
- }
/* file operation table for listing major mem/dma allocs;
 * write sets the reporting threshold
 */
static const struct file_operations fops_qdf_major_allocs = {
	.owner = THIS_MODULE,
	.open = qdf_major_allocs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = qdf_major_alloc_set_threshold,
};

/* debugfs file operation table for the read-only "list" file */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* file operation table for listing major nbuf allocs;
 * write sets the reporting threshold
 */
static const struct file_operations fops_qdf_nbuf_major_allocs = {
	.owner = THIS_MODULE,
	.open = qdf_major_nbuf_allocs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = qdf_major_nbuf_alloc_set_threshold,
};
- static struct major_alloc_priv mem_priv = {
- /* List type set to mem */
- LIST_TYPE_MEM,
- /* initial threshold to list APIs which allocates mem >= 50 times */
- 50
- };
- static struct major_alloc_priv dma_priv = {
- /* List type set to DMA */
- LIST_TYPE_DMA,
- /* initial threshold to list APIs which allocates dma >= 50 times */
- 50
- };
- static struct major_alloc_priv nbuf_priv = {
- /* List type set to NBUF */
- LIST_TYPE_NBUF,
- /* initial threshold to list APIs which allocates nbuf >= 50 times */
- 50
- };
/* Create the MEMORY_DEBUG-specific debugfs files under the "mem" dir.
 * Skipped entirely when memory debugging was disabled at init time.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the parent
 *         directory was never created.
 */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (is_initial_mem_debug_disabled)
		return QDF_STATUS_SUCCESS;

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	/* read-only dump of every tracked allocation */
	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	/* the major_* files are writable: writing sets the threshold */
	debugfs_create_file("major_mem_allocs",
			    0600,
			    qdf_mem_debugfs_root,
			    &mem_priv,
			    &fops_qdf_major_allocs);

	debugfs_create_file("major_dma_allocs",
			    0600,
			    qdf_mem_debugfs_root,
			    &dma_priv,
			    &fops_qdf_major_allocs);

	debugfs_create_file("major_nbuf_allocs",
			    0600,
			    qdf_mem_debugfs_root,
			    &nbuf_priv,
			    &fops_qdf_nbuf_major_allocs);

	return QDF_STATUS_SUCCESS;
}

/* No per-file teardown needed: qdf_mem_debugfs_exit() removes the whole
 * "mem" directory recursively.
 */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}

#else /* MEMORY_DEBUG */

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* MEMORY_DEBUG */

/* Remove the whole "mem" debugfs subtree and forget the root dentry */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}
/* Create the "mem" debugfs directory and the always-available atomic
 * counters (kmalloc/dma/skb byte totals).
 *
 * NOTE(review): on recent kernels debugfs_create_dir() returns an
 * ERR_PTR rather than NULL on failure, so the NULL check below may not
 * catch errors there — confirm against the target kernel version.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the qdf
 *         debugfs root is unavailable or directory creation failed.
 */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}

#else /* WLAN_DEBUGFS */

static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void qdf_mem_debugfs_exit(void) {}

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* WLAN_DEBUGFS */
/* Account @size bytes against the global kmalloc counter */
void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

/* Account @size bytes against the global DMA counter */
static void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/* skb / DP-path accounting used by the sysfs mem stats interface.
 *
 * NOTE(review): the *_max high-water marks below are updated with a
 * plain read-compare-store after the atomic add, so concurrent updaters
 * can miss a peak; this appears to be accepted for statistics — confirm.
 */
void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}

/* Add to the cumulative skb total and track its high-water mark */
void qdf_mem_skb_total_inc(qdf_size_t size)
{
	int32_t skb_mem_max = 0;

	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
		qdf_mem_stat.skb_mem_max = skb_mem_max;
}

void qdf_mem_skb_total_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
}

/* Datapath TX skb bytes, with high-water mark */
void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
{
	int32_t curr_dp_tx_skb_mem_max = 0;

	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
}

void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
}

/* Datapath RX skb bytes, with high-water mark */
void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
{
	int32_t curr_dp_rx_skb_mem_max = 0;

	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
}

void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
}

/* Datapath TX skb count, with high-water mark */
void qdf_mem_dp_tx_skb_cnt_inc(void)
{
	int32_t curr_dp_tx_skb_count_max = 0;

	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
	curr_dp_tx_skb_count_max =
		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
}

void qdf_mem_dp_tx_skb_cnt_dec(void)
{
	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
}

/* Datapath RX skb count, with high-water mark */
void qdf_mem_dp_rx_skb_cnt_inc(void)
{
	int32_t curr_dp_rx_skb_count_max = 0;

	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
	curr_dp_rx_skb_count_max =
		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
}

void qdf_mem_dp_rx_skb_cnt_dec(void)
{
	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
}
#endif

/* Release @size bytes from the global kmalloc counter */
void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

/* Release @size bytes from the global DMA counter */
static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}
- /**
- * __qdf_mempool_init() - Create and initialize memory pool
- *
- * @osdev: platform device object
- * @pool_addr: address of the pool created
- * @elem_cnt: no. of elements in pool
- * @elem_size: size of each pool element in bytes
- * @flags: flags
- *
- * return: Handle to memory pool or NULL if allocation failed
- */
- int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
- int elem_cnt, size_t elem_size, u_int32_t flags)
- {
- __qdf_mempool_ctxt_t *new_pool = NULL;
- u_int32_t align = L1_CACHE_BYTES;
- unsigned long aligned_pool_mem;
- int pool_id;
- int i;
- if (prealloc_disabled) {
- /* TBD: We can maintain a list of pools in qdf_device_t
- * to help debugging
- * when pre-allocation is not enabled
- */
- new_pool = (__qdf_mempool_ctxt_t *)
- kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
- if (!new_pool)
- return QDF_STATUS_E_NOMEM;
- memset(new_pool, 0, sizeof(*new_pool));
- /* TBD: define flags for zeroing buffers etc */
- new_pool->flags = flags;
- new_pool->elem_size = elem_size;
- new_pool->max_elem = elem_cnt;
- *pool_addr = new_pool;
- return 0;
- }
- for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
- if (!osdev->mem_pool[pool_id])
- break;
- }
- if (pool_id == MAX_MEM_POOLS)
- return -ENOMEM;
- new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
- kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
- if (!new_pool)
- return -ENOMEM;
- memset(new_pool, 0, sizeof(*new_pool));
- /* TBD: define flags for zeroing buffers etc */
- new_pool->flags = flags;
- new_pool->pool_id = pool_id;
- /* Round up the element size to cacheline */
- new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
- new_pool->mem_size = elem_cnt * new_pool->elem_size +
- ((align)?(align - 1):0);
- new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
- if (!new_pool->pool_mem) {
- /* TBD: Check if we need get_free_pages above */
- kfree(new_pool);
- osdev->mem_pool[pool_id] = NULL;
- return -ENOMEM;
- }
- spin_lock_init(&new_pool->lock);
- /* Initialize free list */
- aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
- ((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
- STAILQ_INIT(&new_pool->free_list);
- for (i = 0; i < elem_cnt; i++)
- STAILQ_INSERT_TAIL(&(new_pool->free_list),
- (mempool_elem_t *)(aligned_pool_mem +
- (new_pool->elem_size * i)), mempool_entry);
- new_pool->free_cnt = elem_cnt;
- *pool_addr = new_pool;
- return 0;
- }
- qdf_export_symbol(__qdf_mempool_init);
- /**
- * __qdf_mempool_destroy() - Destroy memory pool
- * @osdev: platform device object
- * @Handle: to memory pool
- *
- * Returns: none
- */
- void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
- {
- int pool_id = 0;
- if (!pool)
- return;
- if (prealloc_disabled) {
- kfree(pool);
- return;
- }
- pool_id = pool->pool_id;
- /* TBD: Check if free count matches elem_cnt if debug is enabled */
- kfree(pool->pool_mem);
- kfree(pool);
- osdev->mem_pool[pool_id] = NULL;
- }
- qdf_export_symbol(__qdf_mempool_destroy);
- /**
- * __qdf_mempool_alloc() - Allocate an element memory pool
- *
- * @osdev: platform device object
- * @Handle: to memory pool
- *
- * Return: Pointer to the allocated element or NULL if the pool is empty
- */
- void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
- {
- void *buf = NULL;
- if (!pool)
- return NULL;
- if (prealloc_disabled)
- return qdf_mem_malloc(pool->elem_size);
- spin_lock_bh(&pool->lock);
- buf = STAILQ_FIRST(&pool->free_list);
- if (buf) {
- STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
- pool->free_cnt--;
- }
- /* TBD: Update free count if debug is enabled */
- spin_unlock_bh(&pool->lock);
- return buf;
- }
- qdf_export_symbol(__qdf_mempool_alloc);
- /**
- * __qdf_mempool_free() - Free a memory pool element
- * @osdev: Platform device object
- * @pool: Handle to memory pool
- * @buf: Element to be freed
- *
- * Returns: none
- */
- void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
- {
- if (!pool)
- return;
- if (prealloc_disabled)
- return qdf_mem_free(buf);
- spin_lock_bh(&pool->lock);
- pool->free_cnt++;
- STAILQ_INSERT_TAIL
- (&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
- spin_unlock_bh(&pool->lock);
- }
- qdf_export_symbol(__qdf_mempool_free);
- #ifdef CNSS_MEM_PRE_ALLOC
- static bool qdf_might_be_prealloc(void *ptr)
- {
- if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
- return true;
- else
- return false;
- }
- /**
- * qdf_mem_prealloc_get() - conditionally pre-allocate memory
- * @size: the number of bytes to allocate
- *
- * If size if greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
- * a chunk of pre-allocated memory. If size if less than or equal to
- * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
- *
- * Return: NULL on failure, non-NULL on success
- */
- static void *qdf_mem_prealloc_get(size_t size)
- {
- void *ptr;
- if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
- return NULL;
- ptr = wcnss_prealloc_get(size);
- if (!ptr)
- return NULL;
- if (add_headroom_for_cnss_prealloc_cache_ptr())
- ptr += sizeof(void *);
- memset(ptr, 0, size);
- return ptr;
- }
/* Return @ptr to the wcnss prealloc pool; true when it was accepted */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
/* Stubs when CNSS pre-allocation support is compiled out */
static bool qdf_might_be_prealloc(void *ptr)
{
	return false;
}

static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CNSS_MEM_PRE_ALLOC */
- /* External Function implementation */
- #ifdef MEMORY_DEBUG
- /**
- * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
- *
- * Return: value of mem_debug_disabled qdf module argument
- */
- #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
- bool qdf_mem_debug_config_get(void)
- {
- /* Return false if DISABLE_LOAD_MEM_DBG_CONFIG flag is enabled */
- return false;
- }
- #else
- bool qdf_mem_debug_config_get(void)
- {
- return mem_debug_disabled;
- }
- #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
- /**
- * qdf_mem_debug_disabled_set() - Set mem_debug_disabled
- * @str_value: value of the module param
- *
- * This function will se qdf module param mem_debug_disabled
- *
- * Return: QDF_STATUS_SUCCESS on Success
- */
- #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
- QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
- {
- QDF_STATUS status;
- status = qdf_bool_parse(str_value, &mem_debug_disabled);
- return status;
- }
- #endif
- /**
- * qdf_mem_debug_init() - initialize qdf memory debug functionality
- *
- * Return: none
- */
- static void qdf_mem_debug_init(void)
- {
- int i;
- is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
- if (is_initial_mem_debug_disabled)
- return;
- /* Initializing the list with maximum size of 60000 */
- for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
- qdf_list_create(&qdf_mem_domains[i], 60000);
- qdf_spinlock_create(&qdf_mem_list_lock);
- /* dma */
- for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
- qdf_list_create(&qdf_mem_dma_domains[i], 0);
- qdf_spinlock_create(&qdf_mem_dma_list_lock);
- }
- static uint32_t
- qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
- qdf_list_t *mem_list)
- {
- if (is_initial_mem_debug_disabled)
- return 0;
- if (qdf_list_empty(mem_list))
- return 0;
- qdf_err("Memory leaks detected in %s domain!",
- qdf_debug_domain_name(domain));
- qdf_mem_domain_print(mem_list,
- qdf_err_printer,
- NULL,
- 0,
- qdf_mem_meta_table_print);
- return mem_list->count;
- }
- static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
- {
- uint32_t leak_count = 0;
- int i;
- if (is_initial_mem_debug_disabled)
- return;
- /* detect and print leaks */
- for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
- leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
- if (leak_count)
- QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
- leak_count);
- }
- /**
- * qdf_mem_debug_exit() - exit qdf memory debug functionality
- *
- * Return: none
- */
- static void qdf_mem_debug_exit(void)
- {
- int i;
- if (is_initial_mem_debug_disabled)
- return;
- /* mem */
- qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
- for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
- qdf_list_destroy(qdf_mem_list_get(i));
- qdf_spinlock_destroy(&qdf_mem_list_lock);
- /* dma */
- qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
- for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
- qdf_list_destroy(&qdf_mem_dma_domains[i]);
- qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
- }
/* Tracked allocator: wraps the payload with a debug header/trailer,
 * records the allocation on the current domain's list, and accounts the
 * bytes. Falls through to the plain allocator when debug was disabled
 * at init. @flag of 0 selects the default allocation flags.
 *
 * NOTE(review): the prealloc path returns before header insertion, so
 * pre-allocated chunks are intentionally not leak-tracked — confirm.
 */
void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (is_initial_mem_debug_disabled)
		return __qdf_mem_malloc(size, func, line);

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	/* reserve headroom for the cnss prealloc cache pointer */
	if (add_headroom_for_cnss_prealloc_cache_ptr())
		size += sizeof(void *);

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	if (!flag)
		flag = qdf_mem_malloc_flags();

	/* time the allocation to flag unexpectedly slow sleeps */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, func, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, func, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(ksize(header));

	if (add_headroom_for_cnss_prealloc_cache_ptr())
		ptr += sizeof(void *);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);

/* Atomic-context variant of qdf_mem_malloc_debug(): identical tracking,
 * but always allocates with GFP_ATOMIC and never overrides the flags
 */
void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
				  uint32_t line, void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (is_initial_mem_debug_disabled)
		return qdf_mem_malloc_atomic_debug_fl(size, func, line);

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	if (add_headroom_for_cnss_prealloc_cache_ptr())
		size += sizeof(void *);

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, func, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, func, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(ksize(header));

	if (add_headroom_for_cnss_prealloc_cache_ptr())
		ptr += sizeof(void *);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_atomic_debug);
/* Untracked atomic allocator used when memory debugging is disabled at
 * init: no header/trailer, only byte accounting via ksize()
 */
void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
				     uint32_t line)
{
	void *ptr;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
			     line);
		return NULL;
	}

	/* reserve headroom for the cnss prealloc cache pointer */
	if (add_headroom_for_cnss_prealloc_cache_ptr())
		size += sizeof(void *);

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	ptr = kzalloc(size, GFP_ATOMIC);
	if (!ptr) {
		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
			      size, func, line);
		return NULL;
	}

	qdf_mem_kmalloc_inc(ksize(ptr));

	if (add_headroom_for_cnss_prealloc_cache_ptr())
		ptr += sizeof(void *);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
/* Tracked free: validates the debug header/trailer under the list lock,
 * unlinks the allocation from its domain list, and releases it. Panics
 * on corruption or invalid pointers. Order matters: the prealloc-put
 * check must happen after the headroom adjustment and before header
 * validation, since prealloc chunks carry no debug header.
 */
void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	if (is_initial_mem_debug_disabled) {
		__qdf_mem_free(ptr);
		return;
	}

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	/* undo the headroom adjustment made at allocation time */
	if (add_headroom_for_cnss_prealloc_cache_ptr())
		ptr = ptr - sizeof(void *);

	if (qdf_mem_prealloc_put(ptr))
		return;

	/* a pointer this small cannot have a header in front of it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
				   ptr);

	qdf_talloc_assert_no_children_fl(ptr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	if (!error_bitmap) {
		header->freed = true;
		qdf_list_remove_node(qdf_mem_list_get(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* panics with diagnostics if any validation bit is set */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    func, line);

	qdf_mem_kmalloc_dec(ksize(header));
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);

/* Leak check for the CURRENT domain only (mem + dma lists); panics if
 * any tracked allocation remains
 */
void qdf_mem_check_for_leaks(void)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
	uint32_t leaks_count = 0;

	if (is_initial_mem_debug_disabled)
		return;

	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);

	if (leaks_count)
		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
				   leaks_count);
}
- /**
- * qdf_mem_multi_pages_alloc_debug() - Debug version of
- * qdf_mem_multi_pages_alloc
- * @osdev: OS device handle pointer
- * @pages: Multi page information storage
- * @element_size: Each element size
- * @element_num: Total number of elements should be allocated
- * @memctxt: Memory context
- * @cacheable: Coherent memory or cacheable memory
- * @func: Caller of this allocator
- * @line: Line number of the caller
- * @caller: Return address of the caller
- *
- * This function will allocate large size of memory over multiple pages.
- * Large size of contiguous memory allocation will fail frequently, then
- * instead of allocate large memory by one shot, allocate through multiple, non
- * contiguous memory and combine pages when actual usage
- *
- * Return: None
- */
- void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
- struct qdf_mem_multi_page_t *pages,
- size_t element_size, uint32_t element_num,
- qdf_dma_context_t memctxt, bool cacheable,
- const char *func, uint32_t line,
- void *caller)
- {
- uint16_t page_idx;
- struct qdf_mem_dma_page_t *dma_pages;
- void **cacheable_pages = NULL;
- uint16_t i;
- if (!pages->page_size)
- pages->page_size = qdf_page_size;
- pages->num_element_per_page = pages->page_size / element_size;
- if (!pages->num_element_per_page) {
- qdf_print("Invalid page %d or element size %d",
- (int)pages->page_size, (int)element_size);
- goto out_fail;
- }
- pages->num_pages = element_num / pages->num_element_per_page;
- if (element_num % pages->num_element_per_page)
- pages->num_pages++;
- if (cacheable) {
- /* Pages information storage */
- pages->cacheable_pages = qdf_mem_malloc_debug(
- pages->num_pages * sizeof(pages->cacheable_pages),
- func, line, caller, 0);
- if (!pages->cacheable_pages)
- goto out_fail;
- cacheable_pages = pages->cacheable_pages;
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
- cacheable_pages[page_idx] = qdf_mem_malloc_debug(
- pages->page_size, func, line, caller, 0);
- if (!cacheable_pages[page_idx])
- goto page_alloc_fail;
- }
- pages->dma_pages = NULL;
- } else {
- pages->dma_pages = qdf_mem_malloc_debug(
- pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
- func, line, caller, 0);
- if (!pages->dma_pages)
- goto out_fail;
- dma_pages = pages->dma_pages;
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
- dma_pages->page_v_addr_start =
- qdf_mem_alloc_consistent_debug(
- osdev, osdev->dev, pages->page_size,
- &dma_pages->page_p_addr,
- func, line, caller);
- if (!dma_pages->page_v_addr_start) {
- qdf_print("dmaable page alloc fail pi %d",
- page_idx);
- goto page_alloc_fail;
- }
- dma_pages->page_v_addr_end =
- dma_pages->page_v_addr_start + pages->page_size;
- dma_pages++;
- }
- pages->cacheable_pages = NULL;
- }
- return;
- page_alloc_fail:
- if (cacheable) {
- for (i = 0; i < page_idx; i++)
- qdf_mem_free_debug(pages->cacheable_pages[i],
- func, line);
- qdf_mem_free_debug(pages->cacheable_pages, func, line);
- } else {
- dma_pages = pages->dma_pages;
- for (i = 0; i < page_idx; i++) {
- qdf_mem_free_consistent_debug(
- osdev, osdev->dev,
- pages->page_size, dma_pages->page_v_addr_start,
- dma_pages->page_p_addr, memctxt, func, line);
- dma_pages++;
- }
- qdf_mem_free_debug(pages->dma_pages, func, line);
- }
- out_fail:
- pages->cacheable_pages = NULL;
- pages->dma_pages = NULL;
- pages->num_pages = 0;
- }
- qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
- /**
- * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
- * @osdev: OS device handle pointer
- * @pages: Multi page information storage
- * @memctxt: Memory context
- * @cacheable: Coherent memory or cacheable memory
- * @func: Caller of this allocator
- * @line: Line number of the caller
- *
- * This function will free large size of memory over multiple pages.
- *
- * Return: None
- */
- void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
- struct qdf_mem_multi_page_t *pages,
- qdf_dma_context_t memctxt, bool cacheable,
- const char *func, uint32_t line)
- {
- unsigned int page_idx;
- struct qdf_mem_dma_page_t *dma_pages;
- if (!pages->page_size)
- pages->page_size = qdf_page_size;
- if (cacheable) {
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
- qdf_mem_free_debug(pages->cacheable_pages[page_idx],
- func, line);
- qdf_mem_free_debug(pages->cacheable_pages, func, line);
- } else {
- dma_pages = pages->dma_pages;
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
- qdf_mem_free_consistent_debug(
- osdev, osdev->dev, pages->page_size,
- dma_pages->page_v_addr_start,
- dma_pages->page_p_addr, memctxt, func, line);
- dma_pages++;
- }
- qdf_mem_free_debug(pages->dma_pages, func, line);
- }
- pages->cacheable_pages = NULL;
- pages->dma_pages = NULL;
- pages->num_pages = 0;
- }
- qdf_export_symbol(qdf_mem_multi_pages_free_debug);
#else
/* MEMORY_DEBUG disabled: tracking init/exit are no-ops */
static void qdf_mem_debug_init(void) {}

static void qdf_mem_debug_exit(void) {}

/* Atomic allocator (non-debug build); mirrors
 * qdf_mem_malloc_atomic_debug_fl() in the MEMORY_DEBUG build
 */
void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
{
	void *ptr;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
			     line);
		return NULL;
	}

	/* reserve headroom for the cnss prealloc cache pointer */
	if (add_headroom_for_cnss_prealloc_cache_ptr())
		size += sizeof(void *);

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	ptr = kzalloc(size, GFP_ATOMIC);
	if (!ptr) {
		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
			      size, func, line);
		return NULL;
	}

	qdf_mem_kmalloc_inc(ksize(ptr));

	if (add_headroom_for_cnss_prealloc_cache_ptr())
		ptr += sizeof(void *);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_atomic_fl);
/**
 * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Each element size
 * @element_num: Total number of elements should be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will allocate large size of memory over multiple pages.
 * Large size of contiguous memory allocation will fail frequently, then
 * instead of allocate large memory by one shot, allocate through multiple, non
 * contiguous memory and combine pages when actual usage
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint32_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	/* Fall back to the default OS page size if the caller never set one */
	if (!pages->page_size)
		pages->page_size = qdf_page_size;

	pages->num_element_per_page = pages->page_size / element_size;
	if (!pages->num_element_per_page) {
		/* Element larger than a page: cannot pack even one */
		qdf_print("Invalid page %d or element size %d",
			  (int)pages->page_size, (int)element_size);
		goto out_fail;
	}

	/* Round the page count up so all elements fit */
	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages)
			goto out_fail;

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] =
				qdf_mem_malloc(pages->page_size);
			if (!cacheable_pages[page_idx])
				goto page_alloc_fail;
		}
		pages->dma_pages = NULL;
	} else {
		/* Coherent pool: one descriptor per DMA page */
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages)
			goto out_fail;

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
							 pages->page_size,
							 &dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + pages->page_size;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	/* Unwind only the pages allocated so far ([0, page_idx)) */
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(
				osdev, osdev->dev, pages->page_size,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}

qdf_export_symbol(qdf_mem_multi_pages_alloc);
- /**
- * qdf_mem_multi_pages_free() - free large size of kernel memory
- * @osdev: OS device handle pointer
- * @pages: Multi page information storage
- * @memctxt: Memory context
- * @cacheable: Coherent memory or cacheable memory
- *
- * This function will free large size of memory over multiple pages.
- *
- * Return: None
- */
- void qdf_mem_multi_pages_free(qdf_device_t osdev,
- struct qdf_mem_multi_page_t *pages,
- qdf_dma_context_t memctxt, bool cacheable)
- {
- unsigned int page_idx;
- struct qdf_mem_dma_page_t *dma_pages;
- if (!pages->page_size)
- pages->page_size = qdf_page_size;
- if (cacheable) {
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
- qdf_mem_free(pages->cacheable_pages[page_idx]);
- qdf_mem_free(pages->cacheable_pages);
- } else {
- dma_pages = pages->dma_pages;
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
- qdf_mem_free_consistent(
- osdev, osdev->dev, pages->page_size,
- dma_pages->page_v_addr_start,
- dma_pages->page_p_addr, memctxt);
- dma_pages++;
- }
- qdf_mem_free(pages->dma_pages);
- }
- pages->cacheable_pages = NULL;
- pages->dma_pages = NULL;
- pages->num_pages = 0;
- return;
- }
- qdf_export_symbol(qdf_mem_multi_pages_free);
- #endif
- void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
- bool cacheable)
- {
- unsigned int page_idx;
- struct qdf_mem_dma_page_t *dma_pages;
- if (!pages->page_size)
- pages->page_size = qdf_page_size;
- if (cacheable) {
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
- qdf_mem_zero(pages->cacheable_pages[page_idx],
- pages->page_size);
- } else {
- dma_pages = pages->dma_pages;
- for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
- qdf_mem_zero(dma_pages->page_v_addr_start,
- pages->page_size);
- dma_pages++;
- }
- }
- }
- qdf_export_symbol(qdf_mem_multi_pages_zero);
/**
 * __qdf_mem_free() - free memory allocated by __qdf_mem_malloc()
 * @ptr: pointer previously returned by the allocator (NULL is a no-op)
 *
 * Return: None
 */
void __qdf_mem_free(void *ptr)
{
	if (!ptr)
		return;

	/* Undo the headroom shift applied at allocation time so the
	 * original allocation base is freed, not the caller-visible
	 * offset pointer.
	 */
	if (add_headroom_for_cnss_prealloc_cache_ptr())
		ptr = ptr - sizeof(void *);

	/* Prealloc-pool buffers are returned to the pool, not kfree'd */
	if (qdf_might_be_prealloc(ptr)) {
		if (qdf_mem_prealloc_put(ptr))
			return;
	}

	qdf_mem_kmalloc_dec(ksize(ptr));

	kfree(ptr);
}

qdf_export_symbol(__qdf_mem_free);
/**
 * __qdf_mem_malloc() - allocate zeroed memory
 * @size: number of bytes requested
 * @func: caller function name (for failure logging)
 * @line: caller line number (for failure logging)
 *
 * Return: pointer to zeroed memory, or NULL on failure
 */
void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
{
	void *ptr;

	/* Reject zero-size and oversized requests up front */
	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
			     line);
		return NULL;
	}

	/* Reserve headroom for a cnss-prealloc cache pointer when enabled */
	if (add_headroom_for_cnss_prealloc_cache_ptr())
		size += sizeof(void *);

	/* Prealloc pool hit: returned as-is (no kmalloc accounting) */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	ptr = kzalloc(size, qdf_mem_malloc_flags());
	if (!ptr)
		return NULL;

	/* Account the true slab size before shifting past the headroom */
	qdf_mem_kmalloc_inc(ksize(ptr));

	/* void-pointer arithmetic: GCC extension, standard in kernel code */
	if (add_headroom_for_cnss_prealloc_cache_ptr())
		ptr += sizeof(void *);

	return ptr;
}

qdf_export_symbol(__qdf_mem_malloc);
- #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
- void __qdf_untracked_mem_free(void *ptr)
- {
- if (!ptr)
- return;
- kfree(ptr);
- }
- void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
- {
- void *ptr;
- if (!size || size > QDF_MEM_MAX_MALLOC) {
- qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
- line);
- return NULL;
- }
- ptr = kzalloc(size, qdf_mem_malloc_flags());
- if (!ptr)
- return NULL;
- return ptr;
- }
- #endif
/**
 * qdf_aligned_malloc_fl() - allocate memory whose physical address is aligned
 * @size: in: requested size; out: actual allocated size (may grow by align-1)
 * @vaddr_unaligned: out: virtual base of the underlying allocation (free this)
 * @paddr_unaligned: out: physical base of the underlying allocation
 * @paddr_aligned: out: physical address rounded up to @align
 * @align: required alignment in bytes (power of two - assumed, not checked;
 *         TODO confirm callers only pass powers of two)
 * @func: caller function name (for failure logging)
 * @line: caller line number (for failure logging)
 *
 * Return: virtual address corresponding to @paddr_aligned, or NULL on failure
 */
void *qdf_aligned_malloc_fl(uint32_t *size,
			    void **vaddr_unaligned,
			    qdf_dma_addr_t *paddr_unaligned,
			    qdf_dma_addr_t *paddr_aligned,
			    uint32_t align,
			    const char *func, uint32_t line)
{
	void *vaddr_aligned;
	uint32_t align_alloc_size;

	/* First attempt: exact-size allocation, hoping it lands aligned */
	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
					     line);
	if (!*vaddr_unaligned) {
		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
		return NULL;
	}

	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);

	/* Re-allocate additional bytes to align base address only if
	 * above allocation returns unaligned address. Reason for
	 * trying exact size allocation above is, OS tries to allocate
	 * blocks of size power-of-2 pages and then free extra pages.
	 * e.g., of a ring size of 1MB, the allocation below will
	 * request 1MB plus 7 bytes for alignment, which will cause a
	 * 2MB block allocation,and that is failing sometimes due to
	 * memory fragmentation.
	 */
	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
		align_alloc_size = *size + align - 1;

		qdf_mem_free(*vaddr_unaligned);
		*vaddr_unaligned = qdf_mem_malloc_fl(
			(qdf_size_t)align_alloc_size, func, line);
		if (!*vaddr_unaligned) {
			qdf_warn("Failed to alloc %uB @ %s:%d",
				 align_alloc_size, func, line);
			return NULL;
		}

		*paddr_unaligned = qdf_mem_virt_to_phys(
			*vaddr_unaligned);
		*size = align_alloc_size;
	}

	/* Round the physical address up, then shift the virtual address by
	 * the same delta so the two remain consistent.
	 */
	*paddr_aligned = (qdf_dma_addr_t)qdf_align
		((unsigned long)(*paddr_unaligned), align);

	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
				 ((unsigned long)(*paddr_aligned) -
				  (unsigned long)(*paddr_unaligned)));

	return vaddr_aligned;
}

qdf_export_symbol(qdf_aligned_malloc_fl);
#if defined(DP_UMAC_HW_RESET_SUPPORT) || defined(WLAN_SUPPORT_PPEDS)
/**
 * qdf_tx_desc_pool_free_bufs() - Go through elems and call the registered cb
 * @ctxt: Context to be passed to the cb
 * @pages: Multi page information storage
 * @elem_size: Each element size
 * @elem_count: Total number of elements in the pool.
 * @cacheable: Coherent memory or cacheable memory
 * @cb: Callback to free the elements
 * @elem_list: elem list for delayed free
 *
 * Return: 0 on Success, or Error code
 */
int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
			       uint32_t elem_size, uint32_t elem_count,
			       uint8_t cacheable, qdf_mem_release_cb cb,
			       void *elem_list)
{
	uint16_t i, i_int;
	void *page_info;
	void *elem;
	uint32_t num_elem = 0;

	for (i = 0; i < pages->num_pages; i++) {
		/* Page base depends on which pool flavor was allocated */
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;
		if (!page_info)
			return -ENOMEM;

		elem = page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			cb(ctxt, elem, elem_list);
			elem = ((char *)elem + elem_size);
			num_elem++;

			/* Number of desc pool elements reached */
			/* NOTE(review): this breaks only the inner loop and
			 * stops at elem_count - 1 elements; verify against
			 * callers whether the last element is intentionally
			 * skipped.
			 */
			if (num_elem == (elem_count - 1))
				break;
		}
	}

	return 0;
}

qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
#endif
/**
 * qdf_mem_multi_page_link() - Make links for multi page elements
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Single element size
 * @elem_count: elements count should be linked
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will make links for multi page allocated structure.
 * Each element's first word is overwritten with a pointer to the next
 * element, forming a singly linked free list that spans page boundaries.
 *
 * Return: 0 success
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
			    struct qdf_mem_multi_page_t *pages,
			    uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
{
	uint16_t i, i_int;
	void *page_info;
	void **c_elem = NULL;
	uint32_t num_link = 0;

	for (i = 0; i < pages->num_pages; i++) {
		/* Page base depends on which pool flavor was allocated */
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;
		if (!page_info)
			return -ENOMEM;

		c_elem = (void **)page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			if (i_int == (pages->num_element_per_page - 1)) {
				/* Last element of the page: link across to
				 * the start of the next page (unless this is
				 * the final page).
				 */
				if ((i + 1) == pages->num_pages)
					break;
				if (cacheable)
					*c_elem = pages->
						cacheable_pages[i + 1];
				else
					*c_elem = pages->
						dma_pages[i + 1].
							page_v_addr_start;
				num_link++;
				break;
			} else {
				/* Link to the next element within this page */
				*c_elem =
					(void *)(((char *)c_elem) + elem_size);
			}
			num_link++;
			c_elem = (void **)*c_elem;

			/* Last link established exit */
			if (num_link == (elem_count - 1))
				break;
		}
	}

	/* Terminate the free list */
	if (c_elem)
		*c_elem = NULL;

	return 0;
}

qdf_export_symbol(qdf_mem_multi_page_link);
- void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
- {
- /* special case where dst_addr or src_addr can be NULL */
- if (!num_bytes)
- return;
- QDF_BUG(dst_addr);
- QDF_BUG(src_addr);
- if (!dst_addr || !src_addr)
- return;
- memcpy(dst_addr, src_addr, num_bytes);
- }
- qdf_export_symbol(qdf_mem_copy);
/**
 * qdf_mem_shared_mem_alloc() - allocate DMA-coherent memory shareable with
 * a remote entity, including its scatter-gather table
 * @osdev: OS device handle
 * @size: requested size in bytes
 *
 * Return: populated shared-memory descriptor, or NULL on any failure
 */
qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
{
	qdf_shared_mem_t *shared_mem;
	qdf_dma_addr_t dma_addr, paddr;
	int ret;

	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
	if (!shared_mem)
		return NULL;

	/* Coherent backing buffer; DMA address lands in mem_info */
	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
				size, qdf_mem_get_dma_addr_ptr(osdev,
						&shared_mem->mem_info));
	if (!shared_mem->vaddr) {
		qdf_err("Unable to allocate DMA memory for shared resource");
		qdf_mem_free(shared_mem);
		return NULL;
	}

	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
	/* Re-read: the recorded size may differ from what was requested */
	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);

	qdf_mem_zero(shared_mem->vaddr, size);
	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);

	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
				      shared_mem->vaddr, dma_addr, size);
	if (ret) {
		/* Unwind both the coherent buffer and the descriptor */
		qdf_err("Unable to get DMA sgtable");
		qdf_mem_free_consistent(osdev, osdev->dev,
					shared_mem->mem_info.size,
					shared_mem->vaddr,
					dma_addr,
					qdf_get_dma_mem_context(shared_mem,
								memctx));
		qdf_mem_free(shared_mem);
		return NULL;
	}

	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);

	return shared_mem;
}

qdf_export_symbol(qdf_mem_shared_mem_alloc);
- /**
- * qdf_mem_copy_toio() - copy memory
- * @dst_addr: Pointer to destination memory location (to copy to)
- * @src_addr: Pointer to source memory location (to copy from)
- * @num_bytes: Number of bytes to copy.
- *
- * Return: none
- */
- void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
- {
- if (0 == num_bytes) {
- /* special case where dst_addr or src_addr can be NULL */
- return;
- }
- if ((!dst_addr) || (!src_addr)) {
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
- "%s called with NULL parameter, source:%pK destination:%pK",
- __func__, src_addr, dst_addr);
- QDF_ASSERT(0);
- return;
- }
- memcpy_toio(dst_addr, src_addr, num_bytes);
- }
- qdf_export_symbol(qdf_mem_copy_toio);
- /**
- * qdf_mem_set_io() - set (fill) memory with a specified byte value.
- * @ptr: Pointer to memory that will be set
- * @value: Byte set in memory
- * @num_bytes: Number of bytes to be set
- *
- * Return: None
- */
- void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
- {
- if (!ptr) {
- qdf_print("%s called with NULL parameter ptr", __func__);
- return;
- }
- memset_io(ptr, value, num_bytes);
- }
- qdf_export_symbol(qdf_mem_set_io);
- void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
- {
- QDF_BUG(ptr);
- if (!ptr)
- return;
- memset(ptr, value, num_bytes);
- }
- qdf_export_symbol(qdf_mem_set);
- void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
- {
- /* special case where dst_addr or src_addr can be NULL */
- if (!num_bytes)
- return;
- QDF_BUG(dst_addr);
- QDF_BUG(src_addr);
- if (!dst_addr || !src_addr)
- return;
- memmove(dst_addr, src_addr, num_bytes);
- }
- qdf_export_symbol(qdf_mem_move);
/**
 * qdf_mem_cmp() - compare two memory regions
 * @left: first buffer (must not be NULL; QDF_BUG asserts it)
 * @right: second buffer (must not be NULL; QDF_BUG asserts it)
 * @size: number of bytes to compare
 *
 * Return: memcmp() semantics - 0 if equal, <0/>0 otherwise
 */
int qdf_mem_cmp(const void *left, const void *right, size_t size)
{
	QDF_BUG(left);
	QDF_BUG(right);

	return memcmp(left, right, size);
}

qdf_export_symbol(qdf_mem_cmp);
#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/**
 * qdf_mem_dma_alloc() - allocates memory for dma
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * Simulation/SDIO/USB variant: no real DMA mapping exists, so ordinary
 * heap memory is used and the "physical" address is simply the virtual
 * address reinterpreted as an integer.
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size,
				      qdf_dma_addr_t *phy_addr)
{
	void *vaddr;

	vaddr = qdf_mem_malloc(size);
	*phy_addr = ((uintptr_t) vaddr);
	/* using this type conversion to suppress "cast from pointer to integer
	 * of different size" warning on some platforms
	 */
	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
	return vaddr;
}
#elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
	!defined(QCA_WIFI_QCN9000)
#define QCA8074_RAM_BASE 0x50000000
#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
/* x86 emulation variant: retries until dma_alloc_coherent returns a
 * physical address at or above QCA8074_RAM_BASE, discarding any lower
 * allocations the emulated target cannot address.
 */
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int i;

	*phy_addr = 0;
	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_err("%s failed , size: %zu!", __func__, size);
			return NULL;
		}

		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		/* Address below the usable window: release and retry */
		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	return NULL;
}
#else
/* Default variant: plain DMA-coherent allocation */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
#endif
#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* Counterpart of the simulation/SDIO/USB qdf_mem_dma_alloc(): the buffer
 * came from the ordinary heap, so it is freed the same way.
 */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
#else
/* Default counterpart: release DMA-coherent memory */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
#endif
#ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - allocate DMA-coherent memory with
 * leak tracking
 * @osdev: OS device handle
 * @dev: device for the DMA mapping
 * @size: requested size in bytes
 * @paddr: out: DMA address of the buffer
 * @func: caller function name
 * @line: caller line number
 * @caller: caller return address for tracking
 *
 * Return: virtual address of the buffer, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	/* Debug tracking disabled at runtime: use the plain allocator */
	if (is_initial_mem_debug_disabled)
		return __qdf_mem_alloc_consistent(osdev, dev,
						  size, paddr,
						  func, line);

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	/* Over-allocate to make room for the trailing debug header */
	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				  paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, func, line, caller);
	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}

qdf_export_symbol(qdf_mem_alloc_consistent_debug);
/**
 * qdf_mem_free_consistent_debug() - free DMA-coherent memory allocated via
 * qdf_mem_alloc_consistent_debug(), validating the trailing debug header
 * @osdev: OS device handle
 * @dev: device the buffer was mapped for
 * @size: size originally requested (without the debug trailer)
 * @vaddr: virtual address to free; NULL is a no-op
 * @paddr: DMA address of the buffer
 * @memctx: memory context
 * @func: caller function name
 * @line: caller line number
 *
 * Return: None
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* Debug tracking disabled at runtime: use the plain free path */
	if (is_initial_mem_debug_disabled) {
		__qdf_mem_free_consistent(
			osdev, dev,
			size, vaddr,
			paddr, memctx);
		return;
	}

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_talloc_assert_no_children_fl(vaddr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		header->freed = true;
		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);

	/* NOTE(review): header->size is read after validation; assumes the
	 * assert above aborts on a corrupted header - confirm its semantics.
	 */
	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}

qdf_export_symbol(qdf_mem_free_consistent_debug);
#endif /* MEMORY_DEBUG */
/**
 * __qdf_mem_free_consistent() - free DMA-coherent memory (non-debug path)
 * @osdev: OS device handle
 * @dev: device the buffer was mapped for
 * @size: size of the buffer
 * @vaddr: virtual address of the buffer
 * @paddr: DMA address of the buffer
 * @memctx: memory context (unused here)
 *
 * Return: None
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, void *vaddr,
			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	/* Keep the DMA outstanding-bytes counter in sync before freeing */
	qdf_mem_dma_dec(size);
	qdf_mem_dma_free(dev, size, vaddr, paddr);
}

qdf_export_symbol(__qdf_mem_free_consistent);
- void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
- qdf_size_t size, qdf_dma_addr_t *paddr,
- const char *func, uint32_t line)
- {
- void *vaddr;
- if (!size || size > QDF_MEM_MAX_MALLOC) {
- qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
- size, func, line);
- return NULL;
- }
- vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
- if (vaddr)
- qdf_mem_dma_inc(size);
- return vaddr;
- }
- qdf_export_symbol(__qdf_mem_alloc_consistent);
/**
 * qdf_aligned_mem_alloc_consistent_fl() - allocate DMA-coherent memory whose
 * DMA address is aligned (coherent-pool counterpart of qdf_aligned_malloc_fl)
 * @osdev: OS device handle
 * @size: in: requested size; out: actual allocated size (may grow by align-1)
 * @vaddr_unaligned: out: virtual base of the underlying allocation (free this)
 * @paddr_unaligned: out: DMA base of the underlying allocation
 * @paddr_aligned: out: DMA address rounded up to @align
 * @align: required alignment in bytes (power of two - assumed, not checked)
 * @func: caller function name (for failure logging)
 * @line: caller line number (for failure logging)
 *
 * Return: virtual address corresponding to @paddr_aligned, or NULL on failure
 */
void *qdf_aligned_mem_alloc_consistent_fl(
	qdf_device_t osdev, uint32_t *size,
	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
	qdf_dma_addr_t *paddr_aligned, uint32_t align,
	const char *func, uint32_t line)
{
	void *vaddr_aligned;
	uint32_t align_alloc_size;

	/* First attempt: exact-size allocation, hoping it lands aligned */
	*vaddr_unaligned = qdf_mem_alloc_consistent(
			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
	if (!*vaddr_unaligned) {
		qdf_warn("Failed to alloc %uB @ %s:%d",
			 *size, func, line);
		return NULL;
	}

	/* Re-allocate additional bytes to align base address only if
	 * above allocation returns unaligned address. Reason for
	 * trying exact size allocation above is, OS tries to allocate
	 * blocks of size power-of-2 pages and then free extra pages.
	 * e.g., of a ring size of 1MB, the allocation below will
	 * request 1MB plus 7 bytes for alignment, which will cause a
	 * 2MB block allocation,and that is failing sometimes due to
	 * memory fragmentation.
	 */
	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
		align_alloc_size = *size + align - 1;

		qdf_mem_free_consistent(osdev, osdev->dev, *size,
					*vaddr_unaligned,
					*paddr_unaligned, 0);

		*vaddr_unaligned = qdf_mem_alloc_consistent(
				osdev, osdev->dev, align_alloc_size,
				paddr_unaligned);
		if (!*vaddr_unaligned) {
			qdf_warn("Failed to alloc %uB @ %s:%d",
				 align_alloc_size, func, line);
			return NULL;
		}

		*size = align_alloc_size;
	}

	/* Round the DMA address up, then shift the virtual address by the
	 * same delta so the two remain consistent.
	 */
	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
			(unsigned long)(*paddr_unaligned), align);

	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
				 ((unsigned long)(*paddr_aligned) -
				  (unsigned long)(*paddr_unaligned)));

	return vaddr_aligned;
}

qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
/**
 * qdf_mem_dma_sync_single_for_device() - assign memory to device
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the device
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to ram or invalidated as needed.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					enum dma_data_direction direction)
{
	/* Thin wrapper over the kernel streaming-DMA sync API */
	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
}

qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
/**
 * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the cpu
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the CPU.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     enum dma_data_direction direction)
{
	/* Thin wrapper over the kernel streaming-DMA sync API */
	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
}

qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
/**
 * qdf_mem_init() - initialize the QDF memory debug/tracking subsystems
 *
 * Return: None
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_net_buf_debug_init();
	qdf_frag_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}

qdf_export_symbol(qdf_mem_init);
/**
 * qdf_mem_exit() - tear down the QDF memory debug/tracking subsystems
 *
 * Teardown runs in the reverse order of qdf_mem_init().
 *
 * Return: None
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_frag_debug_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_debug_exit();
}

qdf_export_symbol(qdf_mem_exit);
- /**
- * qdf_ether_addr_copy() - copy an Ethernet address
- *
- * @dst_addr: A six-byte array Ethernet address destination
- * @src_addr: A six-byte array Ethernet address source
- *
- * Please note: dst & src must both be aligned to u16.
- *
- * Return: none
- */
- void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
- {
- if ((!dst_addr) || (!src_addr)) {
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
- "%s called with NULL parameter, source:%pK destination:%pK",
- __func__, src_addr, dst_addr);
- QDF_ASSERT(0);
- return;
- }
- ether_addr_copy(dst_addr, src_addr);
- }
- qdf_export_symbol(qdf_ether_addr_copy);
/* Outstanding DMA-coherent bytes */
int32_t qdf_dma_mem_stats_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.dma);
}

qdf_export_symbol(qdf_dma_mem_stats_read);

/* Outstanding kmalloc'd bytes */
int32_t qdf_heap_mem_stats_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
}

qdf_export_symbol(qdf_heap_mem_stats_read);

/* Outstanding skb bytes */
int32_t qdf_skb_mem_stats_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.skb);
}

qdf_export_symbol(qdf_skb_mem_stats_read);

/* Cumulative skb bytes */
int32_t qdf_skb_total_mem_stats_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.skb_total);
}

qdf_export_symbol(qdf_skb_total_mem_stats_read);

/* High-water mark of skb bytes (non-atomic snapshot) */
int32_t qdf_skb_max_mem_stats_read(void)
{
	return qdf_mem_stat.skb_mem_max;
}

qdf_export_symbol(qdf_skb_max_mem_stats_read);

/* Outstanding DP TX skb bytes */
int32_t qdf_dp_tx_skb_mem_stats_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
}

qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);

/* Outstanding DP RX skb bytes */
int32_t qdf_dp_rx_skb_mem_stats_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
}

qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);

/* Outstanding DP TX skb count */
int32_t qdf_mem_dp_tx_skb_cnt_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
}

qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);

/* High-water mark of DP TX skb count (non-atomic snapshot) */
int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
{
	return qdf_mem_stat.dp_tx_skb_count_max;
}

qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);

/* Outstanding DP RX skb count */
int32_t qdf_mem_dp_rx_skb_cnt_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
}

qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);

/* High-water mark of DP RX skb count (non-atomic snapshot) */
int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
{
	return qdf_mem_stat.dp_rx_skb_count_max;
}

qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);

/* High-water mark of DP TX skb bytes (non-atomic snapshot) */
int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
{
	return qdf_mem_stat.dp_tx_skb_mem_max;
}

qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);

/* High-water mark of DP RX skb bytes (non-atomic snapshot) */
int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
{
	return qdf_mem_stat.dp_rx_skb_mem_max;
}

qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);

/* Outstanding TX descriptor count */
int32_t qdf_mem_tx_desc_cnt_read(void)
{
	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
}

qdf_export_symbol(qdf_mem_tx_desc_cnt_read);

/* High-water mark of TX descriptor count (non-atomic snapshot) */
int32_t qdf_mem_tx_desc_max_read(void)
{
	return qdf_mem_stat.tx_descs_max;
}

qdf_export_symbol(qdf_mem_tx_desc_max_read);

/**
 * qdf_mem_tx_desc_cnt_update() - overwrite the TX descriptor counters
 * @pending_tx_descs: current outstanding descriptor count
 * @tx_descs_max: new high-water mark
 *
 * Return: None
 */
void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
				int32_t tx_descs_max)
{
	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
	qdf_mem_stat.tx_descs_max = tx_descs_max;
}

qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
- void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
- {
- void *ptr;
- if (!size) {
- qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
- return NULL;
- }
- ptr = vzalloc(size);
- return ptr;
- }
- qdf_export_symbol(__qdf_mem_valloc);
/**
 * __qdf_mem_vfree() - free memory allocated by __qdf_mem_valloc()
 * @ptr: pointer to free; NULL is a no-op
 *
 * Return: None
 */
void __qdf_mem_vfree(void *ptr)
{
	if (qdf_unlikely(!ptr))
		return;

	vfree(ptr);
}

qdf_export_symbol(__qdf_mem_vfree);
#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
/**
 * qdf_iommu_domain_get_attr() - read an attribute from an IOMMU domain
 * @domain: IOMMU domain handle
 * @attr: attribute to query
 * @data: out: attribute value
 *
 * Return: result of the underlying __qdf_iommu_domain_get_attr() call
 */
int
qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
			  enum qdf_iommu_attr attr, void *data)
{
	return __qdf_iommu_domain_get_attr(domain, attr, data);
}

qdf_export_symbol(qdf_iommu_domain_get_attr);
#endif
#ifdef ENHANCED_OS_ABSTRACTION
/**
 * qdf_update_mem_map_table() - fill a memory-map-table entry
 * @osdev: OS device handle
 * @mem_info: entry to populate; NULL is rejected with an error log
 * @dma_addr: DMA address of the region
 * @mem_size: size of the region in bytes
 *
 * Return: None
 */
void qdf_update_mem_map_table(qdf_device_t osdev,
			      qdf_mem_info_t *mem_info,
			      qdf_dma_addr_t dma_addr,
			      uint32_t mem_size)
{
	if (!mem_info) {
		qdf_nofl_err("%s: NULL mem_info", __func__);
		return;
	}

	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}

qdf_export_symbol(qdf_update_mem_map_table);

/**
 * qdf_mem_paddr_from_dmaaddr() - translate a DMA address to a physical address
 * @osdev: OS device handle
 * @dma_addr: DMA address to translate
 *
 * Return: physical address corresponding to @dma_addr
 */
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
					  qdf_dma_addr_t dma_addr)
{
	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}

qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
#endif
- #ifdef QCA_KMEM_CACHE_SUPPORT
- qdf_kmem_cache_t
- __qdf_kmem_cache_create(const char *cache_name,
- qdf_size_t size)
- {
- struct kmem_cache *cache;
- cache = kmem_cache_create(cache_name, size,
- 0, 0, NULL);
- if (!cache)
- return NULL;
- return cache;
- }
- qdf_export_symbol(__qdf_kmem_cache_create);
/**
 * __qdf_kmem_cache_destroy() - destroy a slab cache
 * @cache: cache handle returned by __qdf_kmem_cache_create()
 *
 * Return: None
 */
void
__qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
{
	kmem_cache_destroy(cache);
}

qdf_export_symbol(__qdf_kmem_cache_destroy);
/**
 * __qdf_kmem_cache_alloc() - allocate one object from a slab cache
 * @cache: cache handle returned by __qdf_kmem_cache_create()
 *
 * Return: pointer to the object, or NULL on failure
 */
void*
__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
{
	int flags = GFP_KERNEL;

	/* Drop to GFP_ATOMIC in any non-sleepable context */
	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	return kmem_cache_alloc(cache, flags);
}

qdf_export_symbol(__qdf_kmem_cache_alloc);
/**
 * __qdf_kmem_cache_free() - return one object to its slab cache
 * @cache: cache the object came from
 * @node: object to free
 *
 * Return: None
 */
void
__qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
{
	kmem_cache_free(cache, node);
}

qdf_export_symbol(__qdf_kmem_cache_free);
#else
/* QCA_KMEM_CACHE_SUPPORT disabled: all cache operations become no-ops
 * and allocations report failure.
 */
qdf_kmem_cache_t
__qdf_kmem_cache_create(const char *cache_name,
			qdf_size_t size)
{
	return NULL;
}

void
__qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
{
}

void *
__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
{
	return NULL;
}

void
__qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
{
}
#endif
|