- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (C) 1993 Linus Torvalds
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <[email protected]>, May 2000
- * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
- * Numa awareness, Christoph Lameter, SGI, June 2005
- * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
- */
- #include <linux/vmalloc.h>
- #include <linux/mm.h>
- #include <linux/module.h>
- #include <linux/highmem.h>
- #include <linux/sched/signal.h>
- #include <linux/slab.h>
- #include <linux/spinlock.h>
- #include <linux/interrupt.h>
- #include <linux/proc_fs.h>
- #include <linux/seq_file.h>
- #include <linux/set_memory.h>
- #include <linux/debugobjects.h>
- #include <linux/kallsyms.h>
- #include <linux/list.h>
- #include <linux/notifier.h>
- #include <linux/rbtree.h>
- #include <linux/xarray.h>
- #include <linux/io.h>
- #include <linux/rcupdate.h>
- #include <linux/pfn.h>
- #include <linux/kmemleak.h>
- #include <linux/atomic.h>
- #include <linux/compiler.h>
- #include <linux/memcontrol.h>
- #include <linux/llist.h>
- #include <linux/bitops.h>
- #include <linux/rbtree_augmented.h>
- #include <linux/overflow.h>
- #include <linux/pgtable.h>
- #include <linux/uaccess.h>
- #include <linux/hugetlb.h>
- #include <linux/sched/mm.h>
- #include <asm/tlbflush.h>
- #include <asm/shmparam.h>
- #ifdef CONFIG_RKP
- #include <linux/uh.h>
- #include <linux/rkp.h>
- #include <linux/moduleloader.h>
- #endif
- #include "internal.h"
- #include "pgalloc-track.h"
- #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
- static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
- static int __init set_nohugeiomap(char *str)
- {
- ioremap_max_page_shift = PAGE_SHIFT;
- return 0;
- }
- early_param("nohugeiomap", set_nohugeiomap);
- #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
- static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
- #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
- #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
- static bool __ro_after_init vmap_allow_huge = true;
- static int __init set_nohugevmalloc(char *str)
- {
- vmap_allow_huge = false;
- return 0;
- }
- early_param("nohugevmalloc", set_nohugevmalloc);
- #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
- static const bool vmap_allow_huge = false;
- #endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
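- /*
- * Both knobs above are early boot parameters: "nohugeiomap" caps
- * ioremap_page_range() mappings at PAGE_SIZE, and "nohugevmalloc"
- * disables huge-page backing for vmalloc() even on architectures
- * that support it. For example, booting with:
- *
- *	nohugeiomap nohugevmalloc
- *
- * on the kernel command line forces base-page mappings on both paths.
- */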
- bool is_vmalloc_addr(const void *x)
- {
- unsigned long addr = (unsigned long)kasan_reset_tag(x);
- return addr >= VMALLOC_START && addr < VMALLOC_END;
- }
- EXPORT_SYMBOL(is_vmalloc_addr);
- struct vfree_deferred {
- struct llist_head list;
- struct work_struct wq;
- };
- static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
- static void __vunmap(const void *, int);
- static void free_work(struct work_struct *w)
- {
- struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
- struct llist_node *t, *llnode;
- llist_for_each_safe(llnode, t, llist_del_all(&p->list))
- __vunmap((void *)llnode, 1);
- }
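- /*
- * vfree_deferred/free_work implement deferred freeing: callers that
- * cannot unmap directly push the address onto the per-CPU llist, and
- * the work item later drains that list and performs the real
- * __vunmap() in process context, where sleeping and TLB flushing
- * are allowed.
- */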
- /*** Page table manipulation functions ***/
- static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift, pgtbl_mod_mask *mask)
- {
- pte_t *pte;
- u64 pfn;
- unsigned long size = PAGE_SIZE;
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel_track(pmd, addr, mask);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- #ifdef CONFIG_HUGETLB_PAGE
- size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
- if (size != PAGE_SIZE) {
- pte_t entry = pfn_pte(pfn, prot);
- entry = arch_make_huge_pte(entry, ilog2(size), 0);
- set_huge_pte_at(&init_mm, addr, pte, entry);
- pfn += PFN_DOWN(size);
- continue;
- }
- #endif
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- pfn++;
- } while (pte += PFN_DOWN(size), addr += size, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
- return 0;
- }
- static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift)
- {
- if (max_page_shift < PMD_SHIFT)
- return 0;
- if (!arch_vmap_pmd_supported(prot))
- return 0;
- if ((end - addr) != PMD_SIZE)
- return 0;
- if (!IS_ALIGNED(addr, PMD_SIZE))
- return 0;
- if (!IS_ALIGNED(phys_addr, PMD_SIZE))
- return 0;
- if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
- return 0;
- return pmd_set_huge(pmd, phys_addr, prot);
- }
- static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift, pgtbl_mod_mask *mask)
- {
- pmd_t *pmd;
- unsigned long next;
- pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
- if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
- max_page_shift)) {
- *mask |= PGTBL_PMD_MODIFIED;
- continue;
- }
- if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
- return -ENOMEM;
- } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
- }
- static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift)
- {
- if (max_page_shift < PUD_SHIFT)
- return 0;
- if (!arch_vmap_pud_supported(prot))
- return 0;
- if ((end - addr) != PUD_SIZE)
- return 0;
- if (!IS_ALIGNED(addr, PUD_SIZE))
- return 0;
- if (!IS_ALIGNED(phys_addr, PUD_SIZE))
- return 0;
- if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
- return 0;
- return pud_set_huge(pud, phys_addr, prot);
- }
- static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift, pgtbl_mod_mask *mask)
- {
- pud_t *pud;
- unsigned long next;
- pud = pud_alloc_track(&init_mm, p4d, addr, mask);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
- if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
- max_page_shift)) {
- *mask |= PGTBL_PUD_MODIFIED;
- continue;
- }
- if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
- max_page_shift, mask))
- return -ENOMEM;
- } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
- }
- static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift)
- {
- if (max_page_shift < P4D_SHIFT)
- return 0;
- if (!arch_vmap_p4d_supported(prot))
- return 0;
- if ((end - addr) != P4D_SIZE)
- return 0;
- if (!IS_ALIGNED(addr, P4D_SIZE))
- return 0;
- if (!IS_ALIGNED(phys_addr, P4D_SIZE))
- return 0;
- if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
- return 0;
- return p4d_set_huge(p4d, phys_addr, prot);
- }
- static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift, pgtbl_mod_mask *mask)
- {
- p4d_t *p4d;
- unsigned long next;
- p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
- if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
- max_page_shift)) {
- *mask |= PGTBL_P4D_MODIFIED;
- continue;
- }
- if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
- max_page_shift, mask))
- return -ENOMEM;
- } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
- }
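- /*
- * vmap_range_noflush() walks the kernel page tables for [addr, end)
- * and maps the range to @phys_addr with @prot. At each level it first
- * tries a huge mapping (P4D/PUD/PMD) when that level is allowed by
- * @max_page_shift and both the virtual and physical addresses are
- * suitably sized and aligned; otherwise it falls back to the next
- * lower level, down to individual PTEs. Callers are responsible for
- * flush_cache_vmap() afterwards.
- */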
- static int vmap_range_noflush(unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift)
- {
- pgd_t *pgd;
- unsigned long start;
- unsigned long next;
- int err;
- pgtbl_mod_mask mask = 0;
- might_sleep();
- BUG_ON(addr >= end);
- start = addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
- max_page_shift, &mask);
- if (err)
- break;
- } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
- return err;
- }
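- /*
- * ioremap_page_range() establishes a kernel mapping of [addr, end)
- * onto @phys_addr: execute permission is stripped via pgprot_nx(),
- * huge mappings up to ioremap_max_page_shift are attempted, the cache
- * is flushed for the new range, KMSAN metadata is set up, and, where
- * CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS is enabled, the architecture is
- * notified through ioremap_phys_range_hook().
- */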
- int ioremap_page_range(unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot)
- {
- int err;
- prot = pgprot_nx(prot);
- err = vmap_range_noflush(addr, end, phys_addr, prot,
- ioremap_max_page_shift);
- flush_cache_vmap(addr, end);
- if (!err)
- err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
- ioremap_max_page_shift);
- if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) && !err)
- ioremap_phys_range_hook(phys_addr, end - addr, prot);
- return err;
- }
- static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
- {
- pte_t *pte;
- pte = pte_offset_kernel(pmd, addr);
- do {
- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
- } while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
- }
- static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
- {
- pmd_t *pmd;
- unsigned long next;
- int cleared;
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- cleared = pmd_clear_huge(pmd);
- if (cleared || pmd_bad(*pmd))
- *mask |= PGTBL_PMD_MODIFIED;
- if (cleared)
- continue;
- if (pmd_none_or_clear_bad(pmd))
- continue;
- vunmap_pte_range(pmd, addr, next, mask);
- cond_resched();
- } while (pmd++, addr = next, addr != end);
- }
- static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
- {
- pud_t *pud;
- unsigned long next;
- int cleared;
- pud = pud_offset(p4d, addr);
- do {
- next = pud_addr_end(addr, end);
- cleared = pud_clear_huge(pud);
- if (cleared || pud_bad(*pud))
- *mask |= PGTBL_PUD_MODIFIED;
- if (cleared)
- continue;
- if (pud_none_or_clear_bad(pud))
- continue;
- vunmap_pmd_range(pud, addr, next, mask);
- } while (pud++, addr = next, addr != end);
- }
- static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
- {
- p4d_t *p4d;
- unsigned long next;
- p4d = p4d_offset(pgd, addr);
- do {
- next = p4d_addr_end(addr, end);
- p4d_clear_huge(p4d);
- if (p4d_bad(*p4d))
- *mask |= PGTBL_P4D_MODIFIED;
- if (p4d_none_or_clear_bad(p4d))
- continue;
- vunmap_pud_range(p4d, addr, next, mask);
- } while (p4d++, addr = next, addr != end);
- }
- /*
- * vunmap_range_noflush is similar to vunmap_range, but does not
- * flush caches or TLBs.
- *
- * The caller is responsible for calling flush_cache_vunmap() before calling
- * this function, and flush_tlb_kernel_range after it has returned
- * successfully (and before the addresses are expected to cause a page fault
- * or be re-mapped for something else, if TLB flushes are being delayed or
- * coalesced).
- *
- * This is an internal function only. Do not use outside mm/.
- */
- void __vunmap_range_noflush(unsigned long start, unsigned long end)
- {
- unsigned long next;
- pgd_t *pgd;
- unsigned long addr = start;
- pgtbl_mod_mask mask = 0;
- BUG_ON(addr >= end);
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_bad(*pgd))
- mask |= PGTBL_PGD_MODIFIED;
- if (pgd_none_or_clear_bad(pgd))
- continue;
- vunmap_p4d_range(pgd, addr, next, &mask);
- } while (pgd++, addr = next, addr != end);
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
- }
- void vunmap_range_noflush(unsigned long start, unsigned long end)
- {
- kmsan_vunmap_range_noflush(start, end);
- __vunmap_range_noflush(start, end);
- }
- /**
- * vunmap_range - unmap kernel virtual addresses
- * @addr: start of the VM area to unmap
- * @end: end of the VM area to unmap (non-inclusive)
- *
- * Clears any present PTEs in the virtual address range, flushes TLBs and
- * caches. Any subsequent access to the address before it has been re-mapped
- * is a kernel bug.
- */
- void vunmap_range(unsigned long addr, unsigned long end)
- {
- flush_cache_vunmap(addr, end);
- vunmap_range_noflush(addr, end);
- flush_tlb_kernel_range(addr, end);
- }
- static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
- {
- pte_t *pte;
- /*
- * nr is a running index into the array which helps higher level
- * callers keep track of where we're up to.
- */
- pte = pte_alloc_kernel_track(pmd, addr, mask);
- if (!pte)
- return -ENOMEM;
- do {
- struct page *page = pages[*nr];
- if (WARN_ON(!pte_none(*pte)))
- return -EBUSY;
- if (WARN_ON(!page))
- return -ENOMEM;
- if (WARN_ON(!pfn_valid(page_to_pfn(page))))
- return -EINVAL;
- set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
- (*nr)++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
- return 0;
- }
- static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
- {
- pmd_t *pmd;
- unsigned long next;
- pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
- if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
- return -ENOMEM;
- } while (pmd++, addr = next, addr != end);
- return 0;
- }
- static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
- {
- pud_t *pud;
- unsigned long next;
- pud = pud_alloc_track(&init_mm, p4d, addr, mask);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
- if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
- return -ENOMEM;
- } while (pud++, addr = next, addr != end);
- return 0;
- }
- static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
- {
- p4d_t *p4d;
- unsigned long next;
- p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
- if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
- return -ENOMEM;
- } while (p4d++, addr = next, addr != end);
- return 0;
- }
- static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
- pgprot_t prot, struct page **pages)
- {
- unsigned long start = addr;
- pgd_t *pgd;
- unsigned long next;
- int err = 0;
- int nr = 0;
- pgtbl_mod_mask mask = 0;
- BUG_ON(addr >= end);
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_bad(*pgd))
- mask |= PGTBL_PGD_MODIFIED;
- err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
- if (err)
- return err;
- } while (pgd++, addr = next, addr != end);
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
- return 0;
- }
- /*
- * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
- * flush caches.
- *
- * The caller is responsible for calling flush_cache_vmap() after this
- * function returns successfully and before the addresses are accessed.
- *
- * This is an internal function only. Do not use outside mm/.
- */
- int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
- pgprot_t prot, struct page **pages, unsigned int page_shift)
- {
- unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
- WARN_ON(page_shift < PAGE_SHIFT);
- if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
- page_shift == PAGE_SHIFT)
- return vmap_small_pages_range_noflush(addr, end, prot, pages);
- for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
- int err;
- err = vmap_range_noflush(addr, addr + (1UL << page_shift),
- page_to_phys(pages[i]), prot,
- page_shift);
- if (err)
- return err;
- addr += 1UL << page_shift;
- }
- return 0;
- }
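- /*
- * A worked example for the huge path above, assuming a configuration
- * with 4 KiB base pages and page_shift == PMD_SHIFT (21): each loop
- * iteration maps 1 << (21 - 12) = 512 physically contiguous pages,
- * i.e. one 2 MiB block, with a single vmap_range_noflush() call
- * starting at page_to_phys(pages[i]).
- */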
- int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
- pgprot_t prot, struct page **pages, unsigned int page_shift)
- {
- int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
- page_shift);
- if (ret)
- return ret;
- return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
- }
- /**
- * vmap_pages_range - map pages to a kernel virtual address
- * @addr: start of the VM area to map
- * @end: end of the VM area to map (non-inclusive)
- * @prot: page protection flags to use
- * @pages: pages to map (always PAGE_SIZE pages)
- * @page_shift: maximum shift that the pages may be mapped with, @pages must
- * be aligned and contiguous up to at least this shift.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
- static int vmap_pages_range(unsigned long addr, unsigned long end,
- pgprot_t prot, struct page **pages, unsigned int page_shift)
- {
- int err;
- err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
- flush_cache_vmap(addr, end);
- return err;
- }
- int is_vmalloc_or_module_addr(const void *x)
- {
- /*
- * ARM, x86-64 and sparc64 put modules in a special place,
- * and fall back on vmalloc() if that fails. Others
- * just put them in the vmalloc space.
- */
- #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
- unsigned long addr = (unsigned long)kasan_reset_tag(x);
- if (addr >= MODULES_VADDR && addr < MODULES_END)
- return 1;
- #endif
- return is_vmalloc_addr(x);
- }
- /*
- * Walk a vmap address to the struct page it maps. Huge vmap mappings will
- * return the tail page that corresponds to the base page address, which
- * matches small vmap mappings.
- */
- struct page *vmalloc_to_page(const void *vmalloc_addr)
- {
- unsigned long addr = (unsigned long) vmalloc_addr;
- struct page *page = NULL;
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep, pte;
- /*
- * XXX we might need to change this if we add VIRTUAL_BUG_ON for
- * architectures that do not vmalloc module space
- */
- VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
- if (pgd_none(*pgd))
- return NULL;
- if (WARN_ON_ONCE(pgd_leaf(*pgd)))
- return NULL; /* XXX: no allowance for huge pgd */
- if (WARN_ON_ONCE(pgd_bad(*pgd)))
- return NULL;
- p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d))
- return NULL;
- if (p4d_leaf(*p4d))
- return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
- if (WARN_ON_ONCE(p4d_bad(*p4d)))
- return NULL;
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud))
- return NULL;
- if (pud_leaf(*pud))
- return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- if (WARN_ON_ONCE(pud_bad(*pud)))
- return NULL;
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
- return NULL;
- if (pmd_leaf(*pmd))
- return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- if (WARN_ON_ONCE(pmd_bad(*pmd)))
- return NULL;
- ptep = pte_offset_map(pmd, addr);
- pte = *ptep;
- if (pte_present(pte))
- page = pte_page(pte);
- pte_unmap(ptep);
- return page;
- }
- EXPORT_SYMBOL(vmalloc_to_page);
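- /*
- * A minimal usage sketch (illustration only, not part of this file):
- * iterating over the backing pages of a vmalloc()'ed buffer one base
- * page at a time.
- *
- *	void *buf = vmalloc(size);
- *	unsigned long off;
- *
- *	for (off = 0; buf && off < size; off += PAGE_SIZE) {
- *		struct page *page = vmalloc_to_page(buf + off);
- *		...
- *	}
- *
- * For huge vmap mappings the tail page of the corresponding base page
- * is returned, so the loop still sees one distinct page per PAGE_SIZE.
- */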
- /*
- * Map a vmalloc()-space virtual address to the physical page frame number.
- */
- unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
- {
- return page_to_pfn(vmalloc_to_page(vmalloc_addr));
- }
- EXPORT_SYMBOL(vmalloc_to_pfn);
- /*** Global kva allocator ***/
- #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
- #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
- static DEFINE_SPINLOCK(vmap_area_lock);
- static DEFINE_SPINLOCK(free_vmap_area_lock);
- /* Export for kexec only */
- LIST_HEAD(vmap_area_list);
- static struct rb_root vmap_area_root = RB_ROOT;
- static bool vmap_initialized __read_mostly;
- static struct rb_root purge_vmap_area_root = RB_ROOT;
- static LIST_HEAD(purge_vmap_area_list);
- static DEFINE_SPINLOCK(purge_vmap_area_lock);
- /*
- * This kmem_cache is used for vmap_area objects. Instead of
- * allocating from slab we reuse an object from this cache to
- * make things faster. Especially in "no edge" splitting of
- * free block.
- */
- static struct kmem_cache *vmap_area_cachep;
- /*
- * This linked list is used together with free_vmap_area_root.
- * It gives O(1) access to prev/next to perform fast coalescing.
- */
- static LIST_HEAD(free_vmap_area_list);
- /*
- * This augmented red-black tree represents the free vmap space.
- * All vmap_area objects in this tree are sorted by va->va_start
- * address. It is used for allocation and merging when a vmap
- * object is released.
- *
- * Each vmap_area node stores the maximum available free block
- * size of its sub-tree, right or left. Therefore it is possible
- * to find the lowest match of a free area.
- */
- static struct rb_root free_vmap_area_root = RB_ROOT;
- /*
- * Preload a CPU with one object for "no edge" split case. The
- * aim is to get rid of allocations from atomic context and thus
- * to use more permissive allocation masks.
- */
- static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
- static __always_inline unsigned long
- va_size(struct vmap_area *va)
- {
- return (va->va_end - va->va_start);
- }
- static __always_inline unsigned long
- get_subtree_max_size(struct rb_node *node)
- {
- struct vmap_area *va;
- va = rb_entry_safe(node, struct vmap_area, rb_node);
- return va ? va->subtree_max_size : 0;
- }
- RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
- struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
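- /*
- * The macro above generates the augmented rb-tree callbacks
- * (free_vmap_area_rb_augment_cb and its _propagate helper) that keep
- * va->subtree_max_size equal to the largest va_size() in the node's
- * subtree, which is what find_vmap_lowest_match() relies on to prune
- * whole subtrees during its search.
- */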
- static void purge_vmap_area_lazy(void);
- static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
- static void drain_vmap_area_work(struct work_struct *work);
- static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
- static atomic_long_t nr_vmalloc_pages;
- unsigned long vmalloc_nr_pages(void)
- {
- return atomic_long_read(&nr_vmalloc_pages);
- }
- EXPORT_SYMBOL_GPL(vmalloc_nr_pages);
- /* Look up the first VA which satisfies addr < va_end, NULL if none. */
- static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
- {
- struct vmap_area *va = NULL;
- struct rb_node *n = vmap_area_root.rb_node;
- addr = (unsigned long)kasan_reset_tag((void *)addr);
- while (n) {
- struct vmap_area *tmp;
- tmp = rb_entry(n, struct vmap_area, rb_node);
- if (tmp->va_end > addr) {
- va = tmp;
- if (tmp->va_start <= addr)
- break;
- n = n->rb_left;
- } else
- n = n->rb_right;
- }
- return va;
- }
- static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
- {
- struct rb_node *n = root->rb_node;
- addr = (unsigned long)kasan_reset_tag((void *)addr);
- while (n) {
- struct vmap_area *va;
- va = rb_entry(n, struct vmap_area, rb_node);
- if (addr < va->va_start)
- n = n->rb_left;
- else if (addr >= va->va_end)
- n = n->rb_right;
- else
- return va;
- }
- return NULL;
- }
- /*
- * This function returns the address of the parent node
- * and its left or right link for further processing.
- *
- * Otherwise NULL is returned. In that case all further
- * steps regarding inserting the conflicting overlapping range
- * have to be declined; it is actually considered a bug.
- */
- static __always_inline struct rb_node **
- find_va_links(struct vmap_area *va,
- struct rb_root *root, struct rb_node *from,
- struct rb_node **parent)
- {
- struct vmap_area *tmp_va;
- struct rb_node **link;
- if (root) {
- link = &root->rb_node;
- if (unlikely(!*link)) {
- *parent = NULL;
- return link;
- }
- } else {
- link = &from;
- }
- /*
- * Go to the bottom of the tree. When we hit the last point
- * we end up with the parent rb_node and the correct direction,
- * named "link" here, where the new va->rb_node will be attached.
- */
- do {
- tmp_va = rb_entry(*link, struct vmap_area, rb_node);
- /*
- * During the traversal we also do a sanity check:
- * report a WARN() and bail out if the new range partially
- * (left/right) or fully overlaps an existing one.
- */
- if (va->va_end <= tmp_va->va_start)
- link = &(*link)->rb_left;
- else if (va->va_start >= tmp_va->va_end)
- link = &(*link)->rb_right;
- else {
- WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
- va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
- return NULL;
- }
- } while (*link);
- *parent = &tmp_va->rb_node;
- return link;
- }
- static __always_inline struct list_head *
- get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
- {
- struct list_head *list;
- if (unlikely(!parent))
- /*
- * The red-black tree where we try to find VA neighbors
- * before merging or inserting is empty, i.e. it means
- * there is no free vmap space. Normally it does not
- * happen but we handle this case anyway.
- */
- return NULL;
- list = &rb_entry(parent, struct vmap_area, rb_node)->list;
- return (&parent->rb_right == link ? list->next : list);
- }
- static __always_inline void
- __link_va(struct vmap_area *va, struct rb_root *root,
- struct rb_node *parent, struct rb_node **link,
- struct list_head *head, bool augment)
- {
- /*
- * VA is still not in the list, but we can
- * identify its future previous list_head node.
- */
- if (likely(parent)) {
- head = &rb_entry(parent, struct vmap_area, rb_node)->list;
- if (&parent->rb_right != link)
- head = head->prev;
- }
- /* Insert to the rb-tree */
- rb_link_node(&va->rb_node, parent, link);
- if (augment) {
- /*
- * Some explanation here. Just perform a simple insertion
- * into the tree. We do not set va->subtree_max_size to
- * its current size before calling rb_insert_augmented(),
- * because the tree is populated from the bottom up to the
- * parent levels only once the node _is_ in the tree.
- *
- * Therefore we set subtree_max_size to zero after insertion,
- * so that augment_tree_propagate_from() can put everything
- * into the correct order later on.
- */
- rb_insert_augmented(&va->rb_node,
- root, &free_vmap_area_rb_augment_cb);
- va->subtree_max_size = 0;
- } else {
- rb_insert_color(&va->rb_node, root);
- }
- /* Address-sort this list */
- list_add(&va->list, head);
- }
- static __always_inline void
- link_va(struct vmap_area *va, struct rb_root *root,
- struct rb_node *parent, struct rb_node **link,
- struct list_head *head)
- {
- __link_va(va, root, parent, link, head, false);
- }
- static __always_inline void
- link_va_augment(struct vmap_area *va, struct rb_root *root,
- struct rb_node *parent, struct rb_node **link,
- struct list_head *head)
- {
- __link_va(va, root, parent, link, head, true);
- }
- static __always_inline void
- __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
- {
- if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
- return;
- if (augment)
- rb_erase_augmented(&va->rb_node,
- root, &free_vmap_area_rb_augment_cb);
- else
- rb_erase(&va->rb_node, root);
- list_del_init(&va->list);
- RB_CLEAR_NODE(&va->rb_node);
- }
- static __always_inline void
- unlink_va(struct vmap_area *va, struct rb_root *root)
- {
- __unlink_va(va, root, false);
- }
- static __always_inline void
- unlink_va_augment(struct vmap_area *va, struct rb_root *root)
- {
- __unlink_va(va, root, true);
- }
- #if DEBUG_AUGMENT_PROPAGATE_CHECK
- /*
- * Called when a node is removed and the tree is rotated.
- */
- static __always_inline unsigned long
- compute_subtree_max_size(struct vmap_area *va)
- {
- return max3(va_size(va),
- get_subtree_max_size(va->rb_node.rb_left),
- get_subtree_max_size(va->rb_node.rb_right));
- }
- static void
- augment_tree_propagate_check(void)
- {
- struct vmap_area *va;
- unsigned long computed_size;
- list_for_each_entry(va, &free_vmap_area_list, list) {
- computed_size = compute_subtree_max_size(va);
- if (computed_size != va->subtree_max_size)
- pr_emerg("tree is corrupted: %lu, %lu\n",
- va_size(va), va->subtree_max_size);
- }
- }
- #endif
- /*
- * This function populates subtree_max_size from bottom to upper
- * levels starting from the VA point. The propagation must be done
- * when the VA size is modified by changing its va_start/va_end, or
- * when a new VA is inserted into the tree.
- *
- * It means that __augment_tree_propagate_from() must be called:
- * - After VA has been inserted to the tree(free path);
- * - After VA has been shrunk(allocation path);
- * - After VA has been increased(merging path).
- *
- * Please note that, it does not mean that upper parent nodes
- * and their subtree_max_size are recalculated all the time up
- * to the root node.
- *
- * 4--8
- * /\
- * / \
- * / \
- * 2--2 8--8
- *
- * For example, if we modify the node 4, shrinking it to 2, no
- * modification is required. If we shrink the node 2 to 1, only
- * its subtree_max_size is updated and set to 1. If we shrink
- * the node 8 to 6, its subtree_max_size is set to 6 and the
- * parent node becomes 4--6.
- */
- static __always_inline void
- augment_tree_propagate_from(struct vmap_area *va)
- {
- /*
- * Populate the tree from bottom towards the root until
- * the calculated maximum available size of checked node
- * is equal to its current one.
- */
- free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
- #if DEBUG_AUGMENT_PROPAGATE_CHECK
- augment_tree_propagate_check();
- #endif
- }
- static void
- insert_vmap_area(struct vmap_area *va,
- struct rb_root *root, struct list_head *head)
- {
- struct rb_node **link;
- struct rb_node *parent;
- link = find_va_links(va, root, NULL, &parent);
- if (link)
- link_va(va, root, parent, link, head);
- }
- static void
- insert_vmap_area_augment(struct vmap_area *va,
- struct rb_node *from, struct rb_root *root,
- struct list_head *head)
- {
- struct rb_node **link;
- struct rb_node *parent;
- if (from)
- link = find_va_links(va, NULL, from, &parent);
- else
- link = find_va_links(va, root, NULL, &parent);
- if (link) {
- link_va_augment(va, root, parent, link, head);
- augment_tree_propagate_from(va);
- }
- }
- /*
- * Merge a de-allocated chunk of VA memory with the previous
- * and next free blocks. If no coalescing is done, a new
- * free area is inserted. If the VA has been merged, it is
- * freed.
- *
- * Please note, it can return NULL in case of overlapping
- * ranges, followed by a WARN() report. Although that is
- * buggy behaviour, the system stays alive and keeps going.
- */
- static __always_inline struct vmap_area *
- __merge_or_add_vmap_area(struct vmap_area *va,
- struct rb_root *root, struct list_head *head, bool augment)
- {
- struct vmap_area *sibling;
- struct list_head *next;
- struct rb_node **link;
- struct rb_node *parent;
- bool merged = false;
- /*
- * Find a place in the tree where VA potentially will be
- * inserted, unless it is merged with its sibling/siblings.
- */
- link = find_va_links(va, root, NULL, &parent);
- if (!link)
- return NULL;
- /*
- * Get next node of VA to check if merging can be done.
- */
- next = get_va_next_sibling(parent, link);
- if (unlikely(next == NULL))
- goto insert;
- /*
- * start end
- * | |
- * |<------VA------>|<-----Next----->|
- * | |
- * start end
- */
- if (next != head) {
- sibling = list_entry(next, struct vmap_area, list);
- if (sibling->va_start == va->va_end) {
- sibling->va_start = va->va_start;
- /* Free vmap_area object. */
- kmem_cache_free(vmap_area_cachep, va);
- /* Point to the new merged area. */
- va = sibling;
- merged = true;
- }
- }
- /*
- * start end
- * | |
- * |<-----Prev----->|<------VA------>|
- * | |
- * start end
- */
- if (next->prev != head) {
- sibling = list_entry(next->prev, struct vmap_area, list);
- if (sibling->va_end == va->va_start) {
- /*
- * If both neighbors are coalesced, it is important
- * to unlink the "next" node first, followed by merging
- * with "previous" one. Otherwise the tree might not be
- * fully populated if a sibling's augmented value is
- * "normalized" because of rotation operations.
- */
- if (merged)
- __unlink_va(va, root, augment);
- sibling->va_end = va->va_end;
- /* Free vmap_area object. */
- kmem_cache_free(vmap_area_cachep, va);
- /* Point to the new merged area. */
- va = sibling;
- merged = true;
- }
- }
- insert:
- if (!merged)
- __link_va(va, root, parent, link, head, augment);
- return va;
- }
- static __always_inline struct vmap_area *
- merge_or_add_vmap_area(struct vmap_area *va,
- struct rb_root *root, struct list_head *head)
- {
- return __merge_or_add_vmap_area(va, root, head, false);
- }
- static __always_inline struct vmap_area *
- merge_or_add_vmap_area_augment(struct vmap_area *va,
- struct rb_root *root, struct list_head *head)
- {
- va = __merge_or_add_vmap_area(va, root, head, true);
- if (va)
- augment_tree_propagate_from(va);
- return va;
- }
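- /*
- * The two wrappers above differ only in subtree_max_size maintenance:
- * merge_or_add_vmap_area_augment() re-propagates the augmented value
- * after merging and is therefore the one used for free_vmap_area_root
- * (see free_vmap_area()), while the plain variant is meant for trees
- * that do not track subtree_max_size.
- */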
- static __always_inline bool
- is_within_this_va(struct vmap_area *va, unsigned long size,
- unsigned long align, unsigned long vstart)
- {
- unsigned long nva_start_addr;
- if (va->va_start > vstart)
- nva_start_addr = ALIGN(va->va_start, align);
- else
- nva_start_addr = ALIGN(vstart, align);
- /* Can be overflowed due to big size or alignment. */
- if (nva_start_addr + size < nva_start_addr ||
- nva_start_addr < vstart)
- return false;
- return (nva_start_addr + size <= va->va_end);
- }
- /*
- * Find the first free block (lowest start address) in the tree
- * that can satisfy the request described by the passed
- * parameters. Please note, with an alignment bigger than PAGE_SIZE,
- * the search length is adjusted to account for the worst-case
- * alignment overhead.
- */
- static __always_inline struct vmap_area *
- find_vmap_lowest_match(struct rb_root *root, unsigned long size,
- unsigned long align, unsigned long vstart, bool adjust_search_size)
- {
- struct vmap_area *va;
- struct rb_node *node;
- unsigned long length;
- /* Start from the root. */
- node = root->rb_node;
- /* Adjust the search size for alignment overhead. */
- length = adjust_search_size ? size + align - 1 : size;
- while (node) {
- va = rb_entry(node, struct vmap_area, rb_node);
- if (get_subtree_max_size(node->rb_left) >= length &&
- vstart < va->va_start) {
- node = node->rb_left;
- } else {
- if (is_within_this_va(va, size, align, vstart))
- return va;
- /*
- * Does not make sense to go deeper towards the right
- * sub-tree if it does not have a free block that is
- * equal to or bigger than the requested search length.
- */
- if (get_subtree_max_size(node->rb_right) >= length) {
- node = node->rb_right;
- continue;
- }
- /*
- * OK. We roll back and find the first right sub-tree
- * that will satisfy the search criteria. This can happen
- * because of the "vstart" restriction or an alignment overhead
- * that is bigger than PAGE_SIZE.
- */
- while ((node = rb_parent(node))) {
- va = rb_entry(node, struct vmap_area, rb_node);
- if (is_within_this_va(va, size, align, vstart))
- return va;
- if (get_subtree_max_size(node->rb_right) >= length &&
- vstart <= va->va_start) {
- /*
- * Shift the vstart forward. Please note, we update it with
- * parent's start address adding "1" because we do not want
- * to enter the same sub-tree after it has already been checked
- * and no suitable free block found there.
- */
- vstart = va->va_start + 1;
- node = node->rb_right;
- break;
- }
- }
- }
- }
- return NULL;
- }
- #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
- #include <linux/random.h>
- static struct vmap_area *
- find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
- unsigned long align, unsigned long vstart)
- {
- struct vmap_area *va;
- list_for_each_entry(va, head, list) {
- if (!is_within_this_va(va, size, align, vstart))
- continue;
- return va;
- }
- return NULL;
- }
- static void
- find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
- unsigned long size, unsigned long align)
- {
- struct vmap_area *va_1, *va_2;
- unsigned long vstart;
- unsigned int rnd;
- get_random_bytes(&rnd, sizeof(rnd));
- vstart = VMALLOC_START + rnd;
- va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
- va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
- if (va_1 != va_2)
- pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
- va_1, va_2, vstart);
- }
- #endif
- enum fit_type {
- NOTHING_FIT = 0,
- FL_FIT_TYPE = 1, /* full fit */
- LE_FIT_TYPE = 2, /* left edge fit */
- RE_FIT_TYPE = 3, /* right edge fit */
- NE_FIT_TYPE = 4 /* no edge fit */
- };
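- /*
- * How each fit type is resolved by adjust_va_to_fit_type() below:
- * FL_FIT_TYPE removes the free VA entirely, LE_FIT_TYPE advances
- * va_start past the allocation, RE_FIT_TYPE pulls va_end back to the
- * allocation start, and NE_FIT_TYPE splits the VA in two, which is
- * the only case that needs an extra vmap_area object.
- */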
- static __always_inline enum fit_type
- classify_va_fit_type(struct vmap_area *va,
- unsigned long nva_start_addr, unsigned long size)
- {
- enum fit_type type;
- /* Check if it is within VA. */
- if (nva_start_addr < va->va_start ||
- nva_start_addr + size > va->va_end)
- return NOTHING_FIT;
- /* Now classify. */
- if (va->va_start == nva_start_addr) {
- if (va->va_end == nva_start_addr + size)
- type = FL_FIT_TYPE;
- else
- type = LE_FIT_TYPE;
- } else if (va->va_end == nva_start_addr + size) {
- type = RE_FIT_TYPE;
- } else {
- type = NE_FIT_TYPE;
- }
- return type;
- }
- static __always_inline int
- adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
- struct vmap_area *va, unsigned long nva_start_addr,
- unsigned long size)
- {
- struct vmap_area *lva = NULL;
- enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
- if (type == FL_FIT_TYPE) {
- /*
- * No need to split VA, it fully fits.
- *
- * | |
- * V NVA V
- * |---------------|
- */
- unlink_va_augment(va, root);
- kmem_cache_free(vmap_area_cachep, va);
- } else if (type == LE_FIT_TYPE) {
- /*
- * Split left edge of fit VA.
- *
- * | |
- * V NVA V R
- * |-------|-------|
- */
- va->va_start += size;
- } else if (type == RE_FIT_TYPE) {
- /*
- * Split right edge of fit VA.
- *
- * | |
- * L V NVA V
- * |-------|-------|
- */
- va->va_end = nva_start_addr;
- } else if (type == NE_FIT_TYPE) {
- /*
- * Split no edge of fit VA.
- *
- * | |
- * L V NVA V R
- * |---|-------|---|
- */
- lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
- if (unlikely(!lva)) {
- /*
- * For the percpu allocator we do not do any pre-allocation
- * and leave it as it is. The reason is that it most likely
- * never ends up with NE_FIT_TYPE splitting: for percpu
- * allocations, offsets and sizes are aligned to a fixed
- * alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
- * are its main fitting cases.
- *
- * There are a few exceptions though; for example, the very
- * first allocation (early boot) when there is "one" big
- * free space that has to be split.
- *
- * We can also hit this path for regular "vmap" allocations
- * if "this" current CPU was not preloaded. See the comment
- * in alloc_vmap_area() for why. If so, GFP_NOWAIT is used
- * instead to get an extra object for the split. That is
- * rare and mostly does not occur.
- *
- * What happens if the allocation fails? Basically, the
- * "overflow" path is triggered to purge lazily freed
- * areas to free some memory, then the "retry" path is
- * taken to try one more time. See more details in the
- * alloc_vmap_area() function.
- */
- lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
- if (!lva)
- return -1;
- }
- /*
- * Build the remainder.
- */
- lva->va_start = va->va_start;
- lva->va_end = nva_start_addr;
- /*
- * Shrink this VA to remaining size.
- */
- va->va_start = nva_start_addr + size;
- } else {
- return -1;
- }
- if (type != FL_FIT_TYPE) {
- augment_tree_propagate_from(va);
- if (lva) /* type == NE_FIT_TYPE */
- insert_vmap_area_augment(lva, &va->rb_node, root, head);
- }
- return 0;
- }
- /*
- * Returns the start address of the newly allocated area on success.
- * Otherwise "vend" is returned to indicate failure.
- */
- static __always_inline unsigned long
- __alloc_vmap_area(struct rb_root *root, struct list_head *head,
- unsigned long size, unsigned long align,
- unsigned long vstart, unsigned long vend)
- {
- bool adjust_search_size = true;
- unsigned long nva_start_addr;
- struct vmap_area *va;
- int ret;
- /*
- * Do not adjust when:
- * a) align <= PAGE_SIZE, because it does not make any sense.
- * All blocks (their start addresses) are at least PAGE_SIZE
- * aligned anyway;
- * b) a short range where the requested size corresponds exactly
- * to the specified [vstart:vend] interval and the alignment is
- * bigger than PAGE_SIZE. With an adjusted search length such an
- * allocation would not succeed.
- */
- if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
- adjust_search_size = false;
- va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
- if (unlikely(!va))
- return vend;
- if (va->va_start > vstart)
- nva_start_addr = ALIGN(va->va_start, align);
- else
- nva_start_addr = ALIGN(vstart, align);
- /* Check the "vend" restriction. */
- if (nva_start_addr + size > vend)
- return vend;
- /* Update the free vmap_area. */
- ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
- if (WARN_ON_ONCE(ret))
- return vend;
- #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
- find_vmap_lowest_match_check(root, head, size, align);
- #endif
- return nva_start_addr;
- }
- /*
- * Free a region of KVA allocated by alloc_vmap_area
- */
- static void free_vmap_area(struct vmap_area *va)
- {
- /*
- * Remove from the busy tree/list.
- */
- spin_lock(&vmap_area_lock);
- unlink_va(va, &vmap_area_root);
- spin_unlock(&vmap_area_lock);
- /*
- * Insert/Merge it back to the free tree/list.
- */
- spin_lock(&free_vmap_area_lock);
- merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
- spin_unlock(&free_vmap_area_lock);
- }
- static inline void
- preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
- {
- struct vmap_area *va = NULL;
- /*
- * Preload this CPU with one extra vmap_area object. It is used
- * when fit type of free area is NE_FIT_TYPE. It guarantees that
- * a CPU that does an allocation is preloaded.
- *
- * We do it in non-atomic context, which allows us to use more
- * permissive allocation masks and hence be more stable under
- * low-memory conditions and high memory pressure.
- */
- if (!this_cpu_read(ne_fit_preload_node))
- va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
- spin_lock(lock);
- if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
- kmem_cache_free(vmap_area_cachep, va);
- }
- /*
- * Allocate a region of KVA of the specified size and alignment, within the
- * vstart and vend.
- */
- static struct vmap_area *alloc_vmap_area(unsigned long size,
- unsigned long align,
- unsigned long vstart, unsigned long vend,
- int node, gfp_t gfp_mask)
- {
- struct vmap_area *va;
- unsigned long freed;
- unsigned long addr;
- int purged = 0;
- int ret;
- BUG_ON(!size);
- BUG_ON(offset_in_page(size));
- BUG_ON(!is_power_of_2(align));
- if (unlikely(!vmap_initialized))
- return ERR_PTR(-EBUSY);
- might_sleep();
- gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
- va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
- if (unlikely(!va))
- return ERR_PTR(-ENOMEM);
- /*
- * Only scan the relevant parts containing pointers to other objects
- * to avoid false negatives.
- */
- kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
- retry:
- preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
- addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
- size, align, vstart, vend);
- spin_unlock(&free_vmap_area_lock);
- /*
- * If an allocation fails, the "vend" address is
- * returned. Therefore trigger the overflow path.
- */
- if (unlikely(addr == vend))
- goto overflow;
- va->va_start = addr;
- va->va_end = addr + size;
- va->vm = NULL;
- spin_lock(&vmap_area_lock);
- insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
- spin_unlock(&vmap_area_lock);
- BUG_ON(!IS_ALIGNED(va->va_start, align));
- BUG_ON(va->va_start < vstart);
- BUG_ON(va->va_end > vend);
- ret = kasan_populate_vmalloc(addr, size);
- if (ret) {
- free_vmap_area(va);
- return ERR_PTR(ret);
- }
- return va;
- overflow:
- if (!purged) {
- purge_vmap_area_lazy();
- purged = 1;
- goto retry;
- }
- freed = 0;
- blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
- if (freed > 0) {
- purged = 0;
- goto retry;
- }
- if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
- pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
- size);
- kmem_cache_free(vmap_area_cachep, va);
- return ERR_PTR(-EBUSY);
- }
- int register_vmap_purge_notifier(struct notifier_block *nb)
- {
- return blocking_notifier_chain_register(&vmap_notify_list, nb);
- }
- EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
- int unregister_vmap_purge_notifier(struct notifier_block *nb)
- {
- return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
- }
- EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
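- /*
- * Hypothetical usage sketch (not part of this file): a driver that caches
- * vmalloc'ed buffers can hook the purge notifier chain and release its
- * cache when alloc_vmap_area() hits the overflow path. The callback only
- * needs to bump *freed so the caller knows a retry is worthwhile; the
- * "example_" names below are illustrative.
- */
- static int example_vmap_purge(struct notifier_block *nb, unsigned long action, void *data)
- {
-         unsigned long *freed = data;
-         /* Drop driver-private vmalloc areas here... */
-         *freed += 0;  /* placeholder: add the amount actually released */
-         return NOTIFY_OK;
- }
- static struct notifier_block example_vmap_nb = {
-         .notifier_call = example_vmap_purge,
- };
- /*
- * register_vmap_purge_notifier(&example_vmap_nb) at init time and
- * unregister_vmap_purge_notifier(&example_vmap_nb) at exit time.
- */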
- /*
- * lazy_max_pages is the maximum amount of virtual address space we gather up
- * before attempting to purge with a TLB flush.
- *
- * There is a tradeoff here: a larger number will cover more kernel page tables
- * and take slightly longer to purge, but it will linearly reduce the number of
- * global TLB flushes that must be performed. It would seem natural to scale
- * this number up linearly with the number of CPUs (because vmapping activity
- * could also scale linearly with the number of CPUs), however it is likely
- * that in practice, workloads might be constrained in other ways that mean
- * vmap activity will not scale linearly with CPUs. Also, I want to be
- * conservative and not introduce a big latency on huge systems, so go with
- * a less aggressive log scale. It will still be an improvement over the old
- * code, and it will be simple to change the scale factor if we find that it
- * becomes a problem on bigger systems.
- */
- static unsigned long lazy_max_pages(void)
- {
- unsigned int log;
- log = fls(num_online_cpus());
- return log * (32UL * 1024 * 1024 / PAGE_SIZE);
- }
- static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
- /*
- * Serialize vmap purging. There is no actual critical section protected
- * by this lock, but we want to avoid concurrent calls for performance
- * reasons and to make pcpu_get_vm_areas() more deterministic.
- */
- static DEFINE_MUTEX(vmap_purge_lock);
- /* for per-CPU blocks */
- static void purge_fragmented_blocks_allcpus(void);
- /*
- * Purges all lazily-freed vmap areas.
- */
- static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
- {
- unsigned long resched_threshold;
- struct list_head local_purge_list;
- struct vmap_area *va, *n_va;
- lockdep_assert_held(&vmap_purge_lock);
- spin_lock(&purge_vmap_area_lock);
- purge_vmap_area_root = RB_ROOT;
- list_replace_init(&purge_vmap_area_list, &local_purge_list);
- spin_unlock(&purge_vmap_area_lock);
- if (unlikely(list_empty(&local_purge_list)))
- return false;
- start = min(start,
- list_first_entry(&local_purge_list,
- struct vmap_area, list)->va_start);
- end = max(end,
- list_last_entry(&local_purge_list,
- struct vmap_area, list)->va_end);
- flush_tlb_kernel_range(start, end);
- resched_threshold = lazy_max_pages() << 1;
- spin_lock(&free_vmap_area_lock);
- list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
- unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
- unsigned long orig_start = va->va_start;
- unsigned long orig_end = va->va_end;
- /*
- * Finally insert or merge lazily-freed area. It is
- * detached and there is no need to "unlink" it from
- * anything.
- */
- va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
- &free_vmap_area_list);
- if (!va)
- continue;
- if (is_vmalloc_or_module_addr((void *)orig_start))
- kasan_release_vmalloc(orig_start, orig_end,
- va->va_start, va->va_end);
- atomic_long_sub(nr, &vmap_lazy_nr);
- if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
- cond_resched_lock(&free_vmap_area_lock);
- }
- spin_unlock(&free_vmap_area_lock);
- return true;
- }
- /*
- * Kick off a purge of the outstanding lazy areas.
- */
- static void purge_vmap_area_lazy(void)
- {
- mutex_lock(&vmap_purge_lock);
- purge_fragmented_blocks_allcpus();
- __purge_vmap_area_lazy(ULONG_MAX, 0);
- mutex_unlock(&vmap_purge_lock);
- }
- static void drain_vmap_area_work(struct work_struct *work)
- {
- unsigned long nr_lazy;
- do {
- mutex_lock(&vmap_purge_lock);
- __purge_vmap_area_lazy(ULONG_MAX, 0);
- mutex_unlock(&vmap_purge_lock);
- /* Recheck if further work is required. */
- nr_lazy = atomic_long_read(&vmap_lazy_nr);
- } while (nr_lazy > lazy_max_pages());
- }
- /*
- * Free a vmap area, with the caller ensuring that the area has been
- * unmapped and that flush_cache_vunmap() has been called for the
- * correct range beforehand.
- */
- static void free_vmap_area_noflush(struct vmap_area *va)
- {
- unsigned long nr_lazy;
- spin_lock(&vmap_area_lock);
- unlink_va(va, &vmap_area_root);
- spin_unlock(&vmap_area_lock);
- nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
- PAGE_SHIFT, &vmap_lazy_nr);
- /*
- * Merge or place it to the purge tree/list.
- */
- spin_lock(&purge_vmap_area_lock);
- merge_or_add_vmap_area(va,
- &purge_vmap_area_root, &purge_vmap_area_list);
- spin_unlock(&purge_vmap_area_lock);
- /* After this point, we may free va at any time */
- if (unlikely(nr_lazy > lazy_max_pages()))
- schedule_work(&drain_vmap_work);
- }
- /*
- * Free and unmap a vmap area
- */
- static void free_unmap_vmap_area(struct vmap_area *va)
- {
- flush_cache_vunmap(va->va_start, va->va_end);
- vunmap_range_noflush(va->va_start, va->va_end);
- if (debug_pagealloc_enabled_static())
- flush_tlb_kernel_range(va->va_start, va->va_end);
- free_vmap_area_noflush(va);
- }
- struct vmap_area *find_vmap_area(unsigned long addr)
- {
- struct vmap_area *va;
- spin_lock(&vmap_area_lock);
- va = __find_vmap_area(addr, &vmap_area_root);
- spin_unlock(&vmap_area_lock);
- return va;
- }
- /*** Per cpu kva allocator ***/
- /*
- * vmap space is limited especially on 32 bit architectures. Ensure there is
- * room for at least 16 percpu vmap blocks per CPU.
- */
- /*
- * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
- * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
- * instead (we just need a rough idea)
- */
- #if BITS_PER_LONG == 32
- #define VMALLOC_SPACE (128UL*1024*1024)
- #else
- #define VMALLOC_SPACE (128UL*1024*1024*1024)
- #endif
- #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
- #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
- #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
- #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
- #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
- #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
- #define VMAP_BBMAP_BITS \
- VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
- VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
- VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
- #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
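- /*
- * Example (illustrative, 64-bit, 4K pages, NR_CPUS rounded up to 8):
- * VMALLOC_PAGES = 128G / 4K = 32M, so the scaled term is
- * 32M / 8 / 16 = 256K bits, which VMAP_MIN() clamps down to
- * VMAP_BBMAP_BITS_MAX = 1024. VMAP_BLOCK_SIZE is then 1024 * 4K = 4MB,
- * matching the "4MB with 4K pages" note above.
- */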
- struct vmap_block_queue {
- spinlock_t lock;
- struct list_head free;
- };
- struct vmap_block {
- spinlock_t lock;
- struct vmap_area *va;
- unsigned long free, dirty;
- unsigned long dirty_min, dirty_max; /*< dirty range */
- struct list_head free_list;
- struct rcu_head rcu_head;
- struct list_head purge;
- };
- /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
- static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
- /*
- * XArray of vmap blocks, indexed by address, to quickly find a vmap block
- * in the free path. Could get rid of this if we change the API to return a
- * "cookie" from alloc, to be passed to free. But no big deal yet.
- */
- static DEFINE_XARRAY(vmap_blocks);
- /*
- * We should probably have a fallback mechanism to allocate virtual memory
- * out of partially filled vmap blocks. However vmap block sizing should be
- * fairly reasonable according to the vmalloc size, so it shouldn't be a
- * big problem.
- */
- static unsigned long addr_to_vb_idx(unsigned long addr)
- {
- addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
- addr /= VMAP_BLOCK_SIZE;
- return addr;
- }
- static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
- {
- unsigned long addr;
- addr = va_start + (pages_off << PAGE_SHIFT);
- BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
- return (void *)addr;
- }
- /**
- * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
- * block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
- * @order: how many 2^order pages should be occupied in newly allocated block
- * @gfp_mask: flags for the page level allocator
- *
- * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
- */
- static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
- {
- struct vmap_block_queue *vbq;
- struct vmap_block *vb;
- struct vmap_area *va;
- unsigned long vb_idx;
- int node, err;
- void *vaddr;
- node = numa_node_id();
- vb = kmalloc_node(sizeof(struct vmap_block),
- gfp_mask & GFP_RECLAIM_MASK, node);
- if (unlikely(!vb))
- return ERR_PTR(-ENOMEM);
- va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
- VMALLOC_START, VMALLOC_END,
- node, gfp_mask);
- if (IS_ERR(va)) {
- kfree(vb);
- return ERR_CAST(va);
- }
- vaddr = vmap_block_vaddr(va->va_start, 0);
- spin_lock_init(&vb->lock);
- vb->va = va;
- /* At least something should be left free */
- BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
- vb->free = VMAP_BBMAP_BITS - (1UL << order);
- vb->dirty = 0;
- vb->dirty_min = VMAP_BBMAP_BITS;
- vb->dirty_max = 0;
- INIT_LIST_HEAD(&vb->free_list);
- vb_idx = addr_to_vb_idx(va->va_start);
- err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
- if (err) {
- kfree(vb);
- free_vmap_area(va);
- return ERR_PTR(err);
- }
- vbq = raw_cpu_ptr(&vmap_block_queue);
- spin_lock(&vbq->lock);
- list_add_tail_rcu(&vb->free_list, &vbq->free);
- spin_unlock(&vbq->lock);
- return vaddr;
- }
- static void free_vmap_block(struct vmap_block *vb)
- {
- struct vmap_block *tmp;
- tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
- BUG_ON(tmp != vb);
- free_vmap_area_noflush(vb->va);
- kfree_rcu(vb, rcu_head);
- }
- static void purge_fragmented_blocks(int cpu)
- {
- LIST_HEAD(purge);
- struct vmap_block *vb;
- struct vmap_block *n_vb;
- struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
- rcu_read_lock();
- list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
- continue;
- spin_lock(&vb->lock);
- if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
- vb->free = 0; /* prevent further allocs after releasing lock */
- vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
- vb->dirty_min = 0;
- vb->dirty_max = VMAP_BBMAP_BITS;
- spin_lock(&vbq->lock);
- list_del_rcu(&vb->free_list);
- spin_unlock(&vbq->lock);
- spin_unlock(&vb->lock);
- list_add_tail(&vb->purge, &purge);
- } else
- spin_unlock(&vb->lock);
- }
- rcu_read_unlock();
- list_for_each_entry_safe(vb, n_vb, &purge, purge) {
- list_del(&vb->purge);
- free_vmap_block(vb);
- }
- }
- static void purge_fragmented_blocks_allcpus(void)
- {
- int cpu;
- for_each_possible_cpu(cpu)
- purge_fragmented_blocks(cpu);
- }
- static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
- {
- struct vmap_block_queue *vbq;
- struct vmap_block *vb;
- void *vaddr = NULL;
- unsigned int order;
- BUG_ON(offset_in_page(size));
- BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
- if (WARN_ON(size == 0)) {
- /*
- * Allocating 0 bytes isn't what the caller wants, since
- * get_order(0) returns a funny result. Just warn and terminate
- * early.
- */
- return NULL;
- }
- order = get_order(size);
- rcu_read_lock();
- vbq = raw_cpu_ptr(&vmap_block_queue);
- list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- unsigned long pages_off;
- spin_lock(&vb->lock);
- if (vb->free < (1UL << order)) {
- spin_unlock(&vb->lock);
- continue;
- }
- pages_off = VMAP_BBMAP_BITS - vb->free;
- vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
- vb->free -= 1UL << order;
- if (vb->free == 0) {
- spin_lock(&vbq->lock);
- list_del_rcu(&vb->free_list);
- spin_unlock(&vbq->lock);
- }
- spin_unlock(&vb->lock);
- break;
- }
- rcu_read_unlock();
- /* Allocate new block if nothing was found */
- if (!vaddr)
- vaddr = new_vmap_block(order, gfp_mask);
- return vaddr;
- }
- static void vb_free(unsigned long addr, unsigned long size)
- {
- unsigned long offset;
- unsigned int order;
- struct vmap_block *vb;
- BUG_ON(offset_in_page(size));
- BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
- flush_cache_vunmap(addr, addr + size);
- order = get_order(size);
- offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
- vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
- vunmap_range_noflush(addr, addr + size);
- if (debug_pagealloc_enabled_static())
- flush_tlb_kernel_range(addr, addr + size);
- spin_lock(&vb->lock);
- /* Expand dirty range */
- vb->dirty_min = min(vb->dirty_min, offset);
- vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
- vb->dirty += 1UL << order;
- if (vb->dirty == VMAP_BBMAP_BITS) {
- BUG_ON(vb->free);
- spin_unlock(&vb->lock);
- free_vmap_block(vb);
- } else
- spin_unlock(&vb->lock);
- }
- static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
- {
- int cpu;
- if (unlikely(!vmap_initialized))
- return;
- might_sleep();
- for_each_possible_cpu(cpu) {
- struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
- struct vmap_block *vb;
- rcu_read_lock();
- list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- spin_lock(&vb->lock);
- if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
- unsigned long va_start = vb->va->va_start;
- unsigned long s, e;
- s = va_start + (vb->dirty_min << PAGE_SHIFT);
- e = va_start + (vb->dirty_max << PAGE_SHIFT);
- start = min(s, start);
- end = max(e, end);
- flush = 1;
- }
- spin_unlock(&vb->lock);
- }
- rcu_read_unlock();
- }
- mutex_lock(&vmap_purge_lock);
- purge_fragmented_blocks_allcpus();
- if (!__purge_vmap_area_lazy(start, end) && flush)
- flush_tlb_kernel_range(start, end);
- mutex_unlock(&vmap_purge_lock);
- }
- /**
- * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
- *
- * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
- * to amortize TLB flushing overheads. What this means is that any page you
- * have now may, in a former life, have been mapped into a kernel virtual
- * address by the vmap layer, and so there might be some CPUs with TLB entries
- * still referencing that page (in addition to the regular 1:1 kernel mapping).
- *
- * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
- * be sure that none of the pages we have control over will have any aliases
- * from the vmap layer.
- */
- void vm_unmap_aliases(void)
- {
- unsigned long start = ULONG_MAX, end = 0;
- int flush = 0;
- _vm_unmap_aliases(start, end, flush);
- }
- EXPORT_SYMBOL_GPL(vm_unmap_aliases);
- /**
- * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
- * @mem: the pointer returned by vm_map_ram
- * @count: the count passed to that vm_map_ram call (cannot unmap partial)
- */
- void vm_unmap_ram(const void *mem, unsigned int count)
- {
- unsigned long size = (unsigned long)count << PAGE_SHIFT;
- unsigned long addr = (unsigned long)kasan_reset_tag(mem);
- struct vmap_area *va;
- might_sleep();
- BUG_ON(!addr);
- BUG_ON(addr < VMALLOC_START);
- BUG_ON(addr > VMALLOC_END);
- BUG_ON(!PAGE_ALIGNED(addr));
- kasan_poison_vmalloc(mem, size);
- if (likely(count <= VMAP_MAX_ALLOC)) {
- debug_check_no_locks_freed(mem, size);
- vb_free(addr, size);
- return;
- }
- va = find_vmap_area(addr);
- BUG_ON(!va);
- debug_check_no_locks_freed((void *)va->va_start,
- (va->va_end - va->va_start));
- free_unmap_vmap_area(va);
- }
- EXPORT_SYMBOL(vm_unmap_ram);
- /**
- * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
- * @pages: an array of pointers to the pages to be mapped
- * @count: number of pages
- * @node: prefer to allocate data structures on this node
- *
- * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
- * faster than vmap(), so it's good. But if you mix long-lived and short-lived
- * objects with vm_map_ram(), it can consume a lot of address space through
- * fragmentation (especially on a 32-bit machine). You could see failures in
- * the end. Please use this function only for short-lived objects.
- *
- * Returns: a pointer to the address that has been mapped, or %NULL on failure
- */
- void *vm_map_ram(struct page **pages, unsigned int count, int node)
- {
- unsigned long size = (unsigned long)count << PAGE_SHIFT;
- unsigned long addr;
- void *mem;
- if (likely(count <= VMAP_MAX_ALLOC)) {
- mem = vb_alloc(size, GFP_KERNEL);
- if (IS_ERR(mem))
- return NULL;
- addr = (unsigned long)mem;
- } else {
- struct vmap_area *va;
- va = alloc_vmap_area(size, PAGE_SIZE,
- VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
- if (IS_ERR(va))
- return NULL;
- addr = va->va_start;
- mem = (void *)addr;
- }
- if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
- pages, PAGE_SHIFT) < 0) {
- vm_unmap_ram(mem, count);
- return NULL;
- }
- /*
- * Mark the pages as accessible, now that they are mapped.
- * With hardware tag-based KASAN, marking is skipped for
- * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
- */
- mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
- return mem;
- }
- EXPORT_SYMBOL(vm_map_ram);
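- /*
- * Hypothetical usage sketch (not part of this file): a short-lived mapping
- * of a couple of pages goes through the per-CPU vmap block allocator
- * (vb_alloc()) because count <= VMAP_MAX_ALLOC, which is the fast path
- * this interface is intended for. The "example_" helpers are illustrative.
- */
- static void *example_map_two_pages(struct page *pages[2])
- {
-         /* NUMA_NO_NODE: no preference for where the metadata is allocated. */
-         return vm_map_ram(pages, 2, NUMA_NO_NODE);
- }
- static void example_unmap_two_pages(void *mem)
- {
-         /* The count must match the vm_map_ram() call; no partial unmap. */
-         vm_unmap_ram(mem, 2);
- }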
- static struct vm_struct *vmlist __initdata;
- static inline unsigned int vm_area_page_order(struct vm_struct *vm)
- {
- #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
- return vm->page_order;
- #else
- return 0;
- #endif
- }
- static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
- {
- #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
- vm->page_order = order;
- #else
- BUG_ON(order != 0);
- #endif
- }
- /**
- * vm_area_add_early - add vmap area early during boot
- * @vm: vm_struct to add
- *
- * This function is used to add a fixed kernel vm area to vmlist before
- * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
- * should contain proper values and the other fields should be zero.
- *
- * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
- */
- void __init vm_area_add_early(struct vm_struct *vm)
- {
- struct vm_struct *tmp, **p;
- BUG_ON(vmap_initialized);
- for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
- if (tmp->addr >= vm->addr) {
- BUG_ON(tmp->addr < vm->addr + vm->size);
- break;
- } else
- BUG_ON(tmp->addr + tmp->size > vm->addr);
- }
- vm->next = *p;
- *p = vm;
- }
- /**
- * vm_area_register_early - register vmap area early during boot
- * @vm: vm_struct to register
- * @align: requested alignment
- *
- * This function is used to register a kernel vm area before
- * vmalloc_init() is called. @vm->size and @vm->flags should contain
- * proper values on entry and other fields should be zero. On return,
- * vm->addr contains the allocated address.
- *
- * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
- */
- void __init vm_area_register_early(struct vm_struct *vm, size_t align)
- {
- unsigned long addr = ALIGN(VMALLOC_START, align);
- struct vm_struct *cur, **p;
- BUG_ON(vmap_initialized);
- for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
- if ((unsigned long)cur->addr - addr >= vm->size)
- break;
- addr = ALIGN((unsigned long)cur->addr + cur->size, align);
- }
- BUG_ON(addr > VMALLOC_END - vm->size);
- vm->addr = (void *)addr;
- vm->next = *p;
- *p = vm;
- kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
- }
- static void vmap_init_free_space(void)
- {
- unsigned long vmap_start = 1;
- const unsigned long vmap_end = ULONG_MAX;
- struct vmap_area *busy, *free;
- /*
- * B F B B B F
- * -|-----|.....|-----|-----|-----|.....|-
- * | The KVA space |
- * |<--------------------------------->|
- */
- list_for_each_entry(busy, &vmap_area_list, list) {
- if (busy->va_start - vmap_start > 0) {
- free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
- if (!WARN_ON_ONCE(!free)) {
- free->va_start = vmap_start;
- free->va_end = busy->va_start;
- insert_vmap_area_augment(free, NULL,
- &free_vmap_area_root,
- &free_vmap_area_list);
- }
- }
- vmap_start = busy->va_end;
- }
- if (vmap_end - vmap_start > 0) {
- free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
- if (!WARN_ON_ONCE(!free)) {
- free->va_start = vmap_start;
- free->va_end = vmap_end;
- insert_vmap_area_augment(free, NULL,
- &free_vmap_area_root,
- &free_vmap_area_list);
- }
- }
- }
- void __init vmalloc_init(void)
- {
- struct vmap_area *va;
- struct vm_struct *tmp;
- int i;
- /*
- * Create the cache for vmap_area objects.
- */
- vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
- for_each_possible_cpu(i) {
- struct vmap_block_queue *vbq;
- struct vfree_deferred *p;
- vbq = &per_cpu(vmap_block_queue, i);
- spin_lock_init(&vbq->lock);
- INIT_LIST_HEAD(&vbq->free);
- p = &per_cpu(vfree_deferred, i);
- init_llist_head(&p->list);
- INIT_WORK(&p->wq, free_work);
- }
- /* Import existing vmlist entries. */
- for (tmp = vmlist; tmp; tmp = tmp->next) {
- va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
- if (WARN_ON_ONCE(!va))
- continue;
- va->va_start = (unsigned long)tmp->addr;
- va->va_end = va->va_start + tmp->size;
- va->vm = tmp;
- insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
- }
- /*
- * Now we can initialize a free vmap space.
- */
- vmap_init_free_space();
- vmap_initialized = true;
- }
- static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
- struct vmap_area *va, unsigned long flags, const void *caller)
- {
- vm->flags = flags;
- vm->addr = (void *)va->va_start;
- vm->size = va->va_end - va->va_start;
- vm->caller = caller;
- va->vm = vm;
- }
- static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, const void *caller)
- {
- spin_lock(&vmap_area_lock);
- setup_vmalloc_vm_locked(vm, va, flags, caller);
- spin_unlock(&vmap_area_lock);
- }
- static void clear_vm_uninitialized_flag(struct vm_struct *vm)
- {
- /*
- * Before removing VM_UNINITIALIZED,
- * we should make sure that vm has proper values.
- * Pair with smp_rmb() in show_numa_info().
- */
- smp_wmb();
- vm->flags &= ~VM_UNINITIALIZED;
- }
- static struct vm_struct *__get_vm_area_node(unsigned long size,
- unsigned long align, unsigned long shift, unsigned long flags,
- unsigned long start, unsigned long end, int node,
- gfp_t gfp_mask, const void *caller)
- {
- struct vmap_area *va;
- struct vm_struct *area;
- unsigned long requested_size = size;
- BUG_ON(in_interrupt());
- size = ALIGN(size, 1ul << shift);
- if (unlikely(!size))
- return NULL;
- if (flags & VM_IOREMAP)
- align = 1ul << clamp_t(int, get_count_order_long(size),
- PAGE_SHIFT, IOREMAP_MAX_ORDER);
- area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
- if (unlikely(!area))
- return NULL;
- if (!(flags & VM_NO_GUARD))
- size += PAGE_SIZE;
- va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
- if (IS_ERR(va)) {
- kfree(area);
- return NULL;
- }
- setup_vmalloc_vm(area, va, flags, caller);
- /*
- * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
- * best-effort approach, as they can be mapped outside of vmalloc code.
- * For VM_ALLOC mappings, the pages are marked as accessible after
- * getting mapped in __vmalloc_node_range().
- * With hardware tag-based KASAN, marking is skipped for
- * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
- */
- if (!(flags & VM_ALLOC))
- area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
- KASAN_VMALLOC_PROT_NORMAL);
- return area;
- }
- struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
- unsigned long start, unsigned long end,
- const void *caller)
- {
- return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
- NUMA_NO_NODE, GFP_KERNEL, caller);
- }
- /**
- * get_vm_area - reserve a contiguous kernel virtual area
- * @size: size of the area
- * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
- *
- * Search an area of @size in the kernel virtual mapping area,
- * and reserve it for our purposes. Returns the area descriptor
- * on success or %NULL on failure.
- *
- * Return: the area descriptor on success or %NULL on failure.
- */
- struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
- {
- return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
- VMALLOC_START, VMALLOC_END,
- NUMA_NO_NODE, GFP_KERNEL,
- __builtin_return_address(0));
- }
- struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
- const void *caller)
- {
- return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
- VMALLOC_START, VMALLOC_END,
- NUMA_NO_NODE, GFP_KERNEL, caller);
- }
- /**
- * find_vm_area - find a continuous kernel virtual area
- * @addr: base address
- *
- * Search for the kernel VM area starting at @addr, and return it.
- * It is up to the caller to do all required locking to keep the returned
- * pointer valid.
- *
- * Return: the area descriptor on success or %NULL on failure.
- */
- struct vm_struct *find_vm_area(const void *addr)
- {
- struct vmap_area *va;
- va = find_vmap_area((unsigned long)addr);
- if (!va)
- return NULL;
- return va->vm;
- }
- /**
- * remove_vm_area - find and remove a continuous kernel virtual area
- * @addr: base address
- *
- * Search for the kernel VM area starting at @addr, and remove it.
- * This function returns the found VM area, but using it is NOT safe
- * on SMP machines, except for its size or flags.
- *
- * Return: the area descriptor on success or %NULL on failure.
- */
- struct vm_struct *remove_vm_area(const void *addr)
- {
- struct vmap_area *va;
- might_sleep();
- spin_lock(&vmap_area_lock);
- va = __find_vmap_area((unsigned long)addr, &vmap_area_root);
- if (va && va->vm) {
- struct vm_struct *vm = va->vm;
- va->vm = NULL;
- spin_unlock(&vmap_area_lock);
- kasan_free_module_shadow(vm);
- free_unmap_vmap_area(va);
- return vm;
- }
- spin_unlock(&vmap_area_lock);
- return NULL;
- }
- static inline void set_area_direct_map(const struct vm_struct *area,
- int (*set_direct_map)(struct page *page))
- {
- int i;
- /* HUGE_VMALLOC passes small pages to set_direct_map */
- for (i = 0; i < area->nr_pages; i++)
- if (page_address(area->pages[i]))
- set_direct_map(area->pages[i]);
- }
- /* Handle removing and resetting vm mappings related to the vm_struct. */
- static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
- {
- unsigned long start = ULONG_MAX, end = 0;
- unsigned int page_order = vm_area_page_order(area);
- int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
- int flush_dmap = 0;
- int i;
- remove_vm_area(area->addr);
- /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
- if (!flush_reset)
- return;
- /*
- * If not deallocating pages, just do the flush of the VM area and
- * return.
- */
- if (!deallocate_pages) {
- vm_unmap_aliases();
- return;
- }
- /*
- * If execution gets here, flush the vm mapping and reset the direct
- * map. Find the start and end range of the direct mappings to make sure
- * the vm_unmap_aliases() flush includes the direct map.
- */
- for (i = 0; i < area->nr_pages; i += 1U << page_order) {
- unsigned long addr = (unsigned long)page_address(area->pages[i]);
- if (addr) {
- unsigned long page_size;
- page_size = PAGE_SIZE << page_order;
- start = min(addr, start);
- end = max(addr + page_size, end);
- flush_dmap = 1;
- }
- }
- /*
- * Set direct map to something invalid so that it won't be cached if
- * there are any accesses after the TLB flush, then flush the TLB and
- * reset the direct map permissions to the default.
- */
- set_area_direct_map(area, set_direct_map_invalid_noflush);
- _vm_unmap_aliases(start, end, flush_dmap);
- set_area_direct_map(area, set_direct_map_default_noflush);
- }
- static void __vunmap(const void *addr, int deallocate_pages)
- {
- struct vm_struct *area;
- if (!addr)
- return;
- if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
- addr))
- return;
- area = find_vm_area(addr);
- if (unlikely(!area)) {
- WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
- addr);
- return;
- }
- debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
- debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
- kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
- if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) &&
- area->flags & VM_IOREMAP)
- iounmap_phys_range_hook(area->phys_addr, get_vm_area_size(area));
- vm_remove_mappings(area, deallocate_pages);
- if (deallocate_pages) {
- int i;
- for (i = 0; i < area->nr_pages; i++) {
- struct page *page = area->pages[i];
- #ifdef CONFIG_RKP
- u64 va;
- #endif
- BUG_ON(!page);
- mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
- #ifdef CONFIG_RKP
- va = (u64)phys_to_virt(page_to_phys(page));
- if (is_rkp_ro_buffer(va))
- rkp_ro_free((void *)va);
- else
- __free_pages(page, 0);
- #else
- /*
- * High-order allocs for huge vmallocs are split, so
- * can be freed as an array of order-0 allocations
- */
- __free_pages(page, 0);
- #endif
- cond_resched();
- }
- atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
- kvfree(area->pages);
- }
- kfree(area);
- }
- static inline void __vfree_deferred(const void *addr)
- {
- /*
- * Use raw_cpu_ptr() because this can be called from preemptible
- * context. Preemption is absolutely fine here, because the llist_add()
- * implementation is lockless, so it works even if we are adding to
- * another cpu's list. schedule_work() should be fine with this too.
- */
- struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
- if (llist_add((struct llist_node *)addr, &p->list))
- schedule_work(&p->wq);
- }
- /**
- * vfree_atomic - release memory allocated by vmalloc()
- * @addr: memory base address
- *
- * This one is just like vfree() but can be called in any atomic context
- * except NMIs.
- */
- void vfree_atomic(const void *addr)
- {
- BUG_ON(in_nmi());
- kmemleak_free(addr);
- if (!addr)
- return;
- __vfree_deferred(addr);
- }
- static void __vfree(const void *addr)
- {
- if (unlikely(in_interrupt()))
- __vfree_deferred(addr);
- else
- __vunmap(addr, 1);
- }
- /**
- * vfree - Release memory allocated by vmalloc()
- * @addr: Memory base address
- *
- * Free the virtually contiguous memory area starting at @addr, as obtained
- * from one of the vmalloc() family of APIs. This will usually also free the
- * physical memory underlying the virtual allocation, but that memory is
- * reference counted, so it will not be freed until the last user goes away.
- *
- * If @addr is NULL, no operation is performed.
- *
- * Context:
- * May sleep if called *not* from interrupt context.
- * Must not be called in NMI context (strictly speaking, it could be
- * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
- * conventions for vfree() arch-dependent would be a really bad idea).
- */
- void vfree(const void *addr)
- {
- BUG_ON(in_nmi());
- kmemleak_free(addr);
- might_sleep_if(!in_interrupt());
- if (!addr)
- return;
- __vfree(addr);
- }
- EXPORT_SYMBOL(vfree);
- /**
- * vunmap - release virtual mapping obtained by vmap()
- * @addr: memory base address
- *
- * Free the virtually contiguous memory area starting at @addr,
- * which was created from the page array passed to vmap().
- *
- * Must not be called in interrupt context.
- */
- void vunmap(const void *addr)
- {
- BUG_ON(in_interrupt());
- might_sleep();
- if (addr)
- __vunmap(addr, 0);
- }
- EXPORT_SYMBOL(vunmap);
- /**
- * vmap - map an array of pages into virtually contiguous space
- * @pages: array of page pointers
- * @count: number of pages to map
- * @flags: vm_area->flags
- * @prot: page protection for the mapping
- *
- * Maps @count pages from @pages into contiguous kernel virtual space.
- * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
- * (which must be kmalloc or vmalloc memory) and one reference per page in it
- * are transferred from the caller to vmap(), and will be freed / dropped when
- * vfree() is called on the return value.
- *
- * Return: the address of the area or %NULL on failure
- */
- void *vmap(struct page **pages, unsigned int count,
- unsigned long flags, pgprot_t prot)
- {
- struct vm_struct *area;
- unsigned long addr;
- unsigned long size; /* In bytes */
- might_sleep();
- /*
- * Your top guard is someone else's bottom guard. Not having a top
- * guard compromises someone else's mappings too.
- */
- if (WARN_ON_ONCE(flags & VM_NO_GUARD))
- flags &= ~VM_NO_GUARD;
- if (count > totalram_pages())
- return NULL;
- size = (unsigned long)count << PAGE_SHIFT;
- area = get_vm_area_caller(size, flags, __builtin_return_address(0));
- if (!area)
- return NULL;
- addr = (unsigned long)area->addr;
- if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
- pages, PAGE_SHIFT) < 0) {
- vunmap(area->addr);
- return NULL;
- }
- if (flags & VM_MAP_PUT_PAGES) {
- area->pages = pages;
- area->nr_pages = count;
- }
- return area->addr;
- }
- EXPORT_SYMBOL(vmap);
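- /*
- * Hypothetical usage sketch (not part of this file): stitch separately
- * allocated pages into one contiguous kernel mapping with VM_MAP. Without
- * VM_MAP_PUT_PAGES the caller keeps ownership of the page array and the
- * page references and tears everything down itself; kfree() below assumes
- * the array came from kmalloc(). The "example_" helpers are illustrative.
- */
- static void *example_vmap_pages(struct page **pages, unsigned int count)
- {
-         return vmap(pages, count, VM_MAP, PAGE_KERNEL);
- }
- static void example_vunmap_pages(void *addr, struct page **pages, unsigned int count)
- {
-         unsigned int i;
-         vunmap(addr);  /* drop the virtual mapping only */
-         for (i = 0; i < count; i++)
-                 __free_page(pages[i]);  /* then release the pages themselves */
-         kfree(pages);
- }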
- #ifdef CONFIG_VMAP_PFN
- struct vmap_pfn_data {
- unsigned long *pfns;
- pgprot_t prot;
- unsigned int idx;
- };
- static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
- {
- struct vmap_pfn_data *data = private;
- if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
- return -EINVAL;
- *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
- return 0;
- }
- /**
- * vmap_pfn - map an array of PFNs into virtually contiguous space
- * @pfns: array of PFNs
- * @count: number of pages to map
- * @prot: page protection for the mapping
- *
- * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
- * the start address of the mapping.
- */
- void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
- {
- struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
- struct vm_struct *area;
- area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
- __builtin_return_address(0));
- if (!area)
- return NULL;
- if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
- count * PAGE_SIZE, vmap_pfn_apply, &data)) {
- free_vm_area(area);
- return NULL;
- }
- flush_cache_vmap((unsigned long)area->addr,
- (unsigned long)area->addr + count * PAGE_SIZE);
- return area->addr;
- }
- EXPORT_SYMBOL_GPL(vmap_pfn);
- #endif /* CONFIG_VMAP_PFN */
- static inline unsigned int
- vm_area_alloc_pages(gfp_t gfp, int nid,
- unsigned int order, unsigned int nr_pages, struct page **pages)
- {
- unsigned int nr_allocated = 0;
- struct page *page;
- int i;
- /*
- * For order-0 pages we make use of the bulk allocator. If
- * the page array ends up partly populated, or not populated
- * at all, due to failures, fall back to the single page
- * allocator, which is more permissive.
- */
- if (!order) {
- gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
- while (nr_allocated < nr_pages) {
- unsigned int nr, nr_pages_request;
- /*
- * A maximum allowed request is hard-coded and is 100
- * pages per call. That is done in order to prevent a
- * long preemption-off scenario in the bulk allocator,
- * so the range is [1:100].
- */
- nr_pages_request = min(100U, nr_pages - nr_allocated);
- /* Memory allocation should consider mempolicy: we can't
- * blindly use the nearest node when nid == NUMA_NO_NODE,
- * otherwise memory may be allocated on only one node,
- * while the mempolicy wants to interleave allocations.
- */
- if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
- nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
- nr_pages_request,
- pages + nr_allocated);
- else
- nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
- nr_pages_request,
- pages + nr_allocated);
- nr_allocated += nr;
- cond_resched();
- /*
- * If zero pages were obtained, or they were obtained only
- * partly, fall back to the single page allocator.
- */
- if (nr != nr_pages_request)
- break;
- }
- }
- /* High-order pages or fallback path if "bulk" fails. */
- while (nr_allocated < nr_pages) {
- if (fatal_signal_pending(current))
- break;
- if (nid == NUMA_NO_NODE)
- page = alloc_pages(gfp, order);
- else
- page = alloc_pages_node(nid, gfp, order);
- if (unlikely(!page))
- break;
- /*
- * Higher order allocations must be able to be treated as
- * independent small pages by callers (as they can with
- * small-page vmallocs). Some drivers do their own refcounting
- * on vmalloc_to_page() pages, some use page->mapping,
- * page->lru, etc.
- */
- if (order)
- split_page(page, order);
- /*
- * Careful, we allocate and map page-order pages, but
- * tracking is done per PAGE_SIZE page so as to keep the
- * vm_struct APIs independent of the physical/mapped size.
- */
- for (i = 0; i < (1U << order); i++)
- pages[nr_allocated + i] = page + i;
- cond_resched();
- nr_allocated += 1U << order;
- }
- return nr_allocated;
- }
- static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, unsigned int page_shift,
- int node)
- {
- const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
- bool nofail = gfp_mask & __GFP_NOFAIL;
- unsigned long addr = (unsigned long)area->addr;
- unsigned long size = get_vm_area_size(area);
- unsigned long array_size;
- unsigned int nr_small_pages = size >> PAGE_SHIFT;
- unsigned int page_order;
- unsigned int flags;
- int ret;
- array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
- gfp_mask |= __GFP_NOWARN;
- if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
- gfp_mask |= __GFP_HIGHMEM;
- /* Please note that the recursion is strictly bounded. */
- if (array_size > PAGE_SIZE) {
- area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
- area->caller);
- } else {
- area->pages = kmalloc_node(array_size, nested_gfp, node);
- }
- if (!area->pages) {
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, failed to allocated page array size %lu",
- nr_small_pages * PAGE_SIZE, array_size);
- free_vm_area(area);
- return NULL;
- }
- set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
- page_order = vm_area_page_order(area);
- area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
- node, page_order, nr_small_pages, area->pages);
- atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- if (gfp_mask & __GFP_ACCOUNT) {
- int i;
- for (i = 0; i < area->nr_pages; i++)
- mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
- }
- /*
- * If not enough pages were obtained to satisfy the
- * allocation request, free whatever was obtained via __vfree().
- */
- if (area->nr_pages != nr_small_pages) {
- /* vm_area_alloc_pages() can also fail due to a fatal signal */
- if (!fatal_signal_pending(current))
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, page order %u, failed to allocate pages",
- area->nr_pages * PAGE_SIZE, page_order);
- goto fail;
- }
- /*
- * Page table allocations ignore the external gfp mask; enforce it
- * via the scope API.
- */
- if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
- flags = memalloc_nofs_save();
- else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
- flags = memalloc_noio_save();
- do {
- ret = vmap_pages_range(addr, addr + size, prot, area->pages,
- page_shift);
- if (nofail && (ret < 0))
- schedule_timeout_uninterruptible(1);
- } while (nofail && (ret < 0));
- if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
- memalloc_nofs_restore(flags);
- else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
- memalloc_noio_restore(flags);
- if (ret < 0) {
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, failed to map pages",
- area->nr_pages * PAGE_SIZE);
- goto fail;
- }
- return area->addr;
- fail:
- __vfree(area->addr);
- return NULL;
- }
- /**
- * __vmalloc_node_range - allocate virtually contiguous memory
- * @size: allocation size
- * @align: desired alignment
- * @start: vm area range start
- * @end: vm area range end
- * @gfp_mask: flags for the page level allocator
- * @prot: protection mask for the allocated pages
- * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
- * @node: node to use for allocation or NUMA_NO_NODE
- * @caller: caller's return address
- *
- * Allocate enough pages to cover @size from the page level
- * allocator with @gfp_mask flags. Please note that the full set of gfp
- * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
- * supported.
- * Zone modifiers are not supported. From the reclaim modifiers
- * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
- * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
- * __GFP_RETRY_MAYFAIL are not supported).
- *
- * __GFP_NOWARN can be used to suppress failure messages.
- *
- * Map them into contiguous kernel virtual space, using a pagetable
- * protection of @prot.
- *
- * Return: the address of the area or %NULL on failure
- */
- void *__vmalloc_node_range(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller)
- {
- struct vm_struct *area;
- void *ret;
- kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
- unsigned long real_size = size;
- unsigned long real_align = align;
- unsigned int shift = PAGE_SHIFT;
- if (WARN_ON_ONCE(!size))
- return NULL;
- if ((size >> PAGE_SHIFT) > totalram_pages()) {
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, exceeds total pages",
- real_size);
- return NULL;
- }
- if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
- unsigned long size_per_node;
- /*
- * Try huge pages. Only try for PAGE_KERNEL allocations,
- * others like modules don't yet expect huge pages in
- * their allocations due to apply_to_page_range not
- * supporting them.
- */
- size_per_node = size;
- if (node == NUMA_NO_NODE)
- size_per_node /= num_online_nodes();
- if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
- shift = PMD_SHIFT;
- else
- shift = arch_vmap_pte_supported_shift(size_per_node);
- align = max(real_align, 1UL << shift);
- size = ALIGN(real_size, 1UL << shift);
- }
- again:
- area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
- VM_UNINITIALIZED | vm_flags, start, end, node,
- gfp_mask, caller);
- if (!area) {
- bool nofail = gfp_mask & __GFP_NOFAIL;
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, vm_struct allocation failed%s",
- real_size, (nofail) ? ". Retrying." : "");
- if (nofail) {
- schedule_timeout_uninterruptible(1);
- goto again;
- }
- goto fail;
- }
- /*
- * Prepare arguments for __vmalloc_area_node() and
- * kasan_unpoison_vmalloc().
- */
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
- if (kasan_hw_tags_enabled()) {
- /*
- * Modify protection bits to allow tagging.
- * This must be done before mapping.
- */
- prot = arch_vmap_pgprot_tagged(prot);
- /*
- * Skip page_alloc poisoning and zeroing for physical
- * pages backing VM_ALLOC mapping. Memory is instead
- * poisoned and zeroed by kasan_unpoison_vmalloc().
- */
- gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
- }
- /* Take note that the mapping is PAGE_KERNEL. */
- kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
- }
- /* Allocate physical pages and map them into vmalloc space. */
- ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
- if (!ret)
- goto fail;
- /*
- * Mark the pages as accessible, now that they are mapped.
- * The condition for setting KASAN_VMALLOC_INIT should complement the
- * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
- * to make sure that memory is initialized under the same conditions.
- * Tag-based KASAN modes only assign tags to normal non-executable
- * allocations, see __kasan_unpoison_vmalloc().
- */
- kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
- if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
- (gfp_mask & __GFP_SKIP_ZERO))
- kasan_flags |= KASAN_VMALLOC_INIT;
- /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
- area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
- /*
- * In this function, newly allocated vm_struct has VM_UNINITIALIZED
- * flag. It means that vm_struct is not fully initialized.
- * Now, it is fully initialized, so remove this flag here.
- */
- clear_vm_uninitialized_flag(area);
- size = PAGE_ALIGN(size);
- if (!(vm_flags & VM_DEFER_KMEMLEAK))
- kmemleak_vmalloc(area, size, gfp_mask);
- return area->addr;
- fail:
- if (shift > PAGE_SHIFT) {
- shift = PAGE_SHIFT;
- align = real_align;
- size = real_size;
- goto again;
- }
- return NULL;
- }
- /**
- * __vmalloc_node - allocate virtually contiguous memory
- * @size: allocation size
- * @align: desired alignment
- * @gfp_mask: flags for the page level allocator
- * @node: node to use for allocation or NUMA_NO_NODE
- * @caller: caller's return address
- *
- * Allocate enough pages to cover @size from the page level allocator with
- * @gfp_mask flags. Map them into contiguous kernel virtual space.
- *
- * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
- * and __GFP_NOFAIL are not supported
- *
- * Any use of gfp flags outside of GFP_KERNEL should be consulted
- * with mm people.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *__vmalloc_node(unsigned long size, unsigned long align,
- gfp_t gfp_mask, int node, const void *caller)
- {
- return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
- gfp_mask, PAGE_KERNEL, 0, node, caller);
- }
- /*
- * This is only for performance analysis of vmalloc and for stress purposes.
- * It is required by the vmalloc test module, therefore do not use it
- * for anything else.
- */
- #ifdef CONFIG_TEST_VMALLOC_MODULE
- EXPORT_SYMBOL_GPL(__vmalloc_node);
- #endif
- void *__vmalloc(unsigned long size, gfp_t gfp_mask)
- {
- return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(__vmalloc);
- /**
- * vmalloc - allocate virtually contiguous memory
- * @size: allocation size
- *
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vmalloc(unsigned long size)
- {
- return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(vmalloc);
- /**
- * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
- * @size: allocation size
- * @gfp_mask: flags for the page level allocator
- *
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
- * If @size is greater than or equal to PMD_SIZE, allow using
- * huge pages for the memory
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
- {
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
- NUMA_NO_NODE, __builtin_return_address(0));
- }
- EXPORT_SYMBOL_GPL(vmalloc_huge);
- /**
- * vzalloc - allocate virtually contiguous memory with zero fill
- * @size: allocation size
- *
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
- * The memory allocated is set to zero.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vzalloc(unsigned long size)
- {
- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(vzalloc);
- /**
- * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
- * @size: allocation size
- *
- * The resulting memory area is zeroed so it can be mapped to userspace
- * without leaking data.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vmalloc_user(unsigned long size)
- {
- return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
- VM_USERMAP, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(vmalloc_user);
- /**
- * vmalloc_node - allocate memory on a specific node
- * @size: allocation size
- * @node: numa node
- *
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vmalloc_node(unsigned long size, int node)
- {
- return __vmalloc_node(size, 1, GFP_KERNEL, node,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(vmalloc_node);
- /**
- * vzalloc_node - allocate memory on a specific node with zero fill
- * @size: allocation size
- * @node: numa node
- *
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
- * The memory allocated is set to zero.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vzalloc_node(unsigned long size, int node)
- {
- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(vzalloc_node);
- #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
- #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
- #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
- #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
- #else
- /*
- * 64b systems should always have either DMA or DMA32 zones. For others
- * GFP_DMA32 should do the right thing and use the normal zone.
- */
- #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
- #endif
- /**
- * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
- * @size: allocation size
- *
- * Allocate enough 32bit PA addressable pages to cover @size from the
- * page level allocator and map them into contiguous kernel virtual space.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vmalloc_32(unsigned long size)
- {
- return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(vmalloc_32);
- /**
- * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
- * @size: allocation size
- *
- * The resulting memory area is 32bit addressable and zeroed so it can be
- * mapped to userspace without leaking data.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
- void *vmalloc_32_user(unsigned long size)
- {
- return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
- GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
- VM_USERMAP, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(vmalloc_32_user);
- /*
- * Small helper routine: copy contents to buf from addr.
- * If the page is not present, fill with zeroes.
- */
- static int aligned_vread(char *buf, char *addr, unsigned long count)
- {
- struct page *p;
- int copied = 0;
- while (count) {
- unsigned long offset, length;
- offset = offset_in_page(addr);
- length = PAGE_SIZE - offset;
- if (length > count)
- length = count;
- p = vmalloc_to_page(addr);
- /*
- * To access this _mapped_ area safely, we would need a lock.
- * But taking a lock here means adding overhead to vmalloc()/vfree()
- * calls for this rarely used _debug_ interface. Instead, we use
- * kmap_atomic() and accept a small overhead in this access
- * function.
- */
- if (p) {
- /* We can expect USER0 is not used -- see vread() */
- void *map = kmap_atomic(p);
- memcpy(buf, map + offset, length);
- kunmap_atomic(map);
- } else
- memset(buf, 0, length);
- addr += length;
- buf += length;
- copied += length;
- count -= length;
- }
- return copied;
- }
- /**
- * vread() - read vmalloc area in a safe way.
- * @buf: buffer for reading data
- * @addr: vm address.
- * @count: number of bytes to be read.
- *
- * This function checks that addr is a valid vmalloc'ed area and
- * copies data from that area to a given buffer. If the given memory range
- * of [addr...addr+count) includes some valid address, data is copied to
- * the proper area of @buf. If there are memory holes, they'll be zero-filled.
- * IOREMAP areas are treated as memory holes and no copy is done.
- *
- * If [addr...addr+count) doesn't include any intersection with a live
- * vm_struct area, returns 0. @buf should be a kernel buffer.
- *
- * Note: In usual ops, vread() is never necessary because the caller
- * should know the vmalloc() area is valid and can use memcpy().
- * This is for routines which have to access the vmalloc area without
- * any information, such as /proc/kcore.
- *
- * Return: number of bytes for which addr and buf should be increased
- * (same number as @count) or %0 if [addr...addr+count) doesn't
- * include any intersection with valid vmalloc area
- */
- long vread(char *buf, char *addr, unsigned long count)
- {
- struct vmap_area *va;
- struct vm_struct *vm;
- char *vaddr, *buf_start = buf;
- unsigned long buflen = count;
- unsigned long n;
- addr = kasan_reset_tag(addr);
- /* Don't allow overflow */
- if ((unsigned long) addr + count < count)
- count = -(unsigned long) addr;
- spin_lock(&vmap_area_lock);
- va = find_vmap_area_exceed_addr((unsigned long)addr);
- if (!va)
- goto finished;
- /* no intersects with alive vmap_area */
- if ((unsigned long)addr + count <= va->va_start)
- goto finished;
- list_for_each_entry_from(va, &vmap_area_list, list) {
- if (!count)
- break;
- if (!va->vm)
- continue;
- vm = va->vm;
- vaddr = (char *) vm->addr;
- if (addr >= vaddr + get_vm_area_size(vm))
- continue;
- while (addr < vaddr) {
- if (count == 0)
- goto finished;
- *buf = '\0';
- buf++;
- addr++;
- count--;
- }
- n = vaddr + get_vm_area_size(vm) - addr;
- if (n > count)
- n = count;
- if (!(vm->flags & VM_IOREMAP))
- aligned_vread(buf, addr, n);
- else /* IOREMAP area is treated as memory hole */
- memset(buf, 0, n);
- buf += n;
- addr += n;
- count -= n;
- }
- finished:
- spin_unlock(&vmap_area_lock);
- if (buf == buf_start)
- return 0;
- /* zero-fill memory holes */
- if (buf != buf_start + buflen)
- memset(buf, 0, buflen - (buf - buf_start));
- return buflen;
- }
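- /*
- * Editor's sketch (not part of this file): a minimal kcore-style
- * reader built on vread(). The helper name and error code choice are
- * hypothetical; vread() zero-fills holes and returns 0 only when the
- * range has no intersection with a live vmalloc area.
- */
- static long example_dump_vmalloc_range(char *kbuf, void *vaddr, unsigned long len)
- {
- long copied;
- /* @kbuf must be a kernel buffer of at least @len bytes */
- copied = vread(kbuf, (char *)vaddr, len);
- if (!copied)
- return -ENXIO; /* no live vmalloc mapping in the range */
- return copied; /* equals @len; holes were zero-filled */
- }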
- /**
- * remap_vmalloc_range_partial - map vmalloc pages to userspace
- * @vma: vma to cover
- * @uaddr: target user address to start at
- * @kaddr: virtual address of vmalloc kernel memory
- * @pgoff: offset from @kaddr to start at
- * @size: size of map area
- *
- * Returns: 0 for success, -Exxx on failure
- *
- * This function checks that @kaddr is a valid vmalloc'ed area,
- * and that it is big enough to cover the range starting at
- * @uaddr in @vma. Will return failure if that criterion isn't
- * met.
- *
- * Similar to remap_pfn_range() (see mm/memory.c)
- */
- int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
- void *kaddr, unsigned long pgoff,
- unsigned long size)
- {
- struct vm_struct *area;
- unsigned long off;
- unsigned long end_index;
- if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
- return -EINVAL;
- size = PAGE_ALIGN(size);
- if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
- return -EINVAL;
- area = find_vm_area(kaddr);
- if (!area)
- return -EINVAL;
- if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
- return -EINVAL;
- if (check_add_overflow(size, off, &end_index) ||
- end_index > get_vm_area_size(area))
- return -EINVAL;
- kaddr += off;
- do {
- struct page *page = vmalloc_to_page(kaddr);
- int ret;
- ret = vm_insert_page(vma, uaddr, page);
- if (ret)
- return ret;
- uaddr += PAGE_SIZE;
- kaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- } while (size > 0);
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
- return 0;
- }
- /**
- * remap_vmalloc_range - map vmalloc pages to userspace
- * @vma: vma to cover (map full range of vma)
- * @addr: vmalloc memory
- * @pgoff: number of pages into addr before first page to map
- *
- * Returns: 0 for success, -Exxx on failure
- *
- * This function checks that addr is a valid vmalloc'ed area, and
- * that it is big enough to cover the vma. Will return failure if
- * that criterion isn't met.
- *
- * Similar to remap_pfn_range() (see mm/memory.c)
- */
- int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
- unsigned long pgoff)
- {
- return remap_vmalloc_range_partial(vma, vma->vm_start,
- addr, pgoff,
- vma->vm_end - vma->vm_start);
- }
- EXPORT_SYMBOL(remap_vmalloc_range);
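- /*
- * Editor's sketch (not part of this file): a typical driver ->mmap
- * handler exposing a buffer to userspace with remap_vmalloc_range().
- * "example_buf" and "example_mmap" are hypothetical names; the buffer
- * must have been allocated with vmalloc_user()/vmalloc_32_user() so
- * that VM_USERMAP is set, otherwise the remap is rejected with -EINVAL.
- */
- static void *example_buf; /* example_buf = vmalloc_user(EXAMPLE_BUF_SIZE); */
- static int example_mmap(struct file *file, struct vm_area_struct *vma)
- {
- /* map the whole buffer from its start; a non-zero pgoff would skip pages of example_buf */
- return remap_vmalloc_range(vma, example_buf, 0);
- }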
- void free_vm_area(struct vm_struct *area)
- {
- struct vm_struct *ret;
- ret = remove_vm_area(area->addr);
- BUG_ON(ret != area);
- kfree(area);
- }
- EXPORT_SYMBOL_GPL(free_vm_area);
- #ifdef CONFIG_SMP
- static struct vmap_area *node_to_va(struct rb_node *n)
- {
- return rb_entry_safe(n, struct vmap_area, rb_node);
- }
- /**
- * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
- * @addr: target address
- *
- * Returns: the vmap_area if it is found. If there is no such area,
- * the closest vmap_area below @addr is returned (reverse order),
- * i.e. one with va->va_start < addr && va->va_end < addr, or NULL
- * if there is no area at all before @addr.
- */
- static struct vmap_area *
- pvm_find_va_enclose_addr(unsigned long addr)
- {
- struct vmap_area *va, *tmp;
- struct rb_node *n;
- n = free_vmap_area_root.rb_node;
- va = NULL;
- while (n) {
- tmp = rb_entry(n, struct vmap_area, rb_node);
- if (tmp->va_start <= addr) {
- va = tmp;
- if (tmp->va_end >= addr)
- break;
- n = n->rb_right;
- } else {
- n = n->rb_left;
- }
- }
- return va;
- }
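- /*
- * Editor's note (illustrative, hypothetical addresses): with free areas
- * [0x1000, 0x2000) and [0x4000, 0x5000), pvm_find_va_enclose_addr(0x4800)
- * returns the second area (it encloses the address), while
- * pvm_find_va_enclose_addr(0x3000) returns the first area (the closest
- * one below the address) and pvm_find_va_enclose_addr(0x800) returns NULL.
- */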
- /**
- * pvm_determine_end_from_reverse - find the highest aligned address
- * of a free block below VMALLOC_END
- * @va:
- * in - the VA we start the search from (reverse order);
- * out - the VA with the highest aligned end address.
- * @align: alignment for required highest address
- *
- * Returns: determined end address within vmap_area
- */
- static unsigned long
- pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
- {
- unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
- unsigned long addr;
- if (likely(*va)) {
- list_for_each_entry_from_reverse((*va),
- &free_vmap_area_list, list) {
- addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
- if ((*va)->va_start < addr)
- return addr;
- }
- }
- return 0;
- }
- /**
- * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
- * @offsets: array containing offset of each area
- * @sizes: array containing size of each area
- * @nr_vms: the number of areas to allocate
- * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- *
- * Returns: kmalloc'd vm_struct pointer array pointing to allocated
- * vm_structs on success, %NULL on failure
- *
- * Percpu allocator wants to use congruent vm areas so that it can
- * maintain the offsets among percpu areas. This function allocates
- * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
- * be scattered pretty far apart; the distance between two areas can
- * easily reach gigabytes. To avoid interacting with regular vmallocs,
- * these areas are allocated from the top of the vmalloc address space.
- *
- * Despite its complicated look, this allocator is rather simple. It
- * does everything top-down and scans free blocks from the end looking
- * for a matching base. While scanning, if any of the areas does not
- * fit, the base address is pulled down to fit that area. Scanning is
- * repeated until all the areas fit, then all necessary data structures
- * are inserted
- * and the result is returned.
- */
- struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
- const size_t *sizes, int nr_vms,
- size_t align)
- {
- const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
- const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
- struct vmap_area **vas, *va;
- struct vm_struct **vms;
- int area, area2, last_area, term_area;
- unsigned long base, start, size, end, last_end, orig_start, orig_end;
- bool purged = false;
- /* verify parameters and allocate data structures */
- BUG_ON(offset_in_page(align) || !is_power_of_2(align));
- for (last_area = 0, area = 0; area < nr_vms; area++) {
- start = offsets[area];
- end = start + sizes[area];
- /* is everything aligned properly? */
- BUG_ON(!IS_ALIGNED(offsets[area], align));
- BUG_ON(!IS_ALIGNED(sizes[area], align));
- /* detect the area with the highest address */
- if (start > offsets[last_area])
- last_area = area;
- for (area2 = area + 1; area2 < nr_vms; area2++) {
- unsigned long start2 = offsets[area2];
- unsigned long end2 = start2 + sizes[area2];
- BUG_ON(start2 < end && start < end2);
- }
- }
- last_end = offsets[last_area] + sizes[last_area];
- if (vmalloc_end - vmalloc_start < last_end) {
- WARN_ON(true);
- return NULL;
- }
- vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
- vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
- if (!vas || !vms)
- goto err_free2;
- for (area = 0; area < nr_vms; area++) {
- vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
- vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
- if (!vas[area] || !vms[area])
- goto err_free;
- }
- retry:
- spin_lock(&free_vmap_area_lock);
- /* start scanning - we scan from the top, begin with the last area */
- area = term_area = last_area;
- start = offsets[area];
- end = start + sizes[area];
- va = pvm_find_va_enclose_addr(vmalloc_end);
- base = pvm_determine_end_from_reverse(&va, align) - end;
- while (true) {
- /*
- * base might have underflowed, add last_end before
- * comparing.
- */
- if (base + last_end < vmalloc_start + last_end)
- goto overflow;
- /*
- * Fitting base has not been found.
- */
- if (va == NULL)
- goto overflow;
- /*
- * If required width exceeds current VA block, move
- * base downwards and then recheck.
- */
- if (base + end > va->va_end) {
- base = pvm_determine_end_from_reverse(&va, align) - end;
- term_area = area;
- continue;
- }
- /*
- * If this VA does not fit, move base downwards and recheck.
- */
- if (base + start < va->va_start) {
- va = node_to_va(rb_prev(&va->rb_node));
- base = pvm_determine_end_from_reverse(&va, align) - end;
- term_area = area;
- continue;
- }
- /*
- * This area fits, move on to the previous one. If
- * the previous one is the terminal one, we're done.
- */
- area = (area + nr_vms - 1) % nr_vms;
- if (area == term_area)
- break;
- start = offsets[area];
- end = start + sizes[area];
- va = pvm_find_va_enclose_addr(base + end);
- }
- /* we've found a fitting base, insert all va's */
- for (area = 0; area < nr_vms; area++) {
- int ret;
- start = base + offsets[area];
- size = sizes[area];
- va = pvm_find_va_enclose_addr(start);
- if (WARN_ON_ONCE(va == NULL))
- /* It is a BUG(), but trigger recovery instead. */
- goto recovery;
- ret = adjust_va_to_fit_type(&free_vmap_area_root,
- &free_vmap_area_list,
- va, start, size);
- if (WARN_ON_ONCE(unlikely(ret)))
- /* It is a BUG(), but trigger recovery instead. */
- goto recovery;
- /* Allocated area. */
- va = vas[area];
- va->va_start = start;
- va->va_end = start + size;
- }
- spin_unlock(&free_vmap_area_lock);
- /* populate the kasan shadow space */
- for (area = 0; area < nr_vms; area++) {
- if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
- goto err_free_shadow;
- }
- /* insert all vm's */
- spin_lock(&vmap_area_lock);
- for (area = 0; area < nr_vms; area++) {
- insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
- setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
- pcpu_get_vm_areas);
- }
- spin_unlock(&vmap_area_lock);
- /*
- * Mark allocated areas as accessible. Do it now as a best-effort
- * approach, as they can be mapped outside of vmalloc code.
- * With hardware tag-based KASAN, marking is skipped for
- * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
- */
- for (area = 0; area < nr_vms; area++)
- vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
- vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
- kfree(vas);
- return vms;
- recovery:
- /*
- * Remove previously allocated areas. There is no
- * need to remove these areas from the busy tree,
- * because they are inserted only in the final step,
- * and only when pcpu_get_vm_areas() succeeds.
- */
- while (area--) {
- orig_start = vas[area]->va_start;
- orig_end = vas[area]->va_end;
- va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
- &free_vmap_area_list);
- if (va)
- kasan_release_vmalloc(orig_start, orig_end,
- va->va_start, va->va_end);
- vas[area] = NULL;
- }
- overflow:
- spin_unlock(&free_vmap_area_lock);
- if (!purged) {
- purge_vmap_area_lazy();
- purged = true;
- /* Before "retry", check if we recover. */
- for (area = 0; area < nr_vms; area++) {
- if (vas[area])
- continue;
- vas[area] = kmem_cache_zalloc(
- vmap_area_cachep, GFP_KERNEL);
- if (!vas[area])
- goto err_free;
- }
- goto retry;
- }
- err_free:
- for (area = 0; area < nr_vms; area++) {
- if (vas[area])
- kmem_cache_free(vmap_area_cachep, vas[area]);
- kfree(vms[area]);
- }
- err_free2:
- kfree(vas);
- kfree(vms);
- return NULL;
- err_free_shadow:
- spin_lock(&free_vmap_area_lock);
- /*
- * We release all the vmalloc shadows, even the ones for regions that
- * hadn't been successfully added. This relies on kasan_release_vmalloc
- * being able to tolerate this case.
- */
- for (area = 0; area < nr_vms; area++) {
- orig_start = vas[area]->va_start;
- orig_end = vas[area]->va_end;
- va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
- &free_vmap_area_list);
- if (va)
- kasan_release_vmalloc(orig_start, orig_end,
- va->va_start, va->va_end);
- vas[area] = NULL;
- kfree(vms[area]);
- }
- spin_unlock(&free_vmap_area_lock);
- kfree(vas);
- kfree(vms);
- return NULL;
- }
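- /*
- * Editor's sketch (not part of this file): roughly how a percpu chunk
- * creation path uses pcpu_get_vm_areas()/pcpu_free_vm_areas(). The two
- * group offsets, sizes and the page-size alignment below are
- * hypothetical values, not the real percpu first-chunk layout.
- */
- static int example_grab_percpu_areas(void)
- {
- static const unsigned long offsets[] = { 0, 4UL << 20 };
- static const size_t sizes[] = { 1UL << 20, 1UL << 20 };
- struct vm_struct **vms;
- /* offsets and sizes must be aligned to the requested alignment */
- vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PAGE_SIZE);
- if (!vms)
- return -ENOMEM;
- /* ... map pages into vms[i]->addr, register the chunk, ... */
- pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
- return 0;
- }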
- /**
- * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
- * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
- * @nr_vms: the number of allocated areas
- *
- * Free vm_structs and the array allocated by pcpu_get_vm_areas().
- */
- void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
- {
- int i;
- for (i = 0; i < nr_vms; i++)
- free_vm_area(vms[i]);
- kfree(vms);
- }
- #endif /* CONFIG_SMP */
- #ifdef CONFIG_PRINTK
- bool vmalloc_dump_obj(void *object)
- {
- void *objp = (void *)PAGE_ALIGN((unsigned long)object);
- const void *caller;
- struct vm_struct *vm;
- struct vmap_area *va;
- unsigned long addr;
- unsigned int nr_pages;
- if (!spin_trylock(&vmap_area_lock))
- return false;
- va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
- if (!va) {
- spin_unlock(&vmap_area_lock);
- return false;
- }
- vm = va->vm;
- if (!vm) {
- spin_unlock(&vmap_area_lock);
- return false;
- }
- addr = (unsigned long)vm->addr;
- caller = vm->caller;
- nr_pages = vm->nr_pages;
- spin_unlock(&vmap_area_lock);
- pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
- nr_pages, addr, caller);
- return true;
- }
- #endif
- #ifdef CONFIG_PROC_FS
- static void *s_start(struct seq_file *m, loff_t *pos)
- __acquires(&vmap_purge_lock)
- __acquires(&vmap_area_lock)
- {
- mutex_lock(&vmap_purge_lock);
- spin_lock(&vmap_area_lock);
- return seq_list_start(&vmap_area_list, *pos);
- }
- static void *s_next(struct seq_file *m, void *p, loff_t *pos)
- {
- return seq_list_next(p, &vmap_area_list, pos);
- }
- static void s_stop(struct seq_file *m, void *p)
- __releases(&vmap_area_lock)
- __releases(&vmap_purge_lock)
- {
- spin_unlock(&vmap_area_lock);
- mutex_unlock(&vmap_purge_lock);
- }
- static void show_numa_info(struct seq_file *m, struct vm_struct *v)
- {
- if (IS_ENABLED(CONFIG_NUMA)) {
- unsigned int nr, *counters = m->private;
- unsigned int step = 1U << vm_area_page_order(v);
- if (!counters)
- return;
- if (v->flags & VM_UNINITIALIZED)
- return;
- /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
- smp_rmb();
- memset(counters, 0, nr_node_ids * sizeof(unsigned int));
- for (nr = 0; nr < v->nr_pages; nr += step)
- counters[page_to_nid(v->pages[nr])] += step;
- for_each_node_state(nr, N_HIGH_MEMORY)
- if (counters[nr])
- seq_printf(m, " N%u=%u", nr, counters[nr]);
- }
- }
- static void show_purge_info(struct seq_file *m)
- {
- struct vmap_area *va;
- spin_lock(&purge_vmap_area_lock);
- list_for_each_entry(va, &purge_vmap_area_list, list) {
- seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
- (void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start);
- }
- spin_unlock(&purge_vmap_area_lock);
- }
- static int s_show(struct seq_file *m, void *p)
- {
- struct vmap_area *va;
- struct vm_struct *v;
- va = list_entry(p, struct vmap_area, list);
- /*
- * s_show can race with remove_vm_area(): a NULL ->vm means the
- * vmap area is being torn down or is a vm_map_ram allocation.
- */
- if (!va->vm) {
- seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
- (void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start);
- goto final;
- }
- v = va->vm;
- seq_printf(m, "0x%pK-0x%pK %7ld",
- v->addr, v->addr + v->size, v->size);
- if (v->caller)
- seq_printf(m, " %pS", v->caller);
- if (v->nr_pages)
- seq_printf(m, " pages=%d", v->nr_pages);
- if (v->phys_addr)
- seq_printf(m, " phys=%pa", &v->phys_addr);
- if (v->flags & VM_IOREMAP)
- seq_puts(m, " ioremap");
- if (v->flags & VM_ALLOC)
- seq_puts(m, " vmalloc");
- if (v->flags & VM_MAP)
- seq_puts(m, " vmap");
- if (v->flags & VM_USERMAP)
- seq_puts(m, " user");
- if (v->flags & VM_DMA_COHERENT)
- seq_puts(m, " dma-coherent");
- if (is_vmalloc_addr(v->pages))
- seq_puts(m, " vpages");
- show_numa_info(m, v);
- seq_putc(m, '\n');
- /*
- * As a final step, dump "unpurged" areas.
- */
- final:
- if (list_is_last(&va->list, &vmap_area_list))
- show_purge_info(m);
- return 0;
- }
- static const struct seq_operations vmalloc_op = {
- .start = s_start,
- .next = s_next,
- .stop = s_stop,
- .show = s_show,
- };
- static int __init proc_vmalloc_init(void)
- {
- if (IS_ENABLED(CONFIG_NUMA))
- proc_create_seq_private("vmallocinfo", 0400, NULL,
- &vmalloc_op,
- nr_node_ids * sizeof(unsigned int), NULL);
- else
- proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
- return 0;
- }
- module_init(proc_vmalloc_init);
- #endif
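- /*
- * Editor's sketch (not part of this file): a small userspace C program
- * that dumps /proc/vmallocinfo, the file provided by the seq_file
- * interface above (root-only, mode 0400).
- */
- #include <stdio.h>
- int main(void)
- {
- char line[512];
- FILE *f = fopen("/proc/vmallocinfo", "r");
- if (!f) {
- perror("/proc/vmallocinfo");
- return 1;
- }
- while (fgets(line, sizeof(line), f))
- fputs(line, stdout);
- fclose(f);
- return 0;
- }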
- #ifdef CONFIG_RKP
- static void *__vmalloc_area_node_for_module(unsigned long core_text_size, struct vm_struct *area,
- gfp_t gfp_mask, pgprot_t prot, unsigned int page_shift, int node)
- {
- const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
- bool nofail = gfp_mask & __GFP_NOFAIL;
- unsigned long addr = (unsigned long)area->addr;
- unsigned long size = get_vm_area_size(area);
- unsigned long array_size;
- unsigned int nr_small_pages = size >> PAGE_SHIFT;
- unsigned int page_order;
- unsigned int flags;
- int ret;
- phys_addr_t p;
- struct page *page;
- int i;
- array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
- gfp_mask |= __GFP_NOWARN;
- if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
- gfp_mask |= __GFP_HIGHMEM;
- /* Please note that the recursion is strictly bounded. */
- if (array_size > PAGE_SIZE) {
- area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
- area->caller);
- } else {
- area->pages = kmalloc_node(array_size, nested_gfp, node);
- }
- if (!area->pages) {
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, failed to allocated page array size %lu",
- nr_small_pages * PAGE_SIZE, array_size);
- free_vm_area(area);
- return NULL;
- }
- set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
- page_order = vm_area_page_order(area);
- for (i = 0; i < nr_small_pages; i++) {
- if (i * PAGE_SIZE < core_text_size) {
- p = rkp_ro_alloc_phys_for_text();
- if (p) {
- page = phys_to_page(p);
- } else {
- page = alloc_page(gfp_mask | __GFP_NOWARN);
- }
- } else {
- page = alloc_page(gfp_mask | __GFP_NOWARN);
- }
- if (unlikely(!page))
- break;
- area->pages[area->nr_pages++] = page;
- }
- area->nr_pages = i;
- atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- if (gfp_mask & __GFP_ACCOUNT) {
- for (i = 0; i < area->nr_pages; i++)
- mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
- }
- /*
- * If not enough pages were obtained to satisfy the
- * allocation request, free the ones we did get via __vfree().
- */
- if (area->nr_pages != nr_small_pages) {
- /* vm_area_alloc_pages() can also fail due to a fatal signal */
- if (!fatal_signal_pending(current))
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, page order %u, failed to allocate pages",
- area->nr_pages * PAGE_SIZE, page_order);
- goto fail;
- }
- /*
- * page table allocations ignore the external gfp mask; enforce it
- * via the scope API
- */
- if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
- flags = memalloc_nofs_save();
- else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
- flags = memalloc_noio_save();
- do {
- ret = vmap_pages_range(addr, addr + size, prot, area->pages,
- page_shift);
- if (nofail && (ret < 0))
- schedule_timeout_uninterruptible(1);
- } while (nofail && (ret < 0));
- if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
- memalloc_nofs_restore(flags);
- else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
- memalloc_noio_restore(flags);
- if (ret < 0) {
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, failed to map pages",
- area->nr_pages * PAGE_SIZE);
- goto fail;
- }
- return area->addr;
- fail:
- __vfree(area->addr);
- return NULL;
- }
- void *__vmalloc_node_range_for_module(unsigned long core_layout_size, unsigned long core_text_size,
- unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller)
- {
- struct vm_struct *area;
- void *ret;
- kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
- unsigned long real_size = core_layout_size;
- unsigned int shift = PAGE_SHIFT;
- if (WARN_ON_ONCE(!core_layout_size))
- return NULL;
- again:
- area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
- VM_UNINITIALIZED | vm_flags, start, end, node,
- gfp_mask, caller);
- if (!area) {
- bool nofail = gfp_mask & __GFP_NOFAIL;
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, vm_struct allocation failed%s",
- real_size, (nofail) ? ". Retrying." : "");
- if (nofail) {
- schedule_timeout_uninterruptible(1);
- goto again;
- }
- return NULL;
- }
- /*
- * Prepare arguments for __vmalloc_area_node() and
- * kasan_unpoison_vmalloc().
- */
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
- if (kasan_hw_tags_enabled()) {
- /*
- * Modify protection bits to allow tagging.
- * This must be done before mapping.
- */
- prot = arch_vmap_pgprot_tagged(prot);
- /*
- * Skip page_alloc poisoning and zeroing for physical
- * pages backing VM_ALLOC mapping. Memory is instead
- * poisoned and zeroed by kasan_unpoison_vmalloc().
- */
- gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
- }
- /* Take note that the mapping is PAGE_KERNEL. */
- kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
- }
- /* Allocate physical pages and map them into vmalloc space. */
- ret = __vmalloc_area_node_for_module(core_text_size, area, gfp_mask, prot, shift, node);
- if (!ret)
- return NULL;
- /*
- * Mark the pages as accessible, now that they are mapped.
- * The init condition should match the one in post_alloc_hook()
- * (except for the should_skip_init() check) to make sure that memory
- * is initialized under the same conditions regardless of the enabled
- * KASAN mode.
- * Tag-based KASAN modes only assign tags to normal non-executable
- * allocations, see __kasan_unpoison_vmalloc().
- */
- kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
- if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
- (gfp_mask & __GFP_SKIP_ZERO))
- kasan_flags |= KASAN_VMALLOC_INIT;
- /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
- area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
- /*
- * In this function, newly allocated vm_struct has VM_UNINITIALIZED
- * flag. It means that vm_struct is not fully initialized.
- * Now, it is fully initialized, so remove this flag here.
- */
- clear_vm_uninitialized_flag(area);
- return area->addr;
- }
- #endif
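- /*
- * Editor's sketch (not part of this file): a hypothetical
- * module_alloc()-style caller of __vmalloc_node_range_for_module().
- * The MODULES_VADDR/MODULES_END window and PAGE_KERNEL protection
- * follow the usual module_alloc() pattern; the real RKP call site is
- * not shown here, so treat these arguments as assumptions.
- */
- #ifdef CONFIG_RKP
- static void *example_module_alloc(unsigned long layout_size, unsigned long text_size)
- {
- return __vmalloc_node_range_for_module(layout_size, text_size,
- MODULE_ALIGN, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
- #endif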