vmalloc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <[email protected]>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#ifdef CONFIG_RKP
#include <linux/uh.h>
#include <linux/rkp.h>
#include <linux/moduleloader.h>
#endif

#include "internal.h"
#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/
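
/*
 * Map a physically contiguous range at PTE level. With CONFIG_HUGETLB_PAGE,
 * arch_vmap_pte_range_map_size() may allow entries larger than PAGE_SIZE
 * (e.g. contiguous PTEs), in which case a huge PTE is installed via
 * set_huge_pte_at() instead of a regular one.
 */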
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
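
/*
 * Map a physically contiguous range into the kernel page tables, using huge
 * mappings (P4D/PUD/PMD or contiguous PTEs) up to @max_page_shift where the
 * architecture, alignment and size allow it. Caches and TLBs are not flushed
 * here; that is the caller's responsibility.
 */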
static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}
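
/*
 * Build the kernel mapping for an ioremap(): execute permission is dropped
 * via pgprot_nx() and huge mappings are capped by ioremap_max_page_shift
 * (PAGE_SHIFT when "nohugeiomap" was given or huge I/O mappings are not
 * supported).
 */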
int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	prot = pgprot_nx(prot);
	err = vmap_range_noflush(addr, end, phys_addr, prot,
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) && !err)
		ioremap_phys_range_hook(phys_addr, end - addr, prot);
	return err;
}
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
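
/*
 * Map an array of order-0 pages at PTE level. *nr indexes into @pages and is
 * advanced as entries are installed, so higher-level callers can continue
 * from the right page across calls.
 */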
static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}
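
/*
 * Map order-0 pages only, one PTE per page; used when huge vmalloc mappings
 * are unavailable or when page_shift == PAGE_SHIFT.
 */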
static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}
EXPORT_SYMBOL_GPL(vmalloc_nr_pages);

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}
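
/* Look up the vmap_area in @root that contains @addr, or NULL if none. */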
static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the parent node and the left or right link at
 * which the new node should be attached, for further processing.
 *
 * Otherwise NULL is returned. In that case all further steps regarding
 * inserting of a conflicting, overlapping range have to be declined and
 * are actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * named "link" here, where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger a warning if there are side (left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}
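
/*
 * Return the list_head of the vmap_area that would immediately follow a
 * node attached at @link under @parent, i.e. its next neighbour in the
 * address-sorted list (or the list head if there is none).
 */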
static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}
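
/*
 * Link @va into the rb-tree at the position found by find_va_links() and
 * into the address-sorted list. With @augment the node goes into the
 * augmented free tree; its subtree_max_size is reset here and fixed up by
 * a later augment_tree_propagate_from() call.
 */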
static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}
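
/* Remove @va from the rb-tree (augmented or not) and from the sorted list. */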
static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when removing a node or rotating the tree.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end. Or
 * in case of newly inserting of VA to the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}
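
/* Insert @va into the given tree and address-sorted list, without augmented data. */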
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new
 * free area is inserted. If VA has been merged, its object
 * is freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Despite that being
 * buggy behaviour, the system can stay alive and keep going.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}
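
/*
 * Check whether an allocation of @size bytes with @align alignment, placed
 * no lower than @vstart, would fit entirely inside @va.
 */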
  1054. static __always_inline bool
  1055. is_within_this_va(struct vmap_area *va, unsigned long size,
  1056. unsigned long align, unsigned long vstart)
  1057. {
  1058. unsigned long nva_start_addr;
  1059. if (va->va_start > vstart)
  1060. nva_start_addr = ALIGN(va->va_start, align);
  1061. else
  1062. nva_start_addr = ALIGN(vstart, align);
  1063. /* Can be overflowed due to big size or alignment. */
  1064. if (nva_start_addr + size < nva_start_addr ||
  1065. nva_start_addr < vstart)
  1066. return false;
  1067. return (nva_start_addr + size <= va->va_end);
  1068. }
  1069. /*
  1070. * Find the first free block(lowest start address) in the tree,
  1071. * that will accomplish the request corresponding to passing
  1072. * parameters. Please note, with an alignment bigger than PAGE_SIZE,
  1073. * a search length is adjusted to account for worst case alignment
  1074. * overhead.
  1075. */
  1076. static __always_inline struct vmap_area *
  1077. find_vmap_lowest_match(struct rb_root *root, unsigned long size,
  1078. unsigned long align, unsigned long vstart, bool adjust_search_size)
  1079. {
  1080. struct vmap_area *va;
  1081. struct rb_node *node;
  1082. unsigned long length;
  1083. /* Start from the root. */
  1084. node = root->rb_node;
  1085. /* Adjust the search size for alignment overhead. */
  1086. length = adjust_search_size ? size + align - 1 : size;
  1087. while (node) {
  1088. va = rb_entry(node, struct vmap_area, rb_node);
  1089. if (get_subtree_max_size(node->rb_left) >= length &&
  1090. vstart < va->va_start) {
  1091. node = node->rb_left;
  1092. } else {
  1093. if (is_within_this_va(va, size, align, vstart))
  1094. return va;
  1095. /*
  1096. * Does not make sense to go deeper towards the right
  1097. * sub-tree if it does not have a free block that is
  1098. * equal or bigger to the requested search length.
  1099. */
  1100. if (get_subtree_max_size(node->rb_right) >= length) {
  1101. node = node->rb_right;
  1102. continue;
  1103. }
  1104. /*
  1105. * OK. We roll back and find the first right sub-tree,
  1106. * that will satisfy the search criteria. It can happen
  1107. * due to "vstart" restriction or an alignment overhead
  1108. * that is bigger then PAGE_SIZE.
  1109. */
  1110. while ((node = rb_parent(node))) {
  1111. va = rb_entry(node, struct vmap_area, rb_node);
  1112. if (is_within_this_va(va, size, align, vstart))
  1113. return va;
  1114. if (get_subtree_max_size(node->rb_right) >= length &&
  1115. vstart <= va->va_start) {
  1116. /*
  1117. * Shift the vstart forward. Please note, we update it with
  1118. * parent's start address adding "1" because we do not want
  1119. * to enter same sub-tree after it has already been checked
  1120. * and no suitable free block found there.
  1121. */
  1122. vstart = va->va_start + 1;
  1123. node = node->rb_right;
  1124. break;
  1125. }
  1126. }
  1127. }
  1128. }
  1129. return NULL;
  1130. }
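/*
 * Worked example (illustrative only, not from the original source): for a
 * request of size = 2 * PAGE_SIZE with align = 4 * PAGE_SIZE and
 * adjust_search_size == true, the search length becomes size + align - 1,
 * i.e. slightly under 6 pages. Any free block at least that long is
 * guaranteed to contain a 4-page-aligned window of 2 pages, so subtrees
 * whose subtree_max_size is smaller can be skipped without ever missing a
 * valid placement.
 */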
  1131. #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
  1132. #include <linux/random.h>
  1133. static struct vmap_area *
  1134. find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
  1135. unsigned long align, unsigned long vstart)
  1136. {
  1137. struct vmap_area *va;
  1138. list_for_each_entry(va, head, list) {
  1139. if (!is_within_this_va(va, size, align, vstart))
  1140. continue;
  1141. return va;
  1142. }
  1143. return NULL;
  1144. }
  1145. static void
  1146. find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
  1147. unsigned long size, unsigned long align)
  1148. {
  1149. struct vmap_area *va_1, *va_2;
  1150. unsigned long vstart;
  1151. unsigned int rnd;
  1152. get_random_bytes(&rnd, sizeof(rnd));
  1153. vstart = VMALLOC_START + rnd;
  1154. va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
  1155. va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
  1156. if (va_1 != va_2)
  1157. pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
  1158. va_1, va_2, vstart);
  1159. }
  1160. #endif
  1161. enum fit_type {
  1162. NOTHING_FIT = 0,
  1163. FL_FIT_TYPE = 1, /* full fit */
  1164. LE_FIT_TYPE = 2, /* left edge fit */
  1165. RE_FIT_TYPE = 3, /* right edge fit */
  1166. NE_FIT_TYPE = 4 /* no edge fit */
  1167. };
  1168. static __always_inline enum fit_type
  1169. classify_va_fit_type(struct vmap_area *va,
  1170. unsigned long nva_start_addr, unsigned long size)
  1171. {
  1172. enum fit_type type;
  1173. /* Check if it is within VA. */
  1174. if (nva_start_addr < va->va_start ||
  1175. nva_start_addr + size > va->va_end)
  1176. return NOTHING_FIT;
  1177. /* Now classify. */
  1178. if (va->va_start == nva_start_addr) {
  1179. if (va->va_end == nva_start_addr + size)
  1180. type = FL_FIT_TYPE;
  1181. else
  1182. type = LE_FIT_TYPE;
  1183. } else if (va->va_end == nva_start_addr + size) {
  1184. type = RE_FIT_TYPE;
  1185. } else {
  1186. type = NE_FIT_TYPE;
  1187. }
  1188. return type;
  1189. }
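/*
 * Worked example (illustrative only, not from the original source): assume
 * a free VA spanning [0x1000:0x9000) and an allocation of size 0x2000.
 *
 *	nva_start_addr == 0x1000              -> LE_FIT_TYPE: the request takes
 *	                                         the left edge, the free VA
 *	                                         shrinks to [0x3000:0x9000)
 *	nva_start_addr == 0x7000              -> RE_FIT_TYPE: the free VA
 *	                                         shrinks to [0x1000:0x7000)
 *	nva_start_addr == 0x4000              -> NE_FIT_TYPE: the free VA is
 *	                                         split into [0x1000:0x4000)
 *	                                         and [0x6000:0x9000)
 *	nva_start_addr == 0x1000, size 0x8000 -> FL_FIT_TYPE: the free VA is
 *	                                         consumed entirely and freed
 */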
  1190. static __always_inline int
  1191. adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
  1192. struct vmap_area *va, unsigned long nva_start_addr,
  1193. unsigned long size)
  1194. {
  1195. struct vmap_area *lva = NULL;
  1196. enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
  1197. if (type == FL_FIT_TYPE) {
  1198. /*
  1199. * No need to split VA, it fully fits.
  1200. *
  1201. * | |
  1202. * V NVA V
  1203. * |---------------|
  1204. */
  1205. unlink_va_augment(va, root);
  1206. kmem_cache_free(vmap_area_cachep, va);
  1207. } else if (type == LE_FIT_TYPE) {
  1208. /*
  1209. * Split left edge of fit VA.
  1210. *
  1211. * | |
  1212. * V NVA V R
  1213. * |-------|-------|
  1214. */
  1215. va->va_start += size;
  1216. } else if (type == RE_FIT_TYPE) {
  1217. /*
  1218. * Split right edge of fit VA.
  1219. *
  1220. * | |
  1221. * L V NVA V
  1222. * |-------|-------|
  1223. */
  1224. va->va_end = nva_start_addr;
  1225. } else if (type == NE_FIT_TYPE) {
  1226. /*
  1227. * Split no edge of fit VA.
  1228. *
  1229. * | |
  1230. * L V NVA V R
  1231. * |---|-------|---|
  1232. */
  1233. lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
  1234. if (unlikely(!lva)) {
1235. /*
1236. * For the percpu allocator we do not do any pre-allocation
1237. * and leave it as it is. The reason is that it most likely
1238. * never ends up with NE_FIT_TYPE splitting. In the case of
1239. * percpu allocations, offsets and sizes are aligned to a
1240. * fixed alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1241. * are its main fitting cases.
1242. *
1243. * There are a few exceptions though; one example is the
1244. * first allocation (early boot up) when we have "one"
1245. * big free space that has to be split.
1246. *
1247. * We can also hit this path in the case of regular "vmap"
1248. * allocations, if "this" current CPU was not preloaded.
1249. * See the comment in alloc_vmap_area() for why. If so,
1250. * GFP_NOWAIT is used instead to get an extra object for
1251. * the split. That is rare and most of the time does not
1252. * occur.
1253. *
1254. * What happens if the allocation fails? Basically, the
1255. * "overflow" path is triggered to purge lazily freed
1256. * areas to free some memory, then the "retry" path is
1257. * triggered to try one more time. See more details
1258. * in the alloc_vmap_area() function.
1259. */
  1260. lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
  1261. if (!lva)
  1262. return -1;
  1263. }
  1264. /*
  1265. * Build the remainder.
  1266. */
  1267. lva->va_start = va->va_start;
  1268. lva->va_end = nva_start_addr;
  1269. /*
  1270. * Shrink this VA to remaining size.
  1271. */
  1272. va->va_start = nva_start_addr + size;
  1273. } else {
  1274. return -1;
  1275. }
  1276. if (type != FL_FIT_TYPE) {
  1277. augment_tree_propagate_from(va);
  1278. if (lva) /* type == NE_FIT_TYPE */
  1279. insert_vmap_area_augment(lva, &va->rb_node, root, head);
  1280. }
  1281. return 0;
  1282. }
  1283. /*
  1284. * Returns a start address of the newly allocated area, if success.
  1285. * Otherwise a vend is returned that indicates failure.
  1286. */
  1287. static __always_inline unsigned long
  1288. __alloc_vmap_area(struct rb_root *root, struct list_head *head,
  1289. unsigned long size, unsigned long align,
  1290. unsigned long vstart, unsigned long vend)
  1291. {
  1292. bool adjust_search_size = true;
  1293. unsigned long nva_start_addr;
  1294. struct vmap_area *va;
  1295. int ret;
  1296. /*
  1297. * Do not adjust when:
  1298. * a) align <= PAGE_SIZE, because it does not make any sense.
1299. * All blocks (their start addresses) are at least PAGE_SIZE
1300. * aligned anyway;
1301. * b) a short range where the requested size corresponds exactly to the
1302. * specified [vstart:vend] interval and the alignment > PAGE_SIZE.
1303. * With an adjusted search length the allocation would not succeed.
  1304. */
  1305. if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
  1306. adjust_search_size = false;
  1307. va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
  1308. if (unlikely(!va))
  1309. return vend;
  1310. if (va->va_start > vstart)
  1311. nva_start_addr = ALIGN(va->va_start, align);
  1312. else
  1313. nva_start_addr = ALIGN(vstart, align);
  1314. /* Check the "vend" restriction. */
  1315. if (nva_start_addr + size > vend)
  1316. return vend;
  1317. /* Update the free vmap_area. */
  1318. ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
  1319. if (WARN_ON_ONCE(ret))
  1320. return vend;
  1321. #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
  1322. find_vmap_lowest_match_check(root, head, size, align);
  1323. #endif
  1324. return nva_start_addr;
  1325. }
  1326. /*
  1327. * Free a region of KVA allocated by alloc_vmap_area
  1328. */
  1329. static void free_vmap_area(struct vmap_area *va)
  1330. {
  1331. /*
  1332. * Remove from the busy tree/list.
  1333. */
  1334. spin_lock(&vmap_area_lock);
  1335. unlink_va(va, &vmap_area_root);
  1336. spin_unlock(&vmap_area_lock);
  1337. /*
  1338. * Insert/Merge it back to the free tree/list.
  1339. */
  1340. spin_lock(&free_vmap_area_lock);
  1341. merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
  1342. spin_unlock(&free_vmap_area_lock);
  1343. }
  1344. static inline void
  1345. preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
  1346. {
  1347. struct vmap_area *va = NULL;
  1348. /*
  1349. * Preload this CPU with one extra vmap_area object. It is used
1350. * when the fit type of a free area is NE_FIT_TYPE. It guarantees
1351. * that a CPU doing an allocation is preloaded.
1352. *
1353. * We do it in non-atomic context, which allows us to use more
1354. * permissive allocation masks and thus be more stable under
1355. * low-memory conditions and high memory pressure.
  1356. */
  1357. if (!this_cpu_read(ne_fit_preload_node))
  1358. va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
  1359. spin_lock(lock);
  1360. if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
  1361. kmem_cache_free(vmap_area_cachep, va);
  1362. }
  1363. /*
  1364. * Allocate a region of KVA of the specified size and alignment, within the
  1365. * vstart and vend.
  1366. */
  1367. static struct vmap_area *alloc_vmap_area(unsigned long size,
  1368. unsigned long align,
  1369. unsigned long vstart, unsigned long vend,
  1370. int node, gfp_t gfp_mask)
  1371. {
  1372. struct vmap_area *va;
  1373. unsigned long freed;
  1374. unsigned long addr;
  1375. int purged = 0;
  1376. int ret;
  1377. BUG_ON(!size);
  1378. BUG_ON(offset_in_page(size));
  1379. BUG_ON(!is_power_of_2(align));
  1380. if (unlikely(!vmap_initialized))
  1381. return ERR_PTR(-EBUSY);
  1382. might_sleep();
  1383. gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
  1384. va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
  1385. if (unlikely(!va))
  1386. return ERR_PTR(-ENOMEM);
  1387. /*
  1388. * Only scan the relevant parts containing pointers to other objects
  1389. * to avoid false negatives.
  1390. */
  1391. kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
  1392. retry:
  1393. preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
  1394. addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
  1395. size, align, vstart, vend);
  1396. spin_unlock(&free_vmap_area_lock);
  1397. /*
  1398. * If an allocation fails, the "vend" address is
  1399. * returned. Therefore trigger the overflow path.
  1400. */
  1401. if (unlikely(addr == vend))
  1402. goto overflow;
  1403. va->va_start = addr;
  1404. va->va_end = addr + size;
  1405. va->vm = NULL;
  1406. spin_lock(&vmap_area_lock);
  1407. insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
  1408. spin_unlock(&vmap_area_lock);
  1409. BUG_ON(!IS_ALIGNED(va->va_start, align));
  1410. BUG_ON(va->va_start < vstart);
  1411. BUG_ON(va->va_end > vend);
  1412. ret = kasan_populate_vmalloc(addr, size);
  1413. if (ret) {
  1414. free_vmap_area(va);
  1415. return ERR_PTR(ret);
  1416. }
  1417. return va;
  1418. overflow:
  1419. if (!purged) {
  1420. purge_vmap_area_lazy();
  1421. purged = 1;
  1422. goto retry;
  1423. }
  1424. freed = 0;
  1425. blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
  1426. if (freed > 0) {
  1427. purged = 0;
  1428. goto retry;
  1429. }
  1430. if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
  1431. pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
  1432. size);
  1433. kmem_cache_free(vmap_area_cachep, va);
  1434. return ERR_PTR(-EBUSY);
  1435. }
  1436. int register_vmap_purge_notifier(struct notifier_block *nb)
  1437. {
  1438. return blocking_notifier_chain_register(&vmap_notify_list, nb);
  1439. }
  1440. EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
  1441. int unregister_vmap_purge_notifier(struct notifier_block *nb)
  1442. {
  1443. return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
  1444. }
  1445. EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
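/*
 * Minimal sketch (illustrative only, not part of this file) of how a driver
 * might hook into the purge path above. The names my_vmap_purge, my_vmap_nb
 * and my_driver_shrink_vmalloc_usage() are hypothetical. The chain is
 * invoked from the alloc_vmap_area() overflow path with a pointer to a
 * "freed" counter, which a callback can bump after it releases some of its
 * own vmalloc usage:
 *
 *	static int my_vmap_purge(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += my_driver_shrink_vmalloc_usage();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vmap_nb = {
 *		.notifier_call = my_vmap_purge,
 *	};
 *	...
 *	register_vmap_purge_notifier(&my_vmap_nb);
 */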
  1446. /*
  1447. * lazy_max_pages is the maximum amount of virtual address space we gather up
  1448. * before attempting to purge with a TLB flush.
  1449. *
  1450. * There is a tradeoff here: a larger number will cover more kernel page tables
  1451. * and take slightly longer to purge, but it will linearly reduce the number of
  1452. * global TLB flushes that must be performed. It would seem natural to scale
  1453. * this number up linearly with the number of CPUs (because vmapping activity
  1454. * could also scale linearly with the number of CPUs), however it is likely
  1455. * that in practice, workloads might be constrained in other ways that mean
  1456. * vmap activity will not scale linearly with CPUs. Also, I want to be
  1457. * conservative and not introduce a big latency on huge systems, so go with
  1458. * a less aggressive log scale. It will still be an improvement over the old
  1459. * code, and it will be simple to change the scale factor if we find that it
  1460. * becomes a problem on bigger systems.
  1461. */
  1462. static unsigned long lazy_max_pages(void)
  1463. {
  1464. unsigned int log;
  1465. log = fls(num_online_cpus());
  1466. return log * (32UL * 1024 * 1024 / PAGE_SIZE);
  1467. }
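/*
 * Worked example (illustrative only, not from the original source): on a
 * 16-CPU machine with 4K pages, fls(16) == 5, so lazy_max_pages() returns
 * 5 * (32MB / 4KB) == 40960 pages, i.e. roughly 160MB of lazily freed KVA
 * may accumulate before a purge and TLB flush is triggered.
 */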
  1468. static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
  1469. /*
  1470. * Serialize vmap purging. There is no actual critical section protected
  1471. * by this lock, but we want to avoid concurrent calls for performance
1472. * reasons and to make pcpu_get_vm_areas() more deterministic.
  1473. */
  1474. static DEFINE_MUTEX(vmap_purge_lock);
  1475. /* for per-CPU blocks */
  1476. static void purge_fragmented_blocks_allcpus(void);
  1477. /*
  1478. * Purges all lazily-freed vmap areas.
  1479. */
  1480. static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
  1481. {
  1482. unsigned long resched_threshold;
  1483. struct list_head local_purge_list;
  1484. struct vmap_area *va, *n_va;
  1485. lockdep_assert_held(&vmap_purge_lock);
  1486. spin_lock(&purge_vmap_area_lock);
  1487. purge_vmap_area_root = RB_ROOT;
  1488. list_replace_init(&purge_vmap_area_list, &local_purge_list);
  1489. spin_unlock(&purge_vmap_area_lock);
  1490. if (unlikely(list_empty(&local_purge_list)))
  1491. return false;
  1492. start = min(start,
  1493. list_first_entry(&local_purge_list,
  1494. struct vmap_area, list)->va_start);
  1495. end = max(end,
  1496. list_last_entry(&local_purge_list,
  1497. struct vmap_area, list)->va_end);
  1498. flush_tlb_kernel_range(start, end);
  1499. resched_threshold = lazy_max_pages() << 1;
  1500. spin_lock(&free_vmap_area_lock);
  1501. list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
  1502. unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
  1503. unsigned long orig_start = va->va_start;
  1504. unsigned long orig_end = va->va_end;
  1505. /*
1506. * Finally, insert or merge the lazily freed area. It is
  1507. * detached and there is no need to "unlink" it from
  1508. * anything.
  1509. */
  1510. va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
  1511. &free_vmap_area_list);
  1512. if (!va)
  1513. continue;
  1514. if (is_vmalloc_or_module_addr((void *)orig_start))
  1515. kasan_release_vmalloc(orig_start, orig_end,
  1516. va->va_start, va->va_end);
  1517. atomic_long_sub(nr, &vmap_lazy_nr);
  1518. if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
  1519. cond_resched_lock(&free_vmap_area_lock);
  1520. }
  1521. spin_unlock(&free_vmap_area_lock);
  1522. return true;
  1523. }
  1524. /*
  1525. * Kick off a purge of the outstanding lazy areas.
  1526. */
  1527. static void purge_vmap_area_lazy(void)
  1528. {
  1529. mutex_lock(&vmap_purge_lock);
  1530. purge_fragmented_blocks_allcpus();
  1531. __purge_vmap_area_lazy(ULONG_MAX, 0);
  1532. mutex_unlock(&vmap_purge_lock);
  1533. }
  1534. static void drain_vmap_area_work(struct work_struct *work)
  1535. {
  1536. unsigned long nr_lazy;
  1537. do {
  1538. mutex_lock(&vmap_purge_lock);
  1539. __purge_vmap_area_lazy(ULONG_MAX, 0);
  1540. mutex_unlock(&vmap_purge_lock);
  1541. /* Recheck if further work is required. */
  1542. nr_lazy = atomic_long_read(&vmap_lazy_nr);
  1543. } while (nr_lazy > lazy_max_pages());
  1544. }
  1545. /*
1546. * Free a vmap area, with the caller ensuring that the area has been
1547. * unmapped and that flush_cache_vunmap() has been called for the
1548. * correct range previously.
  1549. */
  1550. static void free_vmap_area_noflush(struct vmap_area *va)
  1551. {
  1552. unsigned long nr_lazy;
  1553. spin_lock(&vmap_area_lock);
  1554. unlink_va(va, &vmap_area_root);
  1555. spin_unlock(&vmap_area_lock);
  1556. nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
  1557. PAGE_SHIFT, &vmap_lazy_nr);
  1558. /*
1559. * Merge or place it into the purge tree/list.
  1560. */
  1561. spin_lock(&purge_vmap_area_lock);
  1562. merge_or_add_vmap_area(va,
  1563. &purge_vmap_area_root, &purge_vmap_area_list);
  1564. spin_unlock(&purge_vmap_area_lock);
  1565. /* After this point, we may free va at any time */
  1566. if (unlikely(nr_lazy > lazy_max_pages()))
  1567. schedule_work(&drain_vmap_work);
  1568. }
  1569. /*
  1570. * Free and unmap a vmap area
  1571. */
  1572. static void free_unmap_vmap_area(struct vmap_area *va)
  1573. {
  1574. flush_cache_vunmap(va->va_start, va->va_end);
  1575. vunmap_range_noflush(va->va_start, va->va_end);
  1576. if (debug_pagealloc_enabled_static())
  1577. flush_tlb_kernel_range(va->va_start, va->va_end);
  1578. free_vmap_area_noflush(va);
  1579. }
  1580. struct vmap_area *find_vmap_area(unsigned long addr)
  1581. {
  1582. struct vmap_area *va;
  1583. spin_lock(&vmap_area_lock);
  1584. va = __find_vmap_area(addr, &vmap_area_root);
  1585. spin_unlock(&vmap_area_lock);
  1586. return va;
  1587. }
  1588. /*** Per cpu kva allocator ***/
  1589. /*
  1590. * vmap space is limited especially on 32 bit architectures. Ensure there is
  1591. * room for at least 16 percpu vmap blocks per CPU.
  1592. */
  1593. /*
  1594. * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
  1595. * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
  1596. * instead (we just need a rough idea)
  1597. */
  1598. #if BITS_PER_LONG == 32
  1599. #define VMALLOC_SPACE (128UL*1024*1024)
  1600. #else
  1601. #define VMALLOC_SPACE (128UL*1024*1024*1024)
  1602. #endif
  1603. #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
  1604. #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
  1605. #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
  1606. #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
  1607. #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
  1608. #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
  1609. #define VMAP_BBMAP_BITS \
  1610. VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
  1611. VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
  1612. VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
  1613. #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
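/*
 * Worked example (illustrative only, not from the original source): on a
 * 64-bit kernel with 4K pages and NR_CPUS == 64, VMALLOC_PAGES is guessed
 * as 128GB / 4KB == 32M pages, and 32M / 64 / 16 == 32768 bits, which is
 * clamped down to VMAP_BBMAP_BITS_MAX. So VMAP_BBMAP_BITS == 1024 and
 * VMAP_BLOCK_SIZE == 4MB per vmap block.
 */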
  1614. struct vmap_block_queue {
  1615. spinlock_t lock;
  1616. struct list_head free;
  1617. };
  1618. struct vmap_block {
  1619. spinlock_t lock;
  1620. struct vmap_area *va;
  1621. unsigned long free, dirty;
  1622. unsigned long dirty_min, dirty_max; /*< dirty range */
  1623. struct list_head free_list;
  1624. struct rcu_head rcu_head;
  1625. struct list_head purge;
  1626. };
  1627. /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
  1628. static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
  1629. /*
  1630. * XArray of vmap blocks, indexed by address, to quickly find a vmap block
  1631. * in the free path. Could get rid of this if we change the API to return a
  1632. * "cookie" from alloc, to be passed to free. But no big deal yet.
  1633. */
  1634. static DEFINE_XARRAY(vmap_blocks);
  1635. /*
  1636. * We should probably have a fallback mechanism to allocate virtual memory
  1637. * out of partially filled vmap blocks. However vmap block sizing should be
  1638. * fairly reasonable according to the vmalloc size, so it shouldn't be a
  1639. * big problem.
  1640. */
  1641. static unsigned long addr_to_vb_idx(unsigned long addr)
  1642. {
  1643. addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
  1644. addr /= VMAP_BLOCK_SIZE;
  1645. return addr;
  1646. }
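/*
 * Example (illustrative only, not from the original source): with
 * VMAP_BLOCK_SIZE == 4MB, addr_to_vb_idx() subtracts VMALLOC_START rounded
 * down to a block boundary and divides by 4MB, so every address inside the
 * same vmap block yields the same index. That index is the key used to
 * look the block up in the vmap_blocks XArray on the free path.
 */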
  1647. static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
  1648. {
  1649. unsigned long addr;
  1650. addr = va_start + (pages_off << PAGE_SHIFT);
  1651. BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
  1652. return (void *)addr;
  1653. }
  1654. /**
1655. * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it.
1656. * Of course the number of pages can't exceed VMAP_BBMAP_BITS
1657. * @order: how many 2^order pages should be occupied in the newly allocated block
  1658. * @gfp_mask: flags for the page level allocator
  1659. *
  1660. * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
  1661. */
  1662. static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
  1663. {
  1664. struct vmap_block_queue *vbq;
  1665. struct vmap_block *vb;
  1666. struct vmap_area *va;
  1667. unsigned long vb_idx;
  1668. int node, err;
  1669. void *vaddr;
  1670. node = numa_node_id();
  1671. vb = kmalloc_node(sizeof(struct vmap_block),
  1672. gfp_mask & GFP_RECLAIM_MASK, node);
  1673. if (unlikely(!vb))
  1674. return ERR_PTR(-ENOMEM);
  1675. va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
  1676. VMALLOC_START, VMALLOC_END,
  1677. node, gfp_mask);
  1678. if (IS_ERR(va)) {
  1679. kfree(vb);
  1680. return ERR_CAST(va);
  1681. }
  1682. vaddr = vmap_block_vaddr(va->va_start, 0);
  1683. spin_lock_init(&vb->lock);
  1684. vb->va = va;
  1685. /* At least something should be left free */
  1686. BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
  1687. vb->free = VMAP_BBMAP_BITS - (1UL << order);
  1688. vb->dirty = 0;
  1689. vb->dirty_min = VMAP_BBMAP_BITS;
  1690. vb->dirty_max = 0;
  1691. INIT_LIST_HEAD(&vb->free_list);
  1692. vb_idx = addr_to_vb_idx(va->va_start);
  1693. err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
  1694. if (err) {
  1695. kfree(vb);
  1696. free_vmap_area(va);
  1697. return ERR_PTR(err);
  1698. }
  1699. vbq = raw_cpu_ptr(&vmap_block_queue);
  1700. spin_lock(&vbq->lock);
  1701. list_add_tail_rcu(&vb->free_list, &vbq->free);
  1702. spin_unlock(&vbq->lock);
  1703. return vaddr;
  1704. }
  1705. static void free_vmap_block(struct vmap_block *vb)
  1706. {
  1707. struct vmap_block *tmp;
  1708. tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
  1709. BUG_ON(tmp != vb);
  1710. free_vmap_area_noflush(vb->va);
  1711. kfree_rcu(vb, rcu_head);
  1712. }
  1713. static void purge_fragmented_blocks(int cpu)
  1714. {
  1715. LIST_HEAD(purge);
  1716. struct vmap_block *vb;
  1717. struct vmap_block *n_vb;
  1718. struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  1719. rcu_read_lock();
  1720. list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  1721. if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
  1722. continue;
  1723. spin_lock(&vb->lock);
  1724. if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
  1725. vb->free = 0; /* prevent further allocs after releasing lock */
  1726. vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
  1727. vb->dirty_min = 0;
  1728. vb->dirty_max = VMAP_BBMAP_BITS;
  1729. spin_lock(&vbq->lock);
  1730. list_del_rcu(&vb->free_list);
  1731. spin_unlock(&vbq->lock);
  1732. spin_unlock(&vb->lock);
  1733. list_add_tail(&vb->purge, &purge);
  1734. } else
  1735. spin_unlock(&vb->lock);
  1736. }
  1737. rcu_read_unlock();
  1738. list_for_each_entry_safe(vb, n_vb, &purge, purge) {
  1739. list_del(&vb->purge);
  1740. free_vmap_block(vb);
  1741. }
  1742. }
  1743. static void purge_fragmented_blocks_allcpus(void)
  1744. {
  1745. int cpu;
  1746. for_each_possible_cpu(cpu)
  1747. purge_fragmented_blocks(cpu);
  1748. }
  1749. static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
  1750. {
  1751. struct vmap_block_queue *vbq;
  1752. struct vmap_block *vb;
  1753. void *vaddr = NULL;
  1754. unsigned int order;
  1755. BUG_ON(offset_in_page(size));
  1756. BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  1757. if (WARN_ON(size == 0)) {
  1758. /*
1759. * Allocating 0 bytes isn't what the caller wants since
1760. * get_order(0) returns a funny result. Just warn and terminate
  1761. * early.
  1762. */
  1763. return NULL;
  1764. }
  1765. order = get_order(size);
  1766. rcu_read_lock();
  1767. vbq = raw_cpu_ptr(&vmap_block_queue);
  1768. list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  1769. unsigned long pages_off;
  1770. spin_lock(&vb->lock);
  1771. if (vb->free < (1UL << order)) {
  1772. spin_unlock(&vb->lock);
  1773. continue;
  1774. }
  1775. pages_off = VMAP_BBMAP_BITS - vb->free;
  1776. vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
  1777. vb->free -= 1UL << order;
  1778. if (vb->free == 0) {
  1779. spin_lock(&vbq->lock);
  1780. list_del_rcu(&vb->free_list);
  1781. spin_unlock(&vbq->lock);
  1782. }
  1783. spin_unlock(&vb->lock);
  1784. break;
  1785. }
  1786. rcu_read_unlock();
  1787. /* Allocate new block if nothing was found */
  1788. if (!vaddr)
  1789. vaddr = new_vmap_block(order, gfp_mask);
  1790. return vaddr;
  1791. }
  1792. static void vb_free(unsigned long addr, unsigned long size)
  1793. {
  1794. unsigned long offset;
  1795. unsigned int order;
  1796. struct vmap_block *vb;
  1797. BUG_ON(offset_in_page(size));
  1798. BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  1799. flush_cache_vunmap(addr, addr + size);
  1800. order = get_order(size);
  1801. offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
  1802. vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
  1803. vunmap_range_noflush(addr, addr + size);
  1804. if (debug_pagealloc_enabled_static())
  1805. flush_tlb_kernel_range(addr, addr + size);
  1806. spin_lock(&vb->lock);
  1807. /* Expand dirty range */
  1808. vb->dirty_min = min(vb->dirty_min, offset);
  1809. vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
  1810. vb->dirty += 1UL << order;
  1811. if (vb->dirty == VMAP_BBMAP_BITS) {
  1812. BUG_ON(vb->free);
  1813. spin_unlock(&vb->lock);
  1814. free_vmap_block(vb);
  1815. } else
  1816. spin_unlock(&vb->lock);
  1817. }
  1818. static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
  1819. {
  1820. int cpu;
  1821. if (unlikely(!vmap_initialized))
  1822. return;
  1823. might_sleep();
  1824. for_each_possible_cpu(cpu) {
  1825. struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  1826. struct vmap_block *vb;
  1827. rcu_read_lock();
  1828. list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  1829. spin_lock(&vb->lock);
  1830. if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
  1831. unsigned long va_start = vb->va->va_start;
  1832. unsigned long s, e;
  1833. s = va_start + (vb->dirty_min << PAGE_SHIFT);
  1834. e = va_start + (vb->dirty_max << PAGE_SHIFT);
  1835. start = min(s, start);
  1836. end = max(e, end);
  1837. flush = 1;
  1838. }
  1839. spin_unlock(&vb->lock);
  1840. }
  1841. rcu_read_unlock();
  1842. }
  1843. mutex_lock(&vmap_purge_lock);
  1844. purge_fragmented_blocks_allcpus();
  1845. if (!__purge_vmap_area_lazy(start, end) && flush)
  1846. flush_tlb_kernel_range(start, end);
  1847. mutex_unlock(&vmap_purge_lock);
  1848. }
  1849. /**
  1850. * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
  1851. *
  1852. * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
  1853. * to amortize TLB flushing overheads. What this means is that any page you
1854. * have now may, in a former life, have been mapped into a kernel virtual
1855. * address by the vmap layer, and so there might be some CPUs with TLB entries
1856. * still referencing that page (in addition to the regular 1:1 kernel mapping).
  1857. *
  1858. * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
  1859. * be sure that none of the pages we have control over will have any aliases
  1860. * from the vmap layer.
  1861. */
  1862. void vm_unmap_aliases(void)
  1863. {
  1864. unsigned long start = ULONG_MAX, end = 0;
  1865. int flush = 0;
  1866. _vm_unmap_aliases(start, end, flush);
  1867. }
  1868. EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  1869. /**
  1870. * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
  1871. * @mem: the pointer returned by vm_map_ram
  1872. * @count: the count passed to that vm_map_ram call (cannot unmap partial)
  1873. */
  1874. void vm_unmap_ram(const void *mem, unsigned int count)
  1875. {
  1876. unsigned long size = (unsigned long)count << PAGE_SHIFT;
  1877. unsigned long addr = (unsigned long)kasan_reset_tag(mem);
  1878. struct vmap_area *va;
  1879. might_sleep();
  1880. BUG_ON(!addr);
  1881. BUG_ON(addr < VMALLOC_START);
  1882. BUG_ON(addr > VMALLOC_END);
  1883. BUG_ON(!PAGE_ALIGNED(addr));
  1884. kasan_poison_vmalloc(mem, size);
  1885. if (likely(count <= VMAP_MAX_ALLOC)) {
  1886. debug_check_no_locks_freed(mem, size);
  1887. vb_free(addr, size);
  1888. return;
  1889. }
  1890. va = find_vmap_area(addr);
  1891. BUG_ON(!va);
  1892. debug_check_no_locks_freed((void *)va->va_start,
  1893. (va->va_end - va->va_start));
  1894. free_unmap_vmap_area(va);
  1895. }
  1896. EXPORT_SYMBOL(vm_unmap_ram);
  1897. /**
  1898. * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
  1899. * @pages: an array of pointers to the pages to be mapped
  1900. * @count: number of pages
  1901. * @node: prefer to allocate data structures on this node
  1902. *
1903. * If you use this function for fewer than VMAP_MAX_ALLOC pages, it could be
1904. * faster than vmap, so it's good. But if you mix long-lived and short-lived
1905. * objects with vm_map_ram(), it could consume lots of address space through
1906. * fragmentation (especially on a 32-bit machine), and you could eventually
1907. * see failures. Please use this function for short-lived objects only.
  1908. *
  1909. * Returns: a pointer to the address that has been mapped, or %NULL on failure
  1910. */
  1911. void *vm_map_ram(struct page **pages, unsigned int count, int node)
  1912. {
  1913. unsigned long size = (unsigned long)count << PAGE_SHIFT;
  1914. unsigned long addr;
  1915. void *mem;
  1916. if (likely(count <= VMAP_MAX_ALLOC)) {
  1917. mem = vb_alloc(size, GFP_KERNEL);
  1918. if (IS_ERR(mem))
  1919. return NULL;
  1920. addr = (unsigned long)mem;
  1921. } else {
  1922. struct vmap_area *va;
  1923. va = alloc_vmap_area(size, PAGE_SIZE,
  1924. VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
  1925. if (IS_ERR(va))
  1926. return NULL;
  1927. addr = va->va_start;
  1928. mem = (void *)addr;
  1929. }
  1930. if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
  1931. pages, PAGE_SHIFT) < 0) {
  1932. vm_unmap_ram(mem, count);
  1933. return NULL;
  1934. }
  1935. /*
  1936. * Mark the pages as accessible, now that they are mapped.
  1937. * With hardware tag-based KASAN, marking is skipped for
  1938. * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
  1939. */
  1940. mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
  1941. return mem;
  1942. }
  1943. EXPORT_SYMBOL(vm_map_ram);
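/*
 * Minimal usage sketch (illustrative only, not part of this file). Error
 * handling of the page allocations is omitted and the buffer is assumed to
 * be short-lived, as the kernel-doc above recommends:
 *
 *	struct page *pages[4];
 *	void *buf;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *
 *	buf = vm_map_ram(pages, 4, NUMA_NO_NODE);
 *	if (buf) {
 *		memset(buf, 0, 4 * PAGE_SIZE);
 *		vm_unmap_ram(buf, 4);
 *	}
 *
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */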
  1944. static struct vm_struct *vmlist __initdata;
  1945. static inline unsigned int vm_area_page_order(struct vm_struct *vm)
  1946. {
  1947. #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
  1948. return vm->page_order;
  1949. #else
  1950. return 0;
  1951. #endif
  1952. }
  1953. static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
  1954. {
  1955. #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
  1956. vm->page_order = order;
  1957. #else
  1958. BUG_ON(order != 0);
  1959. #endif
  1960. }
  1961. /**
  1962. * vm_area_add_early - add vmap area early during boot
  1963. * @vm: vm_struct to add
  1964. *
1965. * This function is used to add a fixed kernel vm area to vmlist before
  1966. * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
  1967. * should contain proper values and the other fields should be zero.
  1968. *
  1969. * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
  1970. */
  1971. void __init vm_area_add_early(struct vm_struct *vm)
  1972. {
  1973. struct vm_struct *tmp, **p;
  1974. BUG_ON(vmap_initialized);
  1975. for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
  1976. if (tmp->addr >= vm->addr) {
  1977. BUG_ON(tmp->addr < vm->addr + vm->size);
  1978. break;
  1979. } else
  1980. BUG_ON(tmp->addr + tmp->size > vm->addr);
  1981. }
  1982. vm->next = *p;
  1983. *p = vm;
  1984. }
  1985. /**
  1986. * vm_area_register_early - register vmap area early during boot
  1987. * @vm: vm_struct to register
  1988. * @align: requested alignment
  1989. *
1990. * This function is used to register a kernel vm area before
  1991. * vmalloc_init() is called. @vm->size and @vm->flags should contain
  1992. * proper values on entry and other fields should be zero. On return,
  1993. * vm->addr contains the allocated address.
  1994. *
  1995. * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
  1996. */
  1997. void __init vm_area_register_early(struct vm_struct *vm, size_t align)
  1998. {
  1999. unsigned long addr = ALIGN(VMALLOC_START, align);
  2000. struct vm_struct *cur, **p;
  2001. BUG_ON(vmap_initialized);
  2002. for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
  2003. if ((unsigned long)cur->addr - addr >= vm->size)
  2004. break;
  2005. addr = ALIGN((unsigned long)cur->addr + cur->size, align);
  2006. }
  2007. BUG_ON(addr > VMALLOC_END - vm->size);
  2008. vm->addr = (void *)addr;
  2009. vm->next = *p;
  2010. *p = vm;
  2011. kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
  2012. }
  2013. static void vmap_init_free_space(void)
  2014. {
  2015. unsigned long vmap_start = 1;
  2016. const unsigned long vmap_end = ULONG_MAX;
  2017. struct vmap_area *busy, *free;
  2018. /*
  2019. * B F B B B F
  2020. * -|-----|.....|-----|-----|-----|.....|-
  2021. * | The KVA space |
  2022. * |<--------------------------------->|
  2023. */
  2024. list_for_each_entry(busy, &vmap_area_list, list) {
  2025. if (busy->va_start - vmap_start > 0) {
  2026. free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
  2027. if (!WARN_ON_ONCE(!free)) {
  2028. free->va_start = vmap_start;
  2029. free->va_end = busy->va_start;
  2030. insert_vmap_area_augment(free, NULL,
  2031. &free_vmap_area_root,
  2032. &free_vmap_area_list);
  2033. }
  2034. }
  2035. vmap_start = busy->va_end;
  2036. }
  2037. if (vmap_end - vmap_start > 0) {
  2038. free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
  2039. if (!WARN_ON_ONCE(!free)) {
  2040. free->va_start = vmap_start;
  2041. free->va_end = vmap_end;
  2042. insert_vmap_area_augment(free, NULL,
  2043. &free_vmap_area_root,
  2044. &free_vmap_area_list);
  2045. }
  2046. }
  2047. }
  2048. void __init vmalloc_init(void)
  2049. {
  2050. struct vmap_area *va;
  2051. struct vm_struct *tmp;
  2052. int i;
  2053. /*
  2054. * Create the cache for vmap_area objects.
  2055. */
  2056. vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
  2057. for_each_possible_cpu(i) {
  2058. struct vmap_block_queue *vbq;
  2059. struct vfree_deferred *p;
  2060. vbq = &per_cpu(vmap_block_queue, i);
  2061. spin_lock_init(&vbq->lock);
  2062. INIT_LIST_HEAD(&vbq->free);
  2063. p = &per_cpu(vfree_deferred, i);
  2064. init_llist_head(&p->list);
  2065. INIT_WORK(&p->wq, free_work);
  2066. }
  2067. /* Import existing vmlist entries. */
  2068. for (tmp = vmlist; tmp; tmp = tmp->next) {
  2069. va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
  2070. if (WARN_ON_ONCE(!va))
  2071. continue;
  2072. va->va_start = (unsigned long)tmp->addr;
  2073. va->va_end = va->va_start + tmp->size;
  2074. va->vm = tmp;
  2075. insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
  2076. }
  2077. /*
  2078. * Now we can initialize a free vmap space.
  2079. */
  2080. vmap_init_free_space();
  2081. vmap_initialized = true;
  2082. }
  2083. static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
  2084. struct vmap_area *va, unsigned long flags, const void *caller)
  2085. {
  2086. vm->flags = flags;
  2087. vm->addr = (void *)va->va_start;
  2088. vm->size = va->va_end - va->va_start;
  2089. vm->caller = caller;
  2090. va->vm = vm;
  2091. }
  2092. static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
  2093. unsigned long flags, const void *caller)
  2094. {
  2095. spin_lock(&vmap_area_lock);
  2096. setup_vmalloc_vm_locked(vm, va, flags, caller);
  2097. spin_unlock(&vmap_area_lock);
  2098. }
  2099. static void clear_vm_uninitialized_flag(struct vm_struct *vm)
  2100. {
  2101. /*
  2102. * Before removing VM_UNINITIALIZED,
  2103. * we should make sure that vm has proper values.
  2104. * Pair with smp_rmb() in show_numa_info().
  2105. */
  2106. smp_wmb();
  2107. vm->flags &= ~VM_UNINITIALIZED;
  2108. }
  2109. static struct vm_struct *__get_vm_area_node(unsigned long size,
  2110. unsigned long align, unsigned long shift, unsigned long flags,
  2111. unsigned long start, unsigned long end, int node,
  2112. gfp_t gfp_mask, const void *caller)
  2113. {
  2114. struct vmap_area *va;
  2115. struct vm_struct *area;
  2116. unsigned long requested_size = size;
  2117. BUG_ON(in_interrupt());
  2118. size = ALIGN(size, 1ul << shift);
  2119. if (unlikely(!size))
  2120. return NULL;
  2121. if (flags & VM_IOREMAP)
  2122. align = 1ul << clamp_t(int, get_count_order_long(size),
  2123. PAGE_SHIFT, IOREMAP_MAX_ORDER);
  2124. area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
  2125. if (unlikely(!area))
  2126. return NULL;
  2127. if (!(flags & VM_NO_GUARD))
  2128. size += PAGE_SIZE;
  2129. va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
  2130. if (IS_ERR(va)) {
  2131. kfree(area);
  2132. return NULL;
  2133. }
  2134. setup_vmalloc_vm(area, va, flags, caller);
  2135. /*
  2136. * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
  2137. * best-effort approach, as they can be mapped outside of vmalloc code.
  2138. * For VM_ALLOC mappings, the pages are marked as accessible after
  2139. * getting mapped in __vmalloc_node_range().
  2140. * With hardware tag-based KASAN, marking is skipped for
  2141. * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
  2142. */
  2143. if (!(flags & VM_ALLOC))
  2144. area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
  2145. KASAN_VMALLOC_PROT_NORMAL);
  2146. return area;
  2147. }
  2148. struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  2149. unsigned long start, unsigned long end,
  2150. const void *caller)
  2151. {
  2152. return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
  2153. NUMA_NO_NODE, GFP_KERNEL, caller);
  2154. }
  2155. /**
  2156. * get_vm_area - reserve a contiguous kernel virtual area
  2157. * @size: size of the area
  2158. * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  2159. *
2160. * Search for an area of @size in the kernel virtual mapping area
2161. * and reserve it for our purposes. Returns the area descriptor
2162. * on success or %NULL on failure.
  2163. *
  2164. * Return: the area descriptor on success or %NULL on failure.
  2165. */
  2166. struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
  2167. {
  2168. return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
  2169. VMALLOC_START, VMALLOC_END,
  2170. NUMA_NO_NODE, GFP_KERNEL,
  2171. __builtin_return_address(0));
  2172. }
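/*
 * Minimal sketch (illustrative only, not part of this file): reserving KVA
 * without backing it, e.g. before the caller installs an ioremap-style
 * mapping of its own, and releasing the reservation again with
 * free_vm_area() if that mapping is never set up:
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(SZ_1M, VM_IOREMAP);
 *	if (!area)
 *		return -ENOMEM;
 *	...
 *	free_vm_area(area);
 */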
  2173. struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
  2174. const void *caller)
  2175. {
  2176. return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
  2177. VMALLOC_START, VMALLOC_END,
  2178. NUMA_NO_NODE, GFP_KERNEL, caller);
  2179. }
  2180. /**
2181. * find_vm_area - find a contiguous kernel virtual area
  2182. * @addr: base address
  2183. *
  2184. * Search for the kernel VM area starting at @addr, and return it.
  2185. * It is up to the caller to do all required locking to keep the returned
  2186. * pointer valid.
  2187. *
  2188. * Return: the area descriptor on success or %NULL on failure.
  2189. */
  2190. struct vm_struct *find_vm_area(const void *addr)
  2191. {
  2192. struct vmap_area *va;
  2193. va = find_vmap_area((unsigned long)addr);
  2194. if (!va)
  2195. return NULL;
  2196. return va->vm;
  2197. }
  2198. /**
2199. * remove_vm_area - find and remove a contiguous kernel virtual area
  2200. * @addr: base address
  2201. *
  2202. * Search for the kernel VM area starting at @addr, and remove it.
  2203. * This function returns the found VM area, but using it is NOT safe
  2204. * on SMP machines, except for its size or flags.
  2205. *
  2206. * Return: the area descriptor on success or %NULL on failure.
  2207. */
  2208. struct vm_struct *remove_vm_area(const void *addr)
  2209. {
  2210. struct vmap_area *va;
  2211. might_sleep();
  2212. spin_lock(&vmap_area_lock);
  2213. va = __find_vmap_area((unsigned long)addr, &vmap_area_root);
  2214. if (va && va->vm) {
  2215. struct vm_struct *vm = va->vm;
  2216. va->vm = NULL;
  2217. spin_unlock(&vmap_area_lock);
  2218. kasan_free_module_shadow(vm);
  2219. free_unmap_vmap_area(va);
  2220. return vm;
  2221. }
  2222. spin_unlock(&vmap_area_lock);
  2223. return NULL;
  2224. }
  2225. static inline void set_area_direct_map(const struct vm_struct *area,
  2226. int (*set_direct_map)(struct page *page))
  2227. {
  2228. int i;
  2229. /* HUGE_VMALLOC passes small pages to set_direct_map */
  2230. for (i = 0; i < area->nr_pages; i++)
  2231. if (page_address(area->pages[i]))
  2232. set_direct_map(area->pages[i]);
  2233. }
  2234. /* Handle removing and resetting vm mappings related to the vm_struct. */
  2235. static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
  2236. {
  2237. unsigned long start = ULONG_MAX, end = 0;
  2238. unsigned int page_order = vm_area_page_order(area);
  2239. int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
  2240. int flush_dmap = 0;
  2241. int i;
  2242. remove_vm_area(area->addr);
  2243. /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
  2244. if (!flush_reset)
  2245. return;
  2246. /*
  2247. * If not deallocating pages, just do the flush of the VM area and
  2248. * return.
  2249. */
  2250. if (!deallocate_pages) {
  2251. vm_unmap_aliases();
  2252. return;
  2253. }
  2254. /*
  2255. * If execution gets here, flush the vm mapping and reset the direct
  2256. * map. Find the start and end range of the direct mappings to make sure
  2257. * the vm_unmap_aliases() flush includes the direct map.
  2258. */
  2259. for (i = 0; i < area->nr_pages; i += 1U << page_order) {
  2260. unsigned long addr = (unsigned long)page_address(area->pages[i]);
  2261. if (addr) {
  2262. unsigned long page_size;
  2263. page_size = PAGE_SIZE << page_order;
  2264. start = min(addr, start);
  2265. end = max(addr + page_size, end);
  2266. flush_dmap = 1;
  2267. }
  2268. }
  2269. /*
  2270. * Set direct map to something invalid so that it won't be cached if
  2271. * there are any accesses after the TLB flush, then flush the TLB and
  2272. * reset the direct map permissions to the default.
  2273. */
  2274. set_area_direct_map(area, set_direct_map_invalid_noflush);
  2275. _vm_unmap_aliases(start, end, flush_dmap);
  2276. set_area_direct_map(area, set_direct_map_default_noflush);
  2277. }
  2278. static void __vunmap(const void *addr, int deallocate_pages)
  2279. {
  2280. struct vm_struct *area;
  2281. if (!addr)
  2282. return;
  2283. if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
  2284. addr))
  2285. return;
  2286. area = find_vm_area(addr);
  2287. if (unlikely(!area)) {
  2288. WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
  2289. addr);
  2290. return;
  2291. }
  2292. debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
  2293. debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
  2294. kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
  2295. if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) &&
  2296. area->flags & VM_IOREMAP)
  2297. iounmap_phys_range_hook(area->phys_addr, get_vm_area_size(area));
  2298. vm_remove_mappings(area, deallocate_pages);
  2299. if (deallocate_pages) {
  2300. int i;
  2301. for (i = 0; i < area->nr_pages; i++) {
  2302. struct page *page = area->pages[i];
  2303. #ifdef CONFIG_RKP
  2304. u64 va;
  2305. #endif
  2306. BUG_ON(!page);
  2307. mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
  2308. #ifdef CONFIG_RKP
  2309. va = (u64)phys_to_virt(page_to_phys(page));
  2310. if (is_rkp_ro_buffer(va))
  2311. rkp_ro_free((void *)va);
  2312. else
  2313. __free_pages(page, 0);
  2314. #else
  2315. /*
2316. * High-order allocs for huge vmallocs are split, so they
2317. * can be freed as an array of order-0 allocations.
  2318. */
  2319. __free_pages(page, 0);
  2320. #endif
  2321. cond_resched();
  2322. }
  2323. atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
  2324. kvfree(area->pages);
  2325. }
  2326. kfree(area);
  2327. }
  2328. static inline void __vfree_deferred(const void *addr)
  2329. {
  2330. /*
  2331. * Use raw_cpu_ptr() because this can be called from preemptible
  2332. * context. Preemption is absolutely fine here, because the llist_add()
  2333. * implementation is lockless, so it works even if we are adding to
  2334. * another cpu's list. schedule_work() should be fine with this too.
  2335. */
  2336. struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
  2337. if (llist_add((struct llist_node *)addr, &p->list))
  2338. schedule_work(&p->wq);
  2339. }
  2340. /**
  2341. * vfree_atomic - release memory allocated by vmalloc()
  2342. * @addr: memory base address
  2343. *
  2344. * This one is just like vfree() but can be called in any atomic context
  2345. * except NMIs.
  2346. */
  2347. void vfree_atomic(const void *addr)
  2348. {
  2349. BUG_ON(in_nmi());
  2350. kmemleak_free(addr);
  2351. if (!addr)
  2352. return;
  2353. __vfree_deferred(addr);
  2354. }
  2355. static void __vfree(const void *addr)
  2356. {
  2357. if (unlikely(in_interrupt()))
  2358. __vfree_deferred(addr);
  2359. else
  2360. __vunmap(addr, 1);
  2361. }
  2362. /**
  2363. * vfree - Release memory allocated by vmalloc()
  2364. * @addr: Memory base address
  2365. *
2366. * Free the virtually contiguous memory area starting at @addr, as obtained
  2367. * from one of the vmalloc() family of APIs. This will usually also free the
  2368. * physical memory underlying the virtual allocation, but that memory is
  2369. * reference counted, so it will not be freed until the last user goes away.
  2370. *
  2371. * If @addr is NULL, no operation is performed.
  2372. *
  2373. * Context:
  2374. * May sleep if called *not* from interrupt context.
  2375. * Must not be called in NMI context (strictly speaking, it could be
  2376. * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
  2377. * conventions for vfree() arch-dependent would be a really bad idea).
  2378. */
  2379. void vfree(const void *addr)
  2380. {
  2381. BUG_ON(in_nmi());
  2382. kmemleak_free(addr);
  2383. might_sleep_if(!in_interrupt());
  2384. if (!addr)
  2385. return;
  2386. __vfree(addr);
  2387. }
  2388. EXPORT_SYMBOL(vfree);
  2389. /**
  2390. * vunmap - release virtual mapping obtained by vmap()
  2391. * @addr: memory base address
  2392. *
  2393. * Free the virtually contiguous memory area starting at @addr,
  2394. * which was created from the page array passed to vmap().
  2395. *
  2396. * Must not be called in interrupt context.
  2397. */
  2398. void vunmap(const void *addr)
  2399. {
  2400. BUG_ON(in_interrupt());
  2401. might_sleep();
  2402. if (addr)
  2403. __vunmap(addr, 0);
  2404. }
  2405. EXPORT_SYMBOL(vunmap);
  2406. /**
  2407. * vmap - map an array of pages into virtually contiguous space
  2408. * @pages: array of page pointers
  2409. * @count: number of pages to map
  2410. * @flags: vm_area->flags
  2411. * @prot: page protection for the mapping
  2412. *
  2413. * Maps @count pages from @pages into contiguous kernel virtual space.
  2414. * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2415. * (which must be kmalloc or vmalloc memory) and one reference per page in it
  2416. * are transferred from the caller to vmap(), and will be freed / dropped when
  2417. * vfree() is called on the return value.
  2418. *
  2419. * Return: the address of the area or %NULL on failure
  2420. */
  2421. void *vmap(struct page **pages, unsigned int count,
  2422. unsigned long flags, pgprot_t prot)
  2423. {
  2424. struct vm_struct *area;
  2425. unsigned long addr;
  2426. unsigned long size; /* In bytes */
  2427. might_sleep();
  2428. /*
  2429. * Your top guard is someone else's bottom guard. Not having a top
  2430. * guard compromises someone else's mappings too.
  2431. */
  2432. if (WARN_ON_ONCE(flags & VM_NO_GUARD))
  2433. flags &= ~VM_NO_GUARD;
  2434. if (count > totalram_pages())
  2435. return NULL;
  2436. size = (unsigned long)count << PAGE_SHIFT;
  2437. area = get_vm_area_caller(size, flags, __builtin_return_address(0));
  2438. if (!area)
  2439. return NULL;
  2440. addr = (unsigned long)area->addr;
  2441. if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
  2442. pages, PAGE_SHIFT) < 0) {
  2443. vunmap(area->addr);
  2444. return NULL;
  2445. }
  2446. if (flags & VM_MAP_PUT_PAGES) {
  2447. area->pages = pages;
  2448. area->nr_pages = count;
  2449. }
  2450. return area->addr;
  2451. }
  2452. EXPORT_SYMBOL(vmap);
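/*
 * Minimal usage sketch (illustrative only, not part of this file): mapping
 * caller-owned pages contiguously and tearing the mapping down again. Page
 * allocation failures are not handled here for brevity:
 *
 *	struct page *pages[8];
 *	void *addr;
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *
 *	addr = vmap(pages, 8, VM_MAP, PAGE_KERNEL);
 *	if (addr) {
 *		... use the virtually contiguous 8-page buffer ...
 *		vunmap(addr);
 *	}
 *
 *	for (i = 0; i < 8; i++)
 *		__free_page(pages[i]);
 */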
  2453. #ifdef CONFIG_VMAP_PFN
  2454. struct vmap_pfn_data {
  2455. unsigned long *pfns;
  2456. pgprot_t prot;
  2457. unsigned int idx;
  2458. };
  2459. static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
  2460. {
  2461. struct vmap_pfn_data *data = private;
  2462. if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
  2463. return -EINVAL;
  2464. *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
  2465. return 0;
  2466. }
  2467. /**
  2468. * vmap_pfn - map an array of PFNs into virtually contiguous space
  2469. * @pfns: array of PFNs
  2470. * @count: number of pages to map
  2471. * @prot: page protection for the mapping
  2472. *
  2473. * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
  2474. * the start address of the mapping.
  2475. */
  2476. void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
  2477. {
  2478. struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
  2479. struct vm_struct *area;
  2480. area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
  2481. __builtin_return_address(0));
  2482. if (!area)
  2483. return NULL;
  2484. if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
  2485. count * PAGE_SIZE, vmap_pfn_apply, &data)) {
  2486. free_vm_area(area);
  2487. return NULL;
  2488. }
  2489. flush_cache_vmap((unsigned long)area->addr,
  2490. (unsigned long)area->addr + count * PAGE_SIZE);
  2491. return area->addr;
  2492. }
  2493. EXPORT_SYMBOL_GPL(vmap_pfn);
  2494. #endif /* CONFIG_VMAP_PFN */
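/*
 * Minimal sketch (illustrative only, not part of this file): vmap_pfn() is
 * meant for PFNs that have no struct page (e.g. device memory), which is
 * why vmap_pfn_apply() above rejects pfn_valid() PFNs. A caller with an
 * array of such PFNs would do roughly:
 *
 *	void *addr = vmap_pfn(pfns, count, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (addr) {
 *		... access the mapping ...
 *		vunmap(addr);
 *	}
 */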
  2495. static inline unsigned int
  2496. vm_area_alloc_pages(gfp_t gfp, int nid,
  2497. unsigned int order, unsigned int nr_pages, struct page **pages)
  2498. {
  2499. unsigned int nr_allocated = 0;
  2500. struct page *page;
  2501. int i;
2502. /*
2503. * For order-0 pages we make use of the bulk allocator. If
2504. * the page array is only partly populated, or not populated
2505. * at all, due to failures, fall back to the single page
2506. * allocator, which is more permissive.
2507. */
  2508. if (!order) {
  2509. gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
  2510. while (nr_allocated < nr_pages) {
  2511. unsigned int nr, nr_pages_request;
  2512. /*
2513. * The maximum allowed request is hard-coded to 100
2514. * pages per call. That is done in order to prevent a
2515. * long preemption-off scenario in the bulk allocator,
2516. * so the range is [1:100].
  2517. */
  2518. nr_pages_request = min(100U, nr_pages - nr_allocated);
2519. /* Memory allocation should consider mempolicy: we can't
2520. * simply use the nearest node when nid == NUMA_NO_NODE,
2521. * otherwise memory may be allocated on only one node while
2522. * the mempolicy wants to allocate memory by interleaving.
  2523. */
  2524. if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
  2525. nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
  2526. nr_pages_request,
  2527. pages + nr_allocated);
  2528. else
  2529. nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
  2530. nr_pages_request,
  2531. pages + nr_allocated);
  2532. nr_allocated += nr;
  2533. cond_resched();
  2534. /*
2535. * If zero pages, or only part of the request, were obtained,
2536. * fall back to the single page allocator.
  2537. */
  2538. if (nr != nr_pages_request)
  2539. break;
  2540. }
  2541. }
  2542. /* High-order pages or fallback path if "bulk" fails. */
  2543. while (nr_allocated < nr_pages) {
  2544. if (fatal_signal_pending(current))
  2545. break;
  2546. if (nid == NUMA_NO_NODE)
  2547. page = alloc_pages(gfp, order);
  2548. else
  2549. page = alloc_pages_node(nid, gfp, order);
  2550. if (unlikely(!page))
  2551. break;
  2552. /*
  2553. * Higher order allocations must be able to be treated as
2554. * independent small pages by callers (as they can with
  2555. * small-page vmallocs). Some drivers do their own refcounting
  2556. * on vmalloc_to_page() pages, some use page->mapping,
  2557. * page->lru, etc.
  2558. */
  2559. if (order)
  2560. split_page(page, order);
  2561. /*
  2562. * Careful, we allocate and map page-order pages, but
  2563. * tracking is done per PAGE_SIZE page so as to keep the
  2564. * vm_struct APIs independent of the physical/mapped size.
  2565. */
  2566. for (i = 0; i < (1U << order); i++)
  2567. pages[nr_allocated + i] = page + i;
  2568. cond_resched();
  2569. nr_allocated += 1U << order;
  2570. }
  2571. return nr_allocated;
  2572. }
  2573. static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  2574. pgprot_t prot, unsigned int page_shift,
  2575. int node)
  2576. {
  2577. const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
  2578. bool nofail = gfp_mask & __GFP_NOFAIL;
  2579. unsigned long addr = (unsigned long)area->addr;
  2580. unsigned long size = get_vm_area_size(area);
  2581. unsigned long array_size;
  2582. unsigned int nr_small_pages = size >> PAGE_SHIFT;
  2583. unsigned int page_order;
  2584. unsigned int flags;
  2585. int ret;
  2586. array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
  2587. gfp_mask |= __GFP_NOWARN;
  2588. if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
  2589. gfp_mask |= __GFP_HIGHMEM;
  2590. /* Please note that the recursion is strictly bounded. */
  2591. if (array_size > PAGE_SIZE) {
  2592. area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
  2593. area->caller);
  2594. } else {
  2595. area->pages = kmalloc_node(array_size, nested_gfp, node);
  2596. }
  2597. if (!area->pages) {
  2598. warn_alloc(gfp_mask, NULL,
  2599. "vmalloc error: size %lu, failed to allocated page array size %lu",
  2600. nr_small_pages * PAGE_SIZE, array_size);
  2601. free_vm_area(area);
  2602. return NULL;
  2603. }
  2604. set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
  2605. page_order = vm_area_page_order(area);
  2606. area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
  2607. node, page_order, nr_small_pages, area->pages);
  2608. atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
  2609. if (gfp_mask & __GFP_ACCOUNT) {
  2610. int i;
  2611. for (i = 0; i < area->nr_pages; i++)
  2612. mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
  2613. }
  2614. /*
  2615. * If not enough pages were obtained to accomplish an
  2616. * allocation request, free them via __vfree() if any.
  2617. */
  2618. if (area->nr_pages != nr_small_pages) {
  2619. /* vm_area_alloc_pages() can also fail due to a fatal signal */
  2620. if (!fatal_signal_pending(current))
  2621. warn_alloc(gfp_mask, NULL,
  2622. "vmalloc error: size %lu, page order %u, failed to allocate pages",
  2623. area->nr_pages * PAGE_SIZE, page_order);
  2624. goto fail;
  2625. }
  2626. /*
2627. * Page table allocations ignore the external gfp mask; enforce it
2628. * via the scope API.
  2629. */
  2630. if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
  2631. flags = memalloc_nofs_save();
  2632. else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
  2633. flags = memalloc_noio_save();
  2634. do {
  2635. ret = vmap_pages_range(addr, addr + size, prot, area->pages,
  2636. page_shift);
  2637. if (nofail && (ret < 0))
  2638. schedule_timeout_uninterruptible(1);
  2639. } while (nofail && (ret < 0));
  2640. if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
  2641. memalloc_nofs_restore(flags);
  2642. else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
  2643. memalloc_noio_restore(flags);
  2644. if (ret < 0) {
  2645. warn_alloc(gfp_mask, NULL,
  2646. "vmalloc error: size %lu, failed to map pages",
  2647. area->nr_pages * PAGE_SIZE);
  2648. goto fail;
  2649. }
  2650. return area->addr;
  2651. fail:
  2652. __vfree(area->addr);
  2653. return NULL;
  2654. }
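/*
 * Illustrative sketch (not part of this file): the memalloc scope API
 * used above is the same mechanism a GFP_NOFS caller relies on. Any
 * allocation made between save and restore behaves as if __GFP_FS were
 * cleared, so the page-table allocations issued by vmap_pages_range()
 * cannot recurse into filesystem reclaim. The function name below is
 * hypothetical.
 *
 *	static void *fs_safe_vmalloc(unsigned long size)
 *	{
 *		unsigned int flags = memalloc_nofs_save();
 *		void *p = vmalloc(size);
 *
 *		memalloc_nofs_restore(flags);
 *		return p;
 *	}
 */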
  2655. /**
  2656. * __vmalloc_node_range - allocate virtually contiguous memory
  2657. * @size: allocation size
  2658. * @align: desired alignment
  2659. * @start: vm area range start
  2660. * @end: vm area range end
  2661. * @gfp_mask: flags for the page level allocator
  2662. * @prot: protection mask for the allocated pages
  2663. * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
  2664. * @node: node to use for allocation or NUMA_NO_NODE
  2665. * @caller: caller's return address
  2666. *
  2667. * Allocate enough pages to cover @size from the page level
  2668. * allocator with @gfp_mask flags. Please note that the full set of gfp
2669. * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
  2670. * supported.
  2671. * Zone modifiers are not supported. From the reclaim modifiers
  2672. * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
  2673. * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
  2674. * __GFP_RETRY_MAYFAIL are not supported).
  2675. *
2676. * __GFP_NOWARN can be used to suppress failure messages.
  2677. *
  2678. * Map them into contiguous kernel virtual space, using a pagetable
  2679. * protection of @prot.
  2680. *
  2681. * Return: the address of the area or %NULL on failure
  2682. */
  2683. void *__vmalloc_node_range(unsigned long size, unsigned long align,
  2684. unsigned long start, unsigned long end, gfp_t gfp_mask,
  2685. pgprot_t prot, unsigned long vm_flags, int node,
  2686. const void *caller)
  2687. {
  2688. struct vm_struct *area;
  2689. void *ret;
  2690. kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
  2691. unsigned long real_size = size;
  2692. unsigned long real_align = align;
  2693. unsigned int shift = PAGE_SHIFT;
  2694. if (WARN_ON_ONCE(!size))
  2695. return NULL;
  2696. if ((size >> PAGE_SHIFT) > totalram_pages()) {
  2697. warn_alloc(gfp_mask, NULL,
  2698. "vmalloc error: size %lu, exceeds total pages",
  2699. real_size);
  2700. return NULL;
  2701. }
  2702. if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
  2703. unsigned long size_per_node;
  2704. /*
  2705. * Try huge pages. Only try for PAGE_KERNEL allocations,
  2706. * others like modules don't yet expect huge pages in
  2707. * their allocations due to apply_to_page_range not
  2708. * supporting them.
  2709. */
  2710. size_per_node = size;
  2711. if (node == NUMA_NO_NODE)
  2712. size_per_node /= num_online_nodes();
  2713. if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
  2714. shift = PMD_SHIFT;
  2715. else
  2716. shift = arch_vmap_pte_supported_shift(size_per_node);
  2717. align = max(real_align, 1UL << shift);
  2718. size = ALIGN(real_size, 1UL << shift);
  2719. }
  2720. again:
  2721. area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
  2722. VM_UNINITIALIZED | vm_flags, start, end, node,
  2723. gfp_mask, caller);
  2724. if (!area) {
  2725. bool nofail = gfp_mask & __GFP_NOFAIL;
  2726. warn_alloc(gfp_mask, NULL,
  2727. "vmalloc error: size %lu, vm_struct allocation failed%s",
  2728. real_size, (nofail) ? ". Retrying." : "");
  2729. if (nofail) {
  2730. schedule_timeout_uninterruptible(1);
  2731. goto again;
  2732. }
  2733. goto fail;
  2734. }
  2735. /*
  2736. * Prepare arguments for __vmalloc_area_node() and
  2737. * kasan_unpoison_vmalloc().
  2738. */
  2739. if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
  2740. if (kasan_hw_tags_enabled()) {
  2741. /*
  2742. * Modify protection bits to allow tagging.
  2743. * This must be done before mapping.
  2744. */
  2745. prot = arch_vmap_pgprot_tagged(prot);
  2746. /*
  2747. * Skip page_alloc poisoning and zeroing for physical
  2748. * pages backing VM_ALLOC mapping. Memory is instead
  2749. * poisoned and zeroed by kasan_unpoison_vmalloc().
  2750. */
  2751. gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
  2752. }
  2753. /* Take note that the mapping is PAGE_KERNEL. */
  2754. kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
  2755. }
  2756. /* Allocate physical pages and map them into vmalloc space. */
  2757. ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
  2758. if (!ret)
  2759. goto fail;
  2760. /*
  2761. * Mark the pages as accessible, now that they are mapped.
  2762. * The condition for setting KASAN_VMALLOC_INIT should complement the
  2763. * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
  2764. * to make sure that memory is initialized under the same conditions.
  2765. * Tag-based KASAN modes only assign tags to normal non-executable
  2766. * allocations, see __kasan_unpoison_vmalloc().
  2767. */
  2768. kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
  2769. if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
  2770. (gfp_mask & __GFP_SKIP_ZERO))
  2771. kasan_flags |= KASAN_VMALLOC_INIT;
  2772. /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
  2773. area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
  2774. /*
  2775. * In this function, newly allocated vm_struct has VM_UNINITIALIZED
  2776. * flag. It means that vm_struct is not fully initialized.
  2777. * Now, it is fully initialized, so remove this flag here.
  2778. */
  2779. clear_vm_uninitialized_flag(area);
  2780. size = PAGE_ALIGN(size);
  2781. if (!(vm_flags & VM_DEFER_KMEMLEAK))
  2782. kmemleak_vmalloc(area, size, gfp_mask);
  2783. return area->addr;
  2784. fail:
  2785. if (shift > PAGE_SHIFT) {
  2786. shift = PAGE_SHIFT;
  2787. align = real_align;
  2788. size = real_size;
  2789. goto again;
  2790. }
  2791. return NULL;
  2792. }
  2793. /**
  2794. * __vmalloc_node - allocate virtually contiguous memory
  2795. * @size: allocation size
  2796. * @align: desired alignment
  2797. * @gfp_mask: flags for the page level allocator
  2798. * @node: node to use for allocation or NUMA_NO_NODE
  2799. * @caller: caller's return address
  2800. *
  2801. * Allocate enough pages to cover @size from the page level allocator with
  2802. * @gfp_mask flags. Map them into contiguous kernel virtual space.
  2803. *
2804. * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2805. * and __GFP_NOFAIL - are not supported.
2806. *
2807. * Any use of gfp flags outside of GFP_KERNEL should be discussed
2808. * with the mm people first.
  2809. *
  2810. * Return: pointer to the allocated memory or %NULL on error
  2811. */
  2812. void *__vmalloc_node(unsigned long size, unsigned long align,
  2813. gfp_t gfp_mask, int node, const void *caller)
  2814. {
  2815. return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
  2816. gfp_mask, PAGE_KERNEL, 0, node, caller);
  2817. }
  2818. /*
  2819. * This is only for performance analysis of vmalloc and stress purpose.
  2820. * It is required by vmalloc test module, therefore do not use it other
  2821. * than that.
  2822. */
  2823. #ifdef CONFIG_TEST_VMALLOC_MODULE
  2824. EXPORT_SYMBOL_GPL(__vmalloc_node);
  2825. #endif
  2826. void *__vmalloc(unsigned long size, gfp_t gfp_mask)
  2827. {
  2828. return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
  2829. __builtin_return_address(0));
  2830. }
  2831. EXPORT_SYMBOL(__vmalloc);
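/*
 * Illustrative sketch (not part of this file): gfp masks that the
 * documentation in this file describes as supported for the
 * __vmalloc*() family. The size variable is hypothetical.
 *
 *	void *a = __vmalloc(size, GFP_KERNEL);
 *	void *b = __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
 *	void *c = __vmalloc(size, GFP_NOFS);
 *	void *d = __vmalloc(size, GFP_NOIO);
 *
 * Zone modifiers, GFP_NOWAIT (no __GFP_DIRECT_RECLAIM), __GFP_NORETRY
 * and __GFP_RETRY_MAYFAIL are documented above as unsupported.
 */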
  2832. /**
  2833. * vmalloc - allocate virtually contiguous memory
  2834. * @size: allocation size
  2835. *
  2836. * Allocate enough pages to cover @size from the page level
  2837. * allocator and map them into contiguous kernel virtual space.
  2838. *
  2839. * For tight control over page level allocator and protection flags
  2840. * use __vmalloc() instead.
  2841. *
  2842. * Return: pointer to the allocated memory or %NULL on error
  2843. */
  2844. void *vmalloc(unsigned long size)
  2845. {
  2846. return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
  2847. __builtin_return_address(0));
  2848. }
  2849. EXPORT_SYMBOL(vmalloc);
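/*
 * Illustrative sketch (not part of this file): typical vmalloc()/vfree()
 * pairing for a large, virtually contiguous table that does not need to
 * be physically contiguous. The structure and helpers are hypothetical;
 * use vzalloc() instead if zeroed memory is required.
 *
 *	struct big_table {
 *		unsigned long nr_entries;
 *		u64 entries[];
 *	};
 *
 *	static struct big_table *big_table_create(unsigned long nr)
 *	{
 *		struct big_table *t = vmalloc(struct_size(t, entries, nr));
 *
 *		if (!t)
 *			return NULL;
 *		t->nr_entries = nr;
 *		return t;
 *	}
 *
 *	static void big_table_destroy(struct big_table *t)
 *	{
 *		vfree(t);
 *	}
 */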
  2850. /**
  2851. * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
  2852. * @size: allocation size
  2853. * @gfp_mask: flags for the page level allocator
  2854. *
  2855. * Allocate enough pages to cover @size from the page level
  2856. * allocator and map them into contiguous kernel virtual space.
  2857. * If @size is greater than or equal to PMD_SIZE, allow using
2858. * huge pages for the memory.
  2859. *
  2860. * Return: pointer to the allocated memory or %NULL on error
  2861. */
  2862. void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
  2863. {
  2864. return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
  2865. gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
  2866. NUMA_NO_NODE, __builtin_return_address(0));
  2867. }
  2868. EXPORT_SYMBOL_GPL(vmalloc_huge);
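/*
 * Illustrative sketch (not part of this file): a request big enough for
 * VM_ALLOW_HUGE_VMAP to matter. Sizes of PMD_SIZE and above may be
 * mapped with huge pages where the architecture supports it; smaller
 * requests simply fall back to base pages. The size is hypothetical.
 *
 *	void *tbl = vmalloc_huge(32UL << 20, GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 */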
  2869. /**
  2870. * vzalloc - allocate virtually contiguous memory with zero fill
  2871. * @size: allocation size
  2872. *
  2873. * Allocate enough pages to cover @size from the page level
  2874. * allocator and map them into contiguous kernel virtual space.
  2875. * The memory allocated is set to zero.
  2876. *
  2877. * For tight control over page level allocator and protection flags
  2878. * use __vmalloc() instead.
  2879. *
  2880. * Return: pointer to the allocated memory or %NULL on error
  2881. */
  2882. void *vzalloc(unsigned long size)
  2883. {
  2884. return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
  2885. __builtin_return_address(0));
  2886. }
  2887. EXPORT_SYMBOL(vzalloc);
  2888. /**
  2889. * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
  2890. * @size: allocation size
  2891. *
  2892. * The resulting memory area is zeroed so it can be mapped to userspace
  2893. * without leaking data.
  2894. *
  2895. * Return: pointer to the allocated memory or %NULL on error
  2896. */
  2897. void *vmalloc_user(unsigned long size)
  2898. {
  2899. return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
  2900. GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
  2901. VM_USERMAP, NUMA_NO_NODE,
  2902. __builtin_return_address(0));
  2903. }
  2904. EXPORT_SYMBOL(vmalloc_user);
  2905. /**
  2906. * vmalloc_node - allocate memory on a specific node
  2907. * @size: allocation size
  2908. * @node: numa node
  2909. *
  2910. * Allocate enough pages to cover @size from the page level
  2911. * allocator and map them into contiguous kernel virtual space.
  2912. *
  2913. * For tight control over page level allocator and protection flags
  2914. * use __vmalloc() instead.
  2915. *
  2916. * Return: pointer to the allocated memory or %NULL on error
  2917. */
  2918. void *vmalloc_node(unsigned long size, int node)
  2919. {
  2920. return __vmalloc_node(size, 1, GFP_KERNEL, node,
  2921. __builtin_return_address(0));
  2922. }
  2923. EXPORT_SYMBOL(vmalloc_node);
  2924. /**
  2925. * vzalloc_node - allocate memory on a specific node with zero fill
  2926. * @size: allocation size
  2927. * @node: numa node
  2928. *
  2929. * Allocate enough pages to cover @size from the page level
  2930. * allocator and map them into contiguous kernel virtual space.
  2931. * The memory allocated is set to zero.
  2932. *
  2933. * Return: pointer to the allocated memory or %NULL on error
  2934. */
  2935. void *vzalloc_node(unsigned long size, int node)
  2936. {
  2937. return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
  2938. __builtin_return_address(0));
  2939. }
  2940. EXPORT_SYMBOL(vzalloc_node);
  2941. #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
  2942. #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
  2943. #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
  2944. #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
  2945. #else
  2946. /*
  2947. * 64b systems should always have either DMA or DMA32 zones. For others
  2948. * GFP_DMA32 should do the right thing and use the normal zone.
  2949. */
  2950. #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
  2951. #endif
  2952. /**
  2953. * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  2954. * @size: allocation size
  2955. *
  2956. * Allocate enough 32bit PA addressable pages to cover @size from the
  2957. * page level allocator and map them into contiguous kernel virtual space.
  2958. *
  2959. * Return: pointer to the allocated memory or %NULL on error
  2960. */
  2961. void *vmalloc_32(unsigned long size)
  2962. {
  2963. return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
  2964. __builtin_return_address(0));
  2965. }
  2966. EXPORT_SYMBOL(vmalloc_32);
  2967. /**
  2968. * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
  2969. * @size: allocation size
  2970. *
  2971. * The resulting memory area is 32bit addressable and zeroed so it can be
  2972. * mapped to userspace without leaking data.
  2973. *
  2974. * Return: pointer to the allocated memory or %NULL on error
  2975. */
  2976. void *vmalloc_32_user(unsigned long size)
  2977. {
  2978. return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
  2979. GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
  2980. VM_USERMAP, NUMA_NO_NODE,
  2981. __builtin_return_address(0));
  2982. }
  2983. EXPORT_SYMBOL(vmalloc_32_user);
  2984. /*
2985. * Small helper routine: copy contents from addr into buf.
2986. * If a page is not present, the corresponding bytes are zero-filled.
  2987. */
  2988. static int aligned_vread(char *buf, char *addr, unsigned long count)
  2989. {
  2990. struct page *p;
  2991. int copied = 0;
  2992. while (count) {
  2993. unsigned long offset, length;
  2994. offset = offset_in_page(addr);
  2995. length = PAGE_SIZE - offset;
  2996. if (length > count)
  2997. length = count;
  2998. p = vmalloc_to_page(addr);
  2999. /*
3000. * To access this _mapped_ area safely we would need a lock,
3001. * but taking one here would add overhead to the vmalloc()/vfree()
3002. * paths for the sake of this rarely used _debug_ interface.
3003. * Instead, use kmap_atomic() and accept a small overhead in
3004. * this access function.
  3005. */
  3006. if (p) {
  3007. /* We can expect USER0 is not used -- see vread() */
  3008. void *map = kmap_atomic(p);
  3009. memcpy(buf, map + offset, length);
  3010. kunmap_atomic(map);
  3011. } else
  3012. memset(buf, 0, length);
  3013. addr += length;
  3014. buf += length;
  3015. copied += length;
  3016. count -= length;
  3017. }
  3018. return copied;
  3019. }
  3020. /**
  3021. * vread() - read vmalloc area in a safe way.
  3022. * @buf: buffer for reading data
  3023. * @addr: vm address.
  3024. * @count: number of bytes to be read.
  3025. *
3026. * This function checks that addr is a valid vmalloc'ed area and
3027. * copies data from that area to the given buffer. If the given memory
3028. * range of [addr...addr+count) includes some valid address, data is
3029. * copied to the proper area of @buf. Memory holes are zero-filled.
3030. * An IOREMAP area is treated as a memory hole and no copy is done.
3031. *
3032. * If [addr...addr+count) does not intersect any live vm_struct
3033. * area, 0 is returned. @buf should be a kernel buffer.
3034. *
3035. * Note: In usual operation vread() is never necessary, because the
3036. * caller should know the vmalloc() area is valid and can use memcpy().
3037. * This is for routines which have to access the vmalloc area without
3038. * any prior information, such as /proc/kcore.
  3039. *
  3040. * Return: number of bytes for which addr and buf should be increased
  3041. * (same number as @count) or %0 if [addr...addr+count) doesn't
  3042. * include any intersection with valid vmalloc area
  3043. */
  3044. long vread(char *buf, char *addr, unsigned long count)
  3045. {
  3046. struct vmap_area *va;
  3047. struct vm_struct *vm;
  3048. char *vaddr, *buf_start = buf;
  3049. unsigned long buflen = count;
  3050. unsigned long n;
  3051. addr = kasan_reset_tag(addr);
  3052. /* Don't allow overflow */
  3053. if ((unsigned long) addr + count < count)
  3054. count = -(unsigned long) addr;
  3055. spin_lock(&vmap_area_lock);
  3056. va = find_vmap_area_exceed_addr((unsigned long)addr);
  3057. if (!va)
  3058. goto finished;
  3059. /* no intersects with alive vmap_area */
  3060. if ((unsigned long)addr + count <= va->va_start)
  3061. goto finished;
  3062. list_for_each_entry_from(va, &vmap_area_list, list) {
  3063. if (!count)
  3064. break;
  3065. if (!va->vm)
  3066. continue;
  3067. vm = va->vm;
  3068. vaddr = (char *) vm->addr;
  3069. if (addr >= vaddr + get_vm_area_size(vm))
  3070. continue;
  3071. while (addr < vaddr) {
  3072. if (count == 0)
  3073. goto finished;
  3074. *buf = '\0';
  3075. buf++;
  3076. addr++;
  3077. count--;
  3078. }
  3079. n = vaddr + get_vm_area_size(vm) - addr;
  3080. if (n > count)
  3081. n = count;
  3082. if (!(vm->flags & VM_IOREMAP))
  3083. aligned_vread(buf, addr, n);
  3084. else /* IOREMAP area is treated as memory hole */
  3085. memset(buf, 0, n);
  3086. buf += n;
  3087. addr += n;
  3088. count -= n;
  3089. }
  3090. finished:
  3091. spin_unlock(&vmap_area_lock);
  3092. if (buf == buf_start)
  3093. return 0;
  3094. /* zero-fill memory holes */
  3095. if (buf != buf_start + buflen)
  3096. memset(buf, 0, buflen - (buf - buf_start));
  3097. return buflen;
  3098. }
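/*
 * Illustrative sketch (not part of this file): how a /proc/kcore-style
 * reader might use vread() on an address it cannot validate itself.
 * Holes and IOREMAP ranges come back zero-filled, and a return value of
 * 0 means the range intersects no live vmalloc area. The identifiers
 * below are hypothetical.
 *
 *	char *kbuf = kzalloc(len, GFP_KERNEL);
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	if (!vread(kbuf, (char *)start, len)) {
 *		kfree(kbuf);
 *		return -ENXIO;
 *	}
 */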
  3099. /**
  3100. * remap_vmalloc_range_partial - map vmalloc pages to userspace
  3101. * @vma: vma to cover
  3102. * @uaddr: target user address to start at
  3103. * @kaddr: virtual address of vmalloc kernel memory
  3104. * @pgoff: offset from @kaddr to start at
  3105. * @size: size of map area
  3106. *
  3107. * Returns: 0 for success, -Exxx on failure
  3108. *
  3109. * This function checks that @kaddr is a valid vmalloc'ed area,
  3110. * and that it is big enough to cover the range starting at
3111. * @uaddr in @vma. Will return failure if that criterion isn't
  3112. * met.
  3113. *
  3114. * Similar to remap_pfn_range() (see mm/memory.c)
  3115. */
  3116. int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
  3117. void *kaddr, unsigned long pgoff,
  3118. unsigned long size)
  3119. {
  3120. struct vm_struct *area;
  3121. unsigned long off;
  3122. unsigned long end_index;
  3123. if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
  3124. return -EINVAL;
  3125. size = PAGE_ALIGN(size);
  3126. if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
  3127. return -EINVAL;
  3128. area = find_vm_area(kaddr);
  3129. if (!area)
  3130. return -EINVAL;
  3131. if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
  3132. return -EINVAL;
  3133. if (check_add_overflow(size, off, &end_index) ||
  3134. end_index > get_vm_area_size(area))
  3135. return -EINVAL;
  3136. kaddr += off;
  3137. do {
  3138. struct page *page = vmalloc_to_page(kaddr);
  3139. int ret;
  3140. ret = vm_insert_page(vma, uaddr, page);
  3141. if (ret)
  3142. return ret;
  3143. uaddr += PAGE_SIZE;
  3144. kaddr += PAGE_SIZE;
  3145. size -= PAGE_SIZE;
  3146. } while (size > 0);
  3147. vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
  3148. return 0;
  3149. }
  3150. /**
  3151. * remap_vmalloc_range - map vmalloc pages to userspace
  3152. * @vma: vma to cover (map full range of vma)
  3153. * @addr: vmalloc memory
  3154. * @pgoff: number of pages into addr before first page to map
  3155. *
  3156. * Returns: 0 for success, -Exxx on failure
  3157. *
  3158. * This function checks that addr is a valid vmalloc'ed area, and
  3159. * that it is big enough to cover the vma. Will return failure if
3160. * that criterion isn't met.
  3161. *
  3162. * Similar to remap_pfn_range() (see mm/memory.c)
  3163. */
  3164. int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
  3165. unsigned long pgoff)
  3166. {
  3167. return remap_vmalloc_range_partial(vma, vma->vm_start,
  3168. addr, pgoff,
  3169. vma->vm_end - vma->vm_start);
  3170. }
  3171. EXPORT_SYMBOL(remap_vmalloc_range);
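/*
 * Illustrative sketch (not part of this file): the usual pairing of
 * vmalloc_user() with remap_vmalloc_range() in a character device that
 * exposes a kernel buffer to userspace. The device structure and its
 * fields are hypothetical; the buffer is assumed to have been allocated
 * with vmalloc_user() so the area carries VM_USERMAP and is pre-zeroed.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *		unsigned long len = vma->vm_end - vma->vm_start;
 *
 *		if (len > dev->buf_size)
 *			return -EINVAL;
 *
 *		return remap_vmalloc_range(vma, dev->buf, vma->vm_pgoff);
 *	}
 */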
  3172. void free_vm_area(struct vm_struct *area)
  3173. {
  3174. struct vm_struct *ret;
  3175. ret = remove_vm_area(area->addr);
  3176. BUG_ON(ret != area);
  3177. kfree(area);
  3178. }
  3179. EXPORT_SYMBOL_GPL(free_vm_area);
  3180. #ifdef CONFIG_SMP
  3181. static struct vmap_area *node_to_va(struct rb_node *n)
  3182. {
  3183. return rb_entry_safe(n, struct vmap_area, rb_node);
  3184. }
  3185. /**
  3186. * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
  3187. * @addr: target address
  3188. *
3189. * Returns: the vmap_area if it is found. If there is no such area,
3190. * the highest vmap_area below @addr (searched in reverse order) is
3191. * returned, i.e. one with va->va_start < addr && va->va_end < addr,
3192. * or NULL if there are no areas before @addr at all.
  3193. */
  3194. static struct vmap_area *
  3195. pvm_find_va_enclose_addr(unsigned long addr)
  3196. {
  3197. struct vmap_area *va, *tmp;
  3198. struct rb_node *n;
  3199. n = free_vmap_area_root.rb_node;
  3200. va = NULL;
  3201. while (n) {
  3202. tmp = rb_entry(n, struct vmap_area, rb_node);
  3203. if (tmp->va_start <= addr) {
  3204. va = tmp;
  3205. if (tmp->va_end >= addr)
  3206. break;
  3207. n = n->rb_right;
  3208. } else {
  3209. n = n->rb_left;
  3210. }
  3211. }
  3212. return va;
  3213. }
  3214. /**
  3215. * pvm_determine_end_from_reverse - find the highest aligned address
  3216. * of free block below VMALLOC_END
  3217. * @va:
3218. * in - the VA we start the search from (reverse order);
  3219. * out - the VA with the highest aligned end address.
  3220. * @align: alignment for required highest address
  3221. *
  3222. * Returns: determined end address within vmap_area
  3223. */
  3224. static unsigned long
  3225. pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
  3226. {
  3227. unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
  3228. unsigned long addr;
  3229. if (likely(*va)) {
  3230. list_for_each_entry_from_reverse((*va),
  3231. &free_vmap_area_list, list) {
  3232. addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
  3233. if ((*va)->va_start < addr)
  3234. return addr;
  3235. }
  3236. }
  3237. return 0;
  3238. }
  3239. /**
  3240. * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
  3241. * @offsets: array containing offset of each area
  3242. * @sizes: array containing size of each area
  3243. * @nr_vms: the number of areas to allocate
  3244. * @align: alignment, all entries in @offsets and @sizes must be aligned to this
  3245. *
  3246. * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  3247. * vm_structs on success, %NULL on failure
  3248. *
  3249. * Percpu allocator wants to use congruent vm areas so that it can
  3250. * maintain the offsets among percpu areas. This function allocates
  3251. * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
  3252. * be scattered pretty far, distance between two areas easily going up
  3253. * to gigabytes. To avoid interacting with regular vmallocs, these
  3254. * areas are allocated from top.
  3255. *
  3256. * Despite its complicated look, this allocator is rather simple. It
  3257. * does everything top-down and scans free blocks from the end looking
3258. * for a matching base. While scanning, if any of the areas does not fit,
3259. * the base address is pulled down to fit the area. Scanning is repeated till
  3260. * all the areas fit and then all necessary data structures are inserted
  3261. * and the result is returned.
  3262. */
  3263. struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
  3264. const size_t *sizes, int nr_vms,
  3265. size_t align)
  3266. {
  3267. const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
  3268. const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
  3269. struct vmap_area **vas, *va;
  3270. struct vm_struct **vms;
  3271. int area, area2, last_area, term_area;
  3272. unsigned long base, start, size, end, last_end, orig_start, orig_end;
  3273. bool purged = false;
  3274. /* verify parameters and allocate data structures */
  3275. BUG_ON(offset_in_page(align) || !is_power_of_2(align));
  3276. for (last_area = 0, area = 0; area < nr_vms; area++) {
  3277. start = offsets[area];
  3278. end = start + sizes[area];
  3279. /* is everything aligned properly? */
  3280. BUG_ON(!IS_ALIGNED(offsets[area], align));
  3281. BUG_ON(!IS_ALIGNED(sizes[area], align));
  3282. /* detect the area with the highest address */
  3283. if (start > offsets[last_area])
  3284. last_area = area;
  3285. for (area2 = area + 1; area2 < nr_vms; area2++) {
  3286. unsigned long start2 = offsets[area2];
  3287. unsigned long end2 = start2 + sizes[area2];
  3288. BUG_ON(start2 < end && start < end2);
  3289. }
  3290. }
  3291. last_end = offsets[last_area] + sizes[last_area];
  3292. if (vmalloc_end - vmalloc_start < last_end) {
  3293. WARN_ON(true);
  3294. return NULL;
  3295. }
  3296. vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
  3297. vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
  3298. if (!vas || !vms)
  3299. goto err_free2;
  3300. for (area = 0; area < nr_vms; area++) {
  3301. vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
  3302. vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
  3303. if (!vas[area] || !vms[area])
  3304. goto err_free;
  3305. }
  3306. retry:
  3307. spin_lock(&free_vmap_area_lock);
  3308. /* start scanning - we scan from the top, begin with the last area */
  3309. area = term_area = last_area;
  3310. start = offsets[area];
  3311. end = start + sizes[area];
  3312. va = pvm_find_va_enclose_addr(vmalloc_end);
  3313. base = pvm_determine_end_from_reverse(&va, align) - end;
  3314. while (true) {
  3315. /*
  3316. * base might have underflowed, add last_end before
  3317. * comparing.
  3318. */
  3319. if (base + last_end < vmalloc_start + last_end)
  3320. goto overflow;
  3321. /*
  3322. * Fitting base has not been found.
  3323. */
  3324. if (va == NULL)
  3325. goto overflow;
  3326. /*
  3327. * If required width exceeds current VA block, move
  3328. * base downwards and then recheck.
  3329. */
  3330. if (base + end > va->va_end) {
  3331. base = pvm_determine_end_from_reverse(&va, align) - end;
  3332. term_area = area;
  3333. continue;
  3334. }
  3335. /*
  3336. * If this VA does not fit, move base downwards and recheck.
  3337. */
  3338. if (base + start < va->va_start) {
  3339. va = node_to_va(rb_prev(&va->rb_node));
  3340. base = pvm_determine_end_from_reverse(&va, align) - end;
  3341. term_area = area;
  3342. continue;
  3343. }
  3344. /*
  3345. * This area fits, move on to the previous one. If
  3346. * the previous one is the terminal one, we're done.
  3347. */
  3348. area = (area + nr_vms - 1) % nr_vms;
  3349. if (area == term_area)
  3350. break;
  3351. start = offsets[area];
  3352. end = start + sizes[area];
  3353. va = pvm_find_va_enclose_addr(base + end);
  3354. }
  3355. /* we've found a fitting base, insert all va's */
  3356. for (area = 0; area < nr_vms; area++) {
  3357. int ret;
  3358. start = base + offsets[area];
  3359. size = sizes[area];
  3360. va = pvm_find_va_enclose_addr(start);
  3361. if (WARN_ON_ONCE(va == NULL))
  3362. /* It is a BUG(), but trigger recovery instead. */
  3363. goto recovery;
  3364. ret = adjust_va_to_fit_type(&free_vmap_area_root,
  3365. &free_vmap_area_list,
  3366. va, start, size);
  3367. if (WARN_ON_ONCE(unlikely(ret)))
  3368. /* It is a BUG(), but trigger recovery instead. */
  3369. goto recovery;
  3370. /* Allocated area. */
  3371. va = vas[area];
  3372. va->va_start = start;
  3373. va->va_end = start + size;
  3374. }
  3375. spin_unlock(&free_vmap_area_lock);
  3376. /* populate the kasan shadow space */
  3377. for (area = 0; area < nr_vms; area++) {
  3378. if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
  3379. goto err_free_shadow;
  3380. }
  3381. /* insert all vm's */
  3382. spin_lock(&vmap_area_lock);
  3383. for (area = 0; area < nr_vms; area++) {
  3384. insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
  3385. setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
  3386. pcpu_get_vm_areas);
  3387. }
  3388. spin_unlock(&vmap_area_lock);
  3389. /*
  3390. * Mark allocated areas as accessible. Do it now as a best-effort
  3391. * approach, as they can be mapped outside of vmalloc code.
  3392. * With hardware tag-based KASAN, marking is skipped for
  3393. * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
  3394. */
  3395. for (area = 0; area < nr_vms; area++)
  3396. vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
  3397. vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
  3398. kfree(vas);
  3399. return vms;
  3400. recovery:
  3401. /*
  3402. * Remove previously allocated areas. There is no
  3403. * need in removing these areas from the busy tree,
  3404. * because they are inserted only on the final step
  3405. * and when pcpu_get_vm_areas() is success.
  3406. */
  3407. while (area--) {
  3408. orig_start = vas[area]->va_start;
  3409. orig_end = vas[area]->va_end;
  3410. va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
  3411. &free_vmap_area_list);
  3412. if (va)
  3413. kasan_release_vmalloc(orig_start, orig_end,
  3414. va->va_start, va->va_end);
  3415. vas[area] = NULL;
  3416. }
  3417. overflow:
  3418. spin_unlock(&free_vmap_area_lock);
  3419. if (!purged) {
  3420. purge_vmap_area_lazy();
  3421. purged = true;
  3422. /* Before "retry", check if we recover. */
  3423. for (area = 0; area < nr_vms; area++) {
  3424. if (vas[area])
  3425. continue;
  3426. vas[area] = kmem_cache_zalloc(
  3427. vmap_area_cachep, GFP_KERNEL);
  3428. if (!vas[area])
  3429. goto err_free;
  3430. }
  3431. goto retry;
  3432. }
  3433. err_free:
  3434. for (area = 0; area < nr_vms; area++) {
  3435. if (vas[area])
  3436. kmem_cache_free(vmap_area_cachep, vas[area]);
  3437. kfree(vms[area]);
  3438. }
  3439. err_free2:
  3440. kfree(vas);
  3441. kfree(vms);
  3442. return NULL;
  3443. err_free_shadow:
  3444. spin_lock(&free_vmap_area_lock);
  3445. /*
  3446. * We release all the vmalloc shadows, even the ones for regions that
  3447. * hadn't been successfully added. This relies on kasan_release_vmalloc
  3448. * being able to tolerate this case.
  3449. */
  3450. for (area = 0; area < nr_vms; area++) {
  3451. orig_start = vas[area]->va_start;
  3452. orig_end = vas[area]->va_end;
  3453. va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
  3454. &free_vmap_area_list);
  3455. if (va)
  3456. kasan_release_vmalloc(orig_start, orig_end,
  3457. va->va_start, va->va_end);
  3458. vas[area] = NULL;
  3459. kfree(vms[area]);
  3460. }
  3461. spin_unlock(&free_vmap_area_lock);
  3462. kfree(vas);
  3463. kfree(vms);
  3464. return NULL;
  3465. }
  3466. /**
  3467. * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
  3468. * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
  3469. * @nr_vms: the number of allocated areas
  3470. *
  3471. * Free vm_structs and the array allocated by pcpu_get_vm_areas().
  3472. */
  3473. void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
  3474. {
  3475. int i;
  3476. for (i = 0; i < nr_vms; i++)
  3477. free_vm_area(vms[i]);
  3478. kfree(vms);
  3479. }
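/*
 * Illustrative sketch (not part of this file): how the percpu allocator
 * side might request two congruent areas and release them again. The
 * offsets and sizes below are hypothetical; real callers derive them
 * from the percpu chunk layout, and all values must be aligned to the
 * requested alignment.
 *
 *	static const unsigned long offsets[] = { 0, 4 * PMD_SIZE };
 *	static const size_t sizes[] = { PMD_SIZE, PMD_SIZE };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PMD_SIZE);
 *	if (!vms)
 *		return -ENOMEM;
 *
 *	pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
 */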
  3480. #endif /* CONFIG_SMP */
  3481. #ifdef CONFIG_PRINTK
  3482. bool vmalloc_dump_obj(void *object)
  3483. {
  3484. void *objp = (void *)PAGE_ALIGN((unsigned long)object);
  3485. const void *caller;
  3486. struct vm_struct *vm;
  3487. struct vmap_area *va;
  3488. unsigned long addr;
  3489. unsigned int nr_pages;
  3490. if (!spin_trylock(&vmap_area_lock))
  3491. return false;
  3492. va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
  3493. if (!va) {
  3494. spin_unlock(&vmap_area_lock);
  3495. return false;
  3496. }
  3497. vm = va->vm;
  3498. if (!vm) {
  3499. spin_unlock(&vmap_area_lock);
  3500. return false;
  3501. }
  3502. addr = (unsigned long)vm->addr;
  3503. caller = vm->caller;
  3504. nr_pages = vm->nr_pages;
  3505. spin_unlock(&vmap_area_lock);
  3506. pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
  3507. nr_pages, addr, caller);
  3508. return true;
  3509. }
  3510. #endif
  3511. #ifdef CONFIG_PROC_FS
  3512. static void *s_start(struct seq_file *m, loff_t *pos)
  3513. __acquires(&vmap_purge_lock)
  3514. __acquires(&vmap_area_lock)
  3515. {
  3516. mutex_lock(&vmap_purge_lock);
  3517. spin_lock(&vmap_area_lock);
  3518. return seq_list_start(&vmap_area_list, *pos);
  3519. }
  3520. static void *s_next(struct seq_file *m, void *p, loff_t *pos)
  3521. {
  3522. return seq_list_next(p, &vmap_area_list, pos);
  3523. }
  3524. static void s_stop(struct seq_file *m, void *p)
  3525. __releases(&vmap_area_lock)
  3526. __releases(&vmap_purge_lock)
  3527. {
  3528. spin_unlock(&vmap_area_lock);
  3529. mutex_unlock(&vmap_purge_lock);
  3530. }
  3531. static void show_numa_info(struct seq_file *m, struct vm_struct *v)
  3532. {
  3533. if (IS_ENABLED(CONFIG_NUMA)) {
  3534. unsigned int nr, *counters = m->private;
  3535. unsigned int step = 1U << vm_area_page_order(v);
  3536. if (!counters)
  3537. return;
  3538. if (v->flags & VM_UNINITIALIZED)
  3539. return;
  3540. /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
  3541. smp_rmb();
  3542. memset(counters, 0, nr_node_ids * sizeof(unsigned int));
  3543. for (nr = 0; nr < v->nr_pages; nr += step)
  3544. counters[page_to_nid(v->pages[nr])] += step;
  3545. for_each_node_state(nr, N_HIGH_MEMORY)
  3546. if (counters[nr])
  3547. seq_printf(m, " N%u=%u", nr, counters[nr]);
  3548. }
  3549. }
  3550. static void show_purge_info(struct seq_file *m)
  3551. {
  3552. struct vmap_area *va;
  3553. spin_lock(&purge_vmap_area_lock);
  3554. list_for_each_entry(va, &purge_vmap_area_list, list) {
  3555. seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
  3556. (void *)va->va_start, (void *)va->va_end,
  3557. va->va_end - va->va_start);
  3558. }
  3559. spin_unlock(&purge_vmap_area_lock);
  3560. }
  3561. static int s_show(struct seq_file *m, void *p)
  3562. {
  3563. struct vmap_area *va;
  3564. struct vm_struct *v;
  3565. va = list_entry(p, struct vmap_area, list);
  3566. /*
3567. * s_show can race with remove_vm_area(): a NULL ->vm means the
3568. * vmap area is being torn down or belongs to a vm_map_ram allocation.
  3569. */
  3570. if (!va->vm) {
  3571. seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
  3572. (void *)va->va_start, (void *)va->va_end,
  3573. va->va_end - va->va_start);
  3574. goto final;
  3575. }
  3576. v = va->vm;
  3577. seq_printf(m, "0x%pK-0x%pK %7ld",
  3578. v->addr, v->addr + v->size, v->size);
  3579. if (v->caller)
  3580. seq_printf(m, " %pS", v->caller);
  3581. if (v->nr_pages)
  3582. seq_printf(m, " pages=%d", v->nr_pages);
  3583. if (v->phys_addr)
  3584. seq_printf(m, " phys=%pa", &v->phys_addr);
  3585. if (v->flags & VM_IOREMAP)
  3586. seq_puts(m, " ioremap");
  3587. if (v->flags & VM_ALLOC)
  3588. seq_puts(m, " vmalloc");
  3589. if (v->flags & VM_MAP)
  3590. seq_puts(m, " vmap");
  3591. if (v->flags & VM_USERMAP)
  3592. seq_puts(m, " user");
  3593. if (v->flags & VM_DMA_COHERENT)
  3594. seq_puts(m, " dma-coherent");
  3595. if (is_vmalloc_addr(v->pages))
  3596. seq_puts(m, " vpages");
  3597. show_numa_info(m, v);
  3598. seq_putc(m, '\n');
  3599. /*
  3600. * As a final step, dump "unpurged" areas.
  3601. */
  3602. final:
  3603. if (list_is_last(&va->list, &vmap_area_list))
  3604. show_purge_info(m);
  3605. return 0;
  3606. }
  3607. static const struct seq_operations vmalloc_op = {
  3608. .start = s_start,
  3609. .next = s_next,
  3610. .stop = s_stop,
  3611. .show = s_show,
  3612. };
  3613. static int __init proc_vmalloc_init(void)
  3614. {
  3615. if (IS_ENABLED(CONFIG_NUMA))
  3616. proc_create_seq_private("vmallocinfo", 0400, NULL,
  3617. &vmalloc_op,
  3618. nr_node_ids * sizeof(unsigned int), NULL);
  3619. else
  3620. proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
  3621. return 0;
  3622. }
  3623. module_init(proc_vmalloc_init);
  3624. #endif
  3625. #ifdef CONFIG_RKP
  3626. static void *__vmalloc_area_node_for_module(unsigned long core_text_size, struct vm_struct *area,
  3627. gfp_t gfp_mask, pgprot_t prot, unsigned int page_shift, int node)
  3628. {
  3629. const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
  3630. bool nofail = gfp_mask & __GFP_NOFAIL;
  3631. unsigned long addr = (unsigned long)area->addr;
  3632. unsigned long size = get_vm_area_size(area);
  3633. unsigned long array_size;
  3634. unsigned int nr_small_pages = size >> PAGE_SHIFT;
  3635. unsigned int page_order;
  3636. unsigned int flags;
  3637. int ret;
  3638. phys_addr_t p;
  3639. struct page *page;
  3640. int i;
  3641. array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
  3642. gfp_mask |= __GFP_NOWARN;
  3643. if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
  3644. gfp_mask |= __GFP_HIGHMEM;
  3645. /* Please note that the recursion is strictly bounded. */
  3646. if (array_size > PAGE_SIZE) {
  3647. area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
  3648. area->caller);
  3649. } else {
  3650. area->pages = kmalloc_node(array_size, nested_gfp, node);
  3651. }
  3652. if (!area->pages) {
  3653. warn_alloc(gfp_mask, NULL,
  3654. "vmalloc error: size %lu, failed to allocated page array size %lu",
  3655. nr_small_pages * PAGE_SIZE, array_size);
  3656. free_vm_area(area);
  3657. return NULL;
  3658. }
  3659. set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
  3660. page_order = vm_area_page_order(area);
  3661. for (i = 0; i < nr_small_pages; i++) {
  3662. if (i * PAGE_SIZE < core_text_size) {
  3663. p = rkp_ro_alloc_phys_for_text();
  3664. if (p) {
  3665. page = phys_to_page(p);
  3666. } else {
  3667. page = alloc_page(gfp_mask | __GFP_NOWARN);
  3668. }
  3669. } else {
  3670. page = alloc_page(gfp_mask | __GFP_NOWARN);
  3671. }
  3672. if (unlikely(!page))
  3673. break;
  3674. area->pages[area->nr_pages++] = page;
  3675. }
  3676. area->nr_pages = i;
  3677. atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
  3678. if (gfp_mask & __GFP_ACCOUNT) {
  3679. for (i = 0; i < area->nr_pages; i++)
  3680. mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
  3681. }
  3682. /*
  3683. * If not enough pages were obtained to accomplish an
  3684. * allocation request, free them via __vfree() if any.
  3685. */
  3686. if (area->nr_pages != nr_small_pages) {
  3687. /* vm_area_alloc_pages() can also fail due to a fatal signal */
  3688. if (!fatal_signal_pending(current))
  3689. warn_alloc(gfp_mask, NULL,
  3690. "vmalloc error: size %lu, page order %u, failed to allocate pages",
  3691. area->nr_pages * PAGE_SIZE, page_order);
  3692. goto fail;
  3693. }
  3694. /*
  3695. * page tables allocations ignore external gfp mask, enforce it
  3696. * by the scope API
  3697. */
  3698. if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
  3699. flags = memalloc_nofs_save();
  3700. else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
  3701. flags = memalloc_noio_save();
  3702. do {
  3703. ret = vmap_pages_range(addr, addr + size, prot, area->pages,
  3704. page_shift);
  3705. if (nofail && (ret < 0))
  3706. schedule_timeout_uninterruptible(1);
  3707. } while (nofail && (ret < 0));
  3708. if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
  3709. memalloc_nofs_restore(flags);
  3710. else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
  3711. memalloc_noio_restore(flags);
  3712. if (ret < 0) {
  3713. warn_alloc(gfp_mask, NULL,
  3714. "vmalloc error: size %lu, failed to map pages",
  3715. area->nr_pages * PAGE_SIZE);
  3716. goto fail;
  3717. }
  3718. return area->addr;
  3719. fail:
  3720. __vfree(area->addr);
  3721. return NULL;
  3722. }
  3723. void *__vmalloc_node_range_for_module(unsigned long core_layout_size, unsigned long core_text_size,
  3724. unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask,
  3725. pgprot_t prot, unsigned long vm_flags, int node,
  3726. const void *caller)
  3727. {
  3728. struct vm_struct *area;
  3729. void *ret;
  3730. kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
  3731. unsigned long real_size = core_layout_size;
  3732. unsigned int shift = PAGE_SHIFT;
  3733. if (WARN_ON_ONCE(!core_layout_size))
  3734. return NULL;
  3735. again:
  3736. area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
  3737. VM_UNINITIALIZED | vm_flags, start, end, node,
  3738. gfp_mask, caller);
  3739. if (!area) {
  3740. bool nofail = gfp_mask & __GFP_NOFAIL;
  3741. warn_alloc(gfp_mask, NULL,
  3742. "vmalloc error: size %lu, vm_struct allocation failed%s",
  3743. real_size, (nofail) ? ". Retrying." : "");
  3744. if (nofail) {
  3745. schedule_timeout_uninterruptible(1);
  3746. goto again;
  3747. }
  3748. return NULL;
  3749. }
  3750. /*
  3751. * Prepare arguments for __vmalloc_area_node() and
  3752. * kasan_unpoison_vmalloc().
  3753. */
  3754. if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
  3755. if (kasan_hw_tags_enabled()) {
  3756. /*
  3757. * Modify protection bits to allow tagging.
  3758. * This must be done before mapping.
  3759. */
  3760. prot = arch_vmap_pgprot_tagged(prot);
  3761. /*
  3762. * Skip page_alloc poisoning and zeroing for physical
  3763. * pages backing VM_ALLOC mapping. Memory is instead
  3764. * poisoned and zeroed by kasan_unpoison_vmalloc().
  3765. */
  3766. gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
  3767. }
  3768. /* Take note that the mapping is PAGE_KERNEL. */
  3769. kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
  3770. }
  3771. /* Allocate physical pages and map them into vmalloc space. */
  3772. ret = __vmalloc_area_node_for_module(core_text_size, area, gfp_mask, prot, shift, node);
  3773. if (!ret)
  3774. return NULL;
  3775. /*
  3776. * Mark the pages as accessible, now that they are mapped.
  3777. * The init condition should match the one in post_alloc_hook()
  3778. * (except for the should_skip_init() check) to make sure that memory
  3779. * is initialized under the same conditions regardless of the enabled
  3780. * KASAN mode.
  3781. * Tag-based KASAN modes only assign tags to normal non-executable
  3782. * allocations, see __kasan_unpoison_vmalloc().
  3783. */
  3784. kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
  3785. if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
  3786. (gfp_mask & __GFP_SKIP_ZERO))
  3787. kasan_flags |= KASAN_VMALLOC_INIT;
  3788. /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
  3789. area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
  3790. /*
  3791. * In this function, newly allocated vm_struct has VM_UNINITIALIZED
  3792. * flag. It means that vm_struct is not fully initialized.
  3793. * Now, it is fully initialized, so remove this flag here.
  3794. */
  3795. clear_vm_uninitialized_flag(area);
  3796. return area->addr;
  3797. }
  3798. #endif