// SPDX-License-Identifier: GPL-2.0
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2020
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 David Hildenbrand <david@redhat.com>
 *		 Janosch Frank <frankja@linux.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	refcount_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
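
/*
 * Illustrative sketch (not part of the original file): a typical user such
 * as KVM creates one gmap per guest, backed by the current mm, and drops it
 * again with gmap_remove(). The 4 TB limit below is an assumption for the
 * example only:
 *
 *	struct gmap *gmap = gmap_create(current->mm, (1UL << 42) - 1);
 *
 *	if (!gmap)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(gmap);
 */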

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	refcount_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->ref_count))
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
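
/*
 * Illustrative sketch (not part of the original file): gmap_enable() and
 * gmap_disable() only publish the gmap pointer in the lowcore, so a caller
 * entering guest context would typically bracket the guest run with them:
 *
 *	gmap_enable(gmap);
 *	... enter SIE / run the guest ...
 *	gmap_disable(gmap);
 */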

/*
 * gmap_alloc_table is assumed to be called with mmap_lock held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	mmap_write_lock(gmap->mm);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	mmap_write_unlock(gmap->mm);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	mmap_write_lock(gmap->mm);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	mmap_write_unlock(gmap->mm);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
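
/*
 * Illustrative sketch (not part of the original file): guest real memory is
 * linked to the parent address space in segments, so @from, @to and @len
 * must all be PMD (segment) aligned. host_base and the 64 MB size are
 * assumptions for the example only; this would back guest real addresses
 * 0x0 .. 0x4000000 with a range of the parent address space:
 *
 *	if (gmap_map_segment(gmap, host_base, 0, 64UL << 20))
 *		pr_warn("could not map guest segment range\n");
 */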

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	mmap_read_lock(gmap->mm);
	rc = __gmap_translate(gmap, gaddr);
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
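
/*
 * Illustrative sketch (not part of the original file): the translation
 * result is either a host virtual address or -EFAULT encoded in an
 * unsigned long, so callers check it with IS_ERR_VALUE():
 *
 *	unsigned long vmaddr = gmap_translate(gmap, gaddr);
 *
 *	if (IS_ERR_VALUE(vmaddr))
 *		return (long) vmaddr;
 */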

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					 | _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					 _SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	mmap_read_lock(gmap->mm);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault() unlocked the mmap_lock during fault-in,
	 * redo __gmap_translate() to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
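
/*
 * Illustrative sketch (not part of the original file): a caller that hits a
 * guest translation exception would typically resolve it like this before
 * re-entering the guest; FAULT_FLAG_WRITE is only needed for write faults:
 *
 *	rc = gmap_fault(gmap, gaddr, write ? FAULT_FLAG_WRITE : 0);
 *	if (rc == -EFAULT)
 *		... inject an addressing exception into the guest ...
 */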

/*
 * this function is assumed to be called with mmap_lock held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;

		vma = vma_lookup(gmap->mm, vmaddr);
		if (!vma || is_vm_hugetlb_page(vma))
			return;

		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	mmap_read_lock(gmap->mm);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	mmap_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
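
/*
 * Illustrative sketch (not part of the original file): a pte invalidation
 * callback is registered through a struct gmap_notifier whose notifier_call
 * receives the gmap and the affected guest address range. The handler name
 * below is an assumption for the example only:
 *
 *	static void my_gmap_notifier(struct gmap *gmap, unsigned long start,
 *				     unsigned long end)
 *	{
 *		... react to the invalidated guest range ...
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_gmap_notifier,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 */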

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
	unsigned long *table = gmap->table;

	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;

	if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
		return NULL;

	if (asce_type != _ASCE_TYPE_REGION1 &&
	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
		return NULL;

	switch (asce_type) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_lock, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp)
		return NULL;

	/* without huge pages, there is no need to take the table lock */
	if (!gmap->mm->context.allow_gmap_hpage_1m)
		return pmd_none(*pmdp) ? NULL : pmdp;

	spin_lock(&gmap->guest_table_lock);
	if (pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_lock in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT));
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;
	return 0;
}

/*
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_lock in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_lock in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	mmap_read_lock(gmap->mm);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
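
/*
 * Illustrative sketch (not part of the original file): together with a
 * registered pte notifier, write-protecting a guest page arms a
 * notification for the next change to that page. The single-page length is
 * an assumption for the example only:
 *
 *	rc = gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 *	if (rc)
 *		... mapping missing (-EFAULT) or out of memory (-ENOMEM) ...
 */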

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_lock in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
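
/*
 * Illustrative sketch (not part of the original file): gmap_read_table() is
 * useful for peeking at guest-owned tables without marking the page
 * referenced, e.g. reading one doubleword entry of a guest table rooted at
 * a guest address. The origin and index values are assumptions for the
 * example only:
 *
 *	unsigned long entry;
 *
 *	if (!gmap_read_table(gmap, table_origin + 8 * index, &entry))
 *		... use entry ...
 */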
  1077. /**
  1078. * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
  1079. * @sg: pointer to the shadow guest address space structure
  1080. * @vmaddr: vm address associated with the rmap
  1081. * @rmap: pointer to the rmap structure
  1082. *
  1083. * Called with the sg->guest_table_lock
  1084. */
  1085. static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
  1086. struct gmap_rmap *rmap)
  1087. {
  1088. struct gmap_rmap *temp;
  1089. void __rcu **slot;
  1090. BUG_ON(!gmap_is_shadow(sg));
  1091. slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
  1092. if (slot) {
  1093. rmap->next = radix_tree_deref_slot_protected(slot,
  1094. &sg->guest_table_lock);
  1095. for (temp = rmap->next; temp; temp = temp->next) {
  1096. if (temp->raddr == rmap->raddr) {
  1097. kfree(rmap);
  1098. return;
  1099. }
  1100. }
  1101. radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
  1102. } else {
  1103. rmap->next = NULL;
  1104. radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
  1105. rmap);
  1106. }
  1107. }
  1108. /**
  1109. * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
  1110. * @sg: pointer to the shadow guest address space structure
  1111. * @raddr: rmap address in the shadow gmap
  1112. * @paddr: address in the parent guest address space
  1113. * @len: length of the memory area to protect
  1114. *
  1115. * Returns 0 if successfully protected and the rmap was created, -ENOMEM
  1116. * if out of memory and -EFAULT if paddr is invalid.
  1117. */
  1118. static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
  1119. unsigned long paddr, unsigned long len)
  1120. {
  1121. struct gmap *parent;
  1122. struct gmap_rmap *rmap;
  1123. unsigned long vmaddr;
  1124. spinlock_t *ptl;
  1125. pte_t *ptep;
  1126. int rc;
  1127. BUG_ON(!gmap_is_shadow(sg));
  1128. parent = sg->parent;
  1129. while (len) {
  1130. vmaddr = __gmap_translate(parent, paddr);
  1131. if (IS_ERR_VALUE(vmaddr))
  1132. return vmaddr;
  1133. rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
  1134. if (!rmap)
  1135. return -ENOMEM;
  1136. rmap->raddr = raddr;
  1137. rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
  1138. if (rc) {
  1139. kfree(rmap);
  1140. return rc;
  1141. }
  1142. rc = -EAGAIN;
  1143. ptep = gmap_pte_op_walk(parent, paddr, &ptl);
  1144. if (ptep) {
  1145. spin_lock(&sg->guest_table_lock);
  1146. rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
  1147. PGSTE_VSIE_BIT);
  1148. if (!rc)
  1149. gmap_insert_rmap(sg, vmaddr, rmap);
  1150. spin_unlock(&sg->guest_table_lock);
  1151. gmap_pte_op_end(ptl);
  1152. }
  1153. radix_tree_preload_end();
  1154. if (rc) {
  1155. kfree(rmap);
  1156. rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
  1157. if (rc)
  1158. return rc;
  1159. continue;
  1160. }
  1161. paddr += PAGE_SIZE;
  1162. len -= PAGE_SIZE;
  1163. }
  1164. return 0;
  1165. }
  1166. #define _SHADOW_RMAP_MASK 0x7
  1167. #define _SHADOW_RMAP_REGION1 0x5
  1168. #define _SHADOW_RMAP_REGION2 0x4
  1169. #define _SHADOW_RMAP_REGION3 0x3
  1170. #define _SHADOW_RMAP_SEGMENT 0x2
  1171. #define _SHADOW_RMAP_PGTABLE 0x1
  1172. /**
  1173. * gmap_idte_one - invalidate a single region or segment table entry
  1174. * @asce: region or segment table *origin* + table-type bits
  1175. * @vaddr: virtual address to identify the table entry to flush
  1176. *
  1177. * The invalid bit of a single region or segment table entry is set
  1178. * and the associated TLB entries depending on the entry are flushed.
  1179. * The table-type of the @asce identifies the portion of the @vaddr
  1180. * that is used as the invalidation index.
  1181. */
  1182. static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
  1183. {
  1184. asm volatile(
  1185. " idte %0,0,%1"
  1186. : : "a" (asce), "a" (vaddr) : "cc", "memory");
  1187. }
  1188. /**
  1189. * gmap_unshadow_page - remove a page from a shadow page table
  1190. * @sg: pointer to the shadow guest address space structure
  1191. * @raddr: rmap address in the shadow guest address space
  1192. *
  1193. * Called with the sg->guest_table_lock
  1194. */
  1195. static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
  1196. {
  1197. unsigned long *table;
  1198. BUG_ON(!gmap_is_shadow(sg));
  1199. table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
  1200. if (!table || *table & _PAGE_INVALID)
  1201. return;
  1202. gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
  1203. ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
  1204. }
  1205. /**
  1206. * __gmap_unshadow_pgt - remove all entries from a shadow page table
  1207. * @sg: pointer to the shadow guest address space structure
  1208. * @raddr: rmap address in the shadow guest address space
  1209. * @pgt: pointer to the start of a shadow page table
  1210. *
  1211. * Called with the sg->guest_table_lock
  1212. */
  1213. static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
  1214. unsigned long *pgt)
  1215. {
  1216. int i;
  1217. BUG_ON(!gmap_is_shadow(sg));
  1218. for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
  1219. pgt[i] = _PAGE_INVALID;
  1220. }
  1221. /**
  1222. * gmap_unshadow_pgt - remove a shadow page table from a segment entry
  1223. * @sg: pointer to the shadow guest address space structure
  1224. * @raddr: address in the shadow guest address space
  1225. *
  1226. * Called with the sg->guest_table_lock
  1227. */
  1228. static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
  1229. {
  1230. unsigned long sto, *ste, *pgt;
  1231. struct page *page;
  1232. BUG_ON(!gmap_is_shadow(sg));
  1233. ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
  1234. if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
  1235. return;
  1236. gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
  1237. sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
  1238. gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
  1239. pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
  1240. *ste = _SEGMENT_ENTRY_EMPTY;
  1241. __gmap_unshadow_pgt(sg, raddr, pgt);
  1242. /* Free page table */
  1243. page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
  1244. list_del(&page->lru);
  1245. page_table_free_pgste(page);
  1246. }
  1247. /**
  1248. * __gmap_unshadow_sgt - remove all entries from a shadow segment table
  1249. * @sg: pointer to the shadow guest address space structure
  1250. * @raddr: rmap address in the shadow guest address space
  1251. * @sgt: pointer to the start of a shadow segment table
  1252. *
  1253. * Called with the sg->guest_table_lock
  1254. */
  1255. static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
  1256. unsigned long *sgt)
  1257. {
  1258. unsigned long *pgt;
  1259. struct page *page;
  1260. int i;
  1261. BUG_ON(!gmap_is_shadow(sg));
  1262. for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
  1263. if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
  1264. continue;
  1265. pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
  1266. sgt[i] = _SEGMENT_ENTRY_EMPTY;
  1267. __gmap_unshadow_pgt(sg, raddr, pgt);
  1268. /* Free page table */
  1269. page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
  1270. list_del(&page->lru);
  1271. page_table_free_pgste(page);
  1272. }
  1273. }
  1274. /**
  1275. * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
  1276. * @sg: pointer to the shadow guest address space structure
  1277. * @raddr: rmap address in the shadow guest address space
  1278. *
1279. * Called with the sg->guest_table_lock
  1280. */
  1281. static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
  1282. {
  1283. unsigned long r3o, *r3e, *sgt;
  1284. struct page *page;
  1285. BUG_ON(!gmap_is_shadow(sg));
  1286. r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
  1287. if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
  1288. return;
  1289. gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
  1290. r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
  1291. gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
  1292. sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
  1293. *r3e = _REGION3_ENTRY_EMPTY;
  1294. __gmap_unshadow_sgt(sg, raddr, sgt);
  1295. /* Free segment table */
  1296. page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
  1297. list_del(&page->lru);
  1298. __free_pages(page, CRST_ALLOC_ORDER);
  1299. }
  1300. /**
  1301. * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
  1302. * @sg: pointer to the shadow guest address space structure
  1303. * @raddr: address in the shadow guest address space
  1304. * @r3t: pointer to the start of a shadow region-3 table
  1305. *
  1306. * Called with the sg->guest_table_lock
  1307. */
  1308. static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
  1309. unsigned long *r3t)
  1310. {
  1311. unsigned long *sgt;
  1312. struct page *page;
  1313. int i;
  1314. BUG_ON(!gmap_is_shadow(sg));
  1315. for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
  1316. if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
  1317. continue;
  1318. sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
  1319. r3t[i] = _REGION3_ENTRY_EMPTY;
  1320. __gmap_unshadow_sgt(sg, raddr, sgt);
  1321. /* Free segment table */
  1322. page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
  1323. list_del(&page->lru);
  1324. __free_pages(page, CRST_ALLOC_ORDER);
  1325. }
  1326. }
  1327. /**
  1328. * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
  1329. * @sg: pointer to the shadow guest address space structure
  1330. * @raddr: rmap address in the shadow guest address space
  1331. *
  1332. * Called with the sg->guest_table_lock
  1333. */
  1334. static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
  1335. {
  1336. unsigned long r2o, *r2e, *r3t;
  1337. struct page *page;
  1338. BUG_ON(!gmap_is_shadow(sg));
  1339. r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
  1340. if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
  1341. return;
  1342. gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
  1343. r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
  1344. gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
  1345. r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
  1346. *r2e = _REGION2_ENTRY_EMPTY;
  1347. __gmap_unshadow_r3t(sg, raddr, r3t);
  1348. /* Free region 3 table */
  1349. page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
  1350. list_del(&page->lru);
  1351. __free_pages(page, CRST_ALLOC_ORDER);
  1352. }
  1353. /**
  1354. * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
  1355. * @sg: pointer to the shadow guest address space structure
  1356. * @raddr: rmap address in the shadow guest address space
  1357. * @r2t: pointer to the start of a shadow region-2 table
  1358. *
  1359. * Called with the sg->guest_table_lock
  1360. */
  1361. static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
  1362. unsigned long *r2t)
  1363. {
  1364. unsigned long *r3t;
  1365. struct page *page;
  1366. int i;
  1367. BUG_ON(!gmap_is_shadow(sg));
  1368. for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
  1369. if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
  1370. continue;
  1371. r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
  1372. r2t[i] = _REGION2_ENTRY_EMPTY;
  1373. __gmap_unshadow_r3t(sg, raddr, r3t);
  1374. /* Free region 3 table */
  1375. page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
  1376. list_del(&page->lru);
  1377. __free_pages(page, CRST_ALLOC_ORDER);
  1378. }
  1379. }
  1380. /**
  1381. * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
  1382. * @sg: pointer to the shadow guest address space structure
  1383. * @raddr: rmap address in the shadow guest address space
  1384. *
  1385. * Called with the sg->guest_table_lock
  1386. */
  1387. static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
  1388. {
  1389. unsigned long r1o, *r1e, *r2t;
  1390. struct page *page;
  1391. BUG_ON(!gmap_is_shadow(sg));
  1392. r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
  1393. if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
  1394. return;
  1395. gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
  1396. r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
  1397. gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
  1398. r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
  1399. *r1e = _REGION1_ENTRY_EMPTY;
  1400. __gmap_unshadow_r2t(sg, raddr, r2t);
  1401. /* Free region 2 table */
  1402. page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
  1403. list_del(&page->lru);
  1404. __free_pages(page, CRST_ALLOC_ORDER);
  1405. }
  1406. /**
  1407. * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
  1408. * @sg: pointer to the shadow guest address space structure
  1409. * @raddr: rmap address in the shadow guest address space
  1410. * @r1t: pointer to the start of a shadow region-1 table
  1411. *
1412. * Called with the sg->guest_table_lock
  1413. */
  1414. static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
  1415. unsigned long *r1t)
  1416. {
  1417. unsigned long asce, *r2t;
  1418. struct page *page;
  1419. int i;
  1420. BUG_ON(!gmap_is_shadow(sg));
  1421. asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
  1422. for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
  1423. if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
  1424. continue;
  1425. r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
  1426. __gmap_unshadow_r2t(sg, raddr, r2t);
  1427. /* Clear entry and flush translation r1t -> r2t */
  1428. gmap_idte_one(asce, raddr);
  1429. r1t[i] = _REGION1_ENTRY_EMPTY;
  1430. /* Free region 2 table */
  1431. page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
  1432. list_del(&page->lru);
  1433. __free_pages(page, CRST_ALLOC_ORDER);
  1434. }
  1435. }
  1436. /**
  1437. * gmap_unshadow - remove a shadow page table completely
  1438. * @sg: pointer to the shadow guest address space structure
  1439. *
  1440. * Called with sg->guest_table_lock
  1441. */
  1442. static void gmap_unshadow(struct gmap *sg)
  1443. {
  1444. unsigned long *table;
  1445. BUG_ON(!gmap_is_shadow(sg));
  1446. if (sg->removed)
  1447. return;
  1448. sg->removed = 1;
  1449. gmap_call_notifier(sg, 0, -1UL);
  1450. gmap_flush_tlb(sg);
  1451. table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
  1452. switch (sg->asce & _ASCE_TYPE_MASK) {
  1453. case _ASCE_TYPE_REGION1:
  1454. __gmap_unshadow_r1t(sg, 0, table);
  1455. break;
  1456. case _ASCE_TYPE_REGION2:
  1457. __gmap_unshadow_r2t(sg, 0, table);
  1458. break;
  1459. case _ASCE_TYPE_REGION3:
  1460. __gmap_unshadow_r3t(sg, 0, table);
  1461. break;
  1462. case _ASCE_TYPE_SEGMENT:
  1463. __gmap_unshadow_sgt(sg, 0, table);
  1464. break;
  1465. }
  1466. }
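/*
 * Illustrative sketch only (not part of the original source): how a caller
 * typically retires a whole shadow gmap. It mirrors the pattern used further
 * below by gmap_shadow() for the real-space case and by gmap_shadow_notify().
 * The function name is hypothetical.
 */
#if 0
static void example_retire_shadow(struct gmap *sg)
{
	/* sg->parent->shadow_lock is assumed to be held by the caller */
	spin_lock(&sg->guest_table_lock);
	gmap_unshadow(sg);		/* flush and clear all shadow tables */
	spin_unlock(&sg->guest_table_lock);
	list_del(&sg->list);		/* unlink from parent->children */
	gmap_put(sg);			/* drop the list reference */
}
#endif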
  1467. /**
  1468. * gmap_find_shadow - find a specific asce in the list of shadow tables
  1469. * @parent: pointer to the parent gmap
  1470. * @asce: ASCE for which the shadow table is created
  1471. * @edat_level: edat level to be used for the shadow translation
  1472. *
  1473. * Returns the pointer to a gmap if a shadow table with the given asce is
  1474. * already available, ERR_PTR(-EAGAIN) if another one is just being created,
  1475. * otherwise NULL
  1476. */
  1477. static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
  1478. int edat_level)
  1479. {
  1480. struct gmap *sg;
  1481. list_for_each_entry(sg, &parent->children, list) {
  1482. if (sg->orig_asce != asce || sg->edat_level != edat_level ||
  1483. sg->removed)
  1484. continue;
  1485. if (!sg->initialized)
  1486. return ERR_PTR(-EAGAIN);
  1487. refcount_inc(&sg->ref_count);
  1488. return sg;
  1489. }
  1490. return NULL;
  1491. }
  1492. /**
  1493. * gmap_shadow_valid - check if a shadow guest address space matches the
  1494. * given properties and is still valid
  1495. * @sg: pointer to the shadow guest address space structure
  1496. * @asce: ASCE for which the shadow table is requested
  1497. * @edat_level: edat level to be used for the shadow translation
  1498. *
  1499. * Returns 1 if the gmap shadow is still valid and matches the given
  1500. * properties, the caller can continue using it. Returns 0 otherwise, the
  1501. * caller has to request a new shadow gmap in this case.
  1502. *
1503. */
  1504. int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
  1505. {
  1506. if (sg->removed)
  1507. return 0;
  1508. return sg->orig_asce == asce && sg->edat_level == edat_level;
  1509. }
  1510. EXPORT_SYMBOL_GPL(gmap_shadow_valid);
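/*
 * Illustrative sketch only: a caller that cached a shadow gmap can use
 * gmap_shadow_valid() to decide whether the cached pointer (on which it is
 * assumed to still hold a reference) may be reused, or whether a fresh
 * shadow has to be requested via gmap_shadow() below. The function name and
 * the caching context are hypothetical.
 */
#if 0
static struct gmap *example_get_shadow(struct gmap *parent, struct gmap *cached,
				       unsigned long asce, int edat_level)
{
	if (cached && gmap_shadow_valid(cached, asce, edat_level))
		return cached;				/* still matches, keep using it */
	return gmap_shadow(parent, asce, edat_level);	/* may return ERR_PTR() */
}
#endif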
  1511. /**
  1512. * gmap_shadow - create/find a shadow guest address space
  1513. * @parent: pointer to the parent gmap
  1514. * @asce: ASCE for which the shadow table is created
  1515. * @edat_level: edat level to be used for the shadow translation
  1516. *
  1517. * The pages of the top level page table referred by the asce parameter
  1518. * will be set to read-only and marked in the PGSTEs of the kvm process.
  1519. * The shadow table will be removed automatically on any change to the
  1520. * PTE mapping for the source table.
  1521. *
  1522. * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
  1523. * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
  1524. * parent gmap table could not be protected.
  1525. */
  1526. struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
  1527. int edat_level)
  1528. {
  1529. struct gmap *sg, *new;
  1530. unsigned long limit;
  1531. int rc;
  1532. BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
  1533. BUG_ON(gmap_is_shadow(parent));
  1534. spin_lock(&parent->shadow_lock);
  1535. sg = gmap_find_shadow(parent, asce, edat_level);
  1536. spin_unlock(&parent->shadow_lock);
  1537. if (sg)
  1538. return sg;
  1539. /* Create a new shadow gmap */
  1540. limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
  1541. if (asce & _ASCE_REAL_SPACE)
  1542. limit = -1UL;
  1543. new = gmap_alloc(limit);
  1544. if (!new)
  1545. return ERR_PTR(-ENOMEM);
  1546. new->mm = parent->mm;
  1547. new->parent = gmap_get(parent);
  1548. new->orig_asce = asce;
  1549. new->edat_level = edat_level;
  1550. new->initialized = false;
  1551. spin_lock(&parent->shadow_lock);
  1552. /* Recheck if another CPU created the same shadow */
  1553. sg = gmap_find_shadow(parent, asce, edat_level);
  1554. if (sg) {
  1555. spin_unlock(&parent->shadow_lock);
  1556. gmap_free(new);
  1557. return sg;
  1558. }
  1559. if (asce & _ASCE_REAL_SPACE) {
  1560. /* only allow one real-space gmap shadow */
  1561. list_for_each_entry(sg, &parent->children, list) {
  1562. if (sg->orig_asce & _ASCE_REAL_SPACE) {
  1563. spin_lock(&sg->guest_table_lock);
  1564. gmap_unshadow(sg);
  1565. spin_unlock(&sg->guest_table_lock);
  1566. list_del(&sg->list);
  1567. gmap_put(sg);
  1568. break;
  1569. }
  1570. }
  1571. }
  1572. refcount_set(&new->ref_count, 2);
  1573. list_add(&new->list, &parent->children);
  1574. if (asce & _ASCE_REAL_SPACE) {
  1575. /* nothing to protect, return right away */
  1576. new->initialized = true;
  1577. spin_unlock(&parent->shadow_lock);
  1578. return new;
  1579. }
  1580. spin_unlock(&parent->shadow_lock);
  1581. /* protect after insertion, so it will get properly invalidated */
  1582. mmap_read_lock(parent->mm);
  1583. rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
  1584. ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
  1585. PROT_READ, GMAP_NOTIFY_SHADOW);
  1586. mmap_read_unlock(parent->mm);
  1587. spin_lock(&parent->shadow_lock);
  1588. new->initialized = true;
  1589. if (rc) {
  1590. list_del(&new->list);
  1591. gmap_free(new);
  1592. new = ERR_PTR(rc);
  1593. }
  1594. spin_unlock(&parent->shadow_lock);
  1595. return new;
  1596. }
  1597. EXPORT_SYMBOL_GPL(gmap_shadow);
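/*
 * Illustrative sketch only: typical error handling around gmap_shadow().
 * The function name is hypothetical; -EAGAIN means another CPU is creating
 * the same shadow right now and the whole operation should be retried.
 */
#if 0
static int example_use_shadow(struct gmap *parent, unsigned long asce, int edat)
{
	struct gmap *sg = gmap_shadow(parent, asce, edat);

	if (IS_ERR(sg))
		return PTR_ERR(sg);	/* -EAGAIN, -ENOMEM or -EFAULT */
	/* ... use sg ... */
	gmap_put(sg);			/* drop the reference taken by gmap_shadow() */
	return 0;
}
#endif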
  1598. /**
  1599. * gmap_shadow_r2t - create an empty shadow region 2 table
  1600. * @sg: pointer to the shadow guest address space structure
  1601. * @saddr: faulting address in the shadow gmap
  1602. * @r2t: parent gmap address of the region 2 table to get shadowed
  1603. * @fake: r2t references contiguous guest memory block, not a r2t
  1604. *
  1605. * The r2t parameter specifies the address of the source table. The
  1606. * four pages of the source table are made read-only in the parent gmap
  1607. * address space. A write to the source table area @r2t will automatically
1608. * remove the shadow r2 table and all of its descendants.
  1609. *
  1610. * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  1611. * shadow table structure is incomplete, -ENOMEM if out of memory and
  1612. * -EFAULT if an address in the parent gmap could not be resolved.
  1613. *
  1614. * Called with sg->mm->mmap_lock in read.
  1615. */
  1616. int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
  1617. int fake)
  1618. {
  1619. unsigned long raddr, origin, offset, len;
  1620. unsigned long *s_r2t, *table;
  1621. struct page *page;
  1622. int rc;
  1623. BUG_ON(!gmap_is_shadow(sg));
  1624. /* Allocate a shadow region second table */
  1625. page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
  1626. if (!page)
  1627. return -ENOMEM;
  1628. page->index = r2t & _REGION_ENTRY_ORIGIN;
  1629. if (fake)
  1630. page->index |= GMAP_SHADOW_FAKE_TABLE;
  1631. s_r2t = (unsigned long *) page_to_phys(page);
  1632. /* Install shadow region second table */
  1633. spin_lock(&sg->guest_table_lock);
  1634. table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
  1635. if (!table) {
  1636. rc = -EAGAIN; /* Race with unshadow */
  1637. goto out_free;
  1638. }
  1639. if (!(*table & _REGION_ENTRY_INVALID)) {
  1640. rc = 0; /* Already established */
  1641. goto out_free;
  1642. } else if (*table & _REGION_ENTRY_ORIGIN) {
  1643. rc = -EAGAIN; /* Race with shadow */
  1644. goto out_free;
  1645. }
  1646. crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
  1647. /* mark as invalid as long as the parent table is not protected */
  1648. *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
  1649. _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
  1650. if (sg->edat_level >= 1)
  1651. *table |= (r2t & _REGION_ENTRY_PROTECT);
  1652. list_add(&page->lru, &sg->crst_list);
  1653. if (fake) {
  1654. /* nothing to protect for fake tables */
  1655. *table &= ~_REGION_ENTRY_INVALID;
  1656. spin_unlock(&sg->guest_table_lock);
  1657. return 0;
  1658. }
  1659. spin_unlock(&sg->guest_table_lock);
  1660. /* Make r2t read-only in parent gmap page table */
  1661. raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
  1662. origin = r2t & _REGION_ENTRY_ORIGIN;
  1663. offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
  1664. len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
  1665. rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
  1666. spin_lock(&sg->guest_table_lock);
  1667. if (!rc) {
  1668. table = gmap_table_walk(sg, saddr, 4);
  1669. if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
  1670. (unsigned long) s_r2t)
  1671. rc = -EAGAIN; /* Race with unshadow */
  1672. else
  1673. *table &= ~_REGION_ENTRY_INVALID;
  1674. } else {
  1675. gmap_unshadow_r2t(sg, raddr);
  1676. }
  1677. spin_unlock(&sg->guest_table_lock);
  1678. return rc;
  1679. out_free:
  1680. spin_unlock(&sg->guest_table_lock);
  1681. __free_pages(page, CRST_ALLOC_ORDER);
  1682. return rc;
  1683. }
  1684. EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
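/*
 * Illustrative sketch only: gmap_shadow_r2t() and its siblings below
 * (gmap_shadow_r3t/sgt/pgt) are driven top-down by a shadow fault handler,
 * one table level per call. Fetching the guest's own region-1 entry
 * (read_guest_r1_entry()) is a hypothetical placeholder and not part of
 * this file; the entry value carries the origin, offset/length and
 * protection bits that gmap_shadow_r2t() evaluates.
 */
#if 0
static int example_shadow_one_level(struct gmap *sg, unsigned long saddr)
{
	unsigned long r2t;	/* guest region-1 entry: origin + flags of the r2t */
	int fake = 0;		/* 1 if the entry maps a contiguous memory block */
	int rc;

	rc = read_guest_r1_entry(sg, saddr, &r2t, &fake);	/* hypothetical */
	if (rc)
		return rc;
	/* 0: shadowed or already present, -EAGAIN: retry, -ENOMEM/-EFAULT: fail */
	return gmap_shadow_r2t(sg, saddr, r2t, fake);
}
#endif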
  1685. /**
  1686. * gmap_shadow_r3t - create a shadow region 3 table
  1687. * @sg: pointer to the shadow guest address space structure
  1688. * @saddr: faulting address in the shadow gmap
  1689. * @r3t: parent gmap address of the region 3 table to get shadowed
  1690. * @fake: r3t references contiguous guest memory block, not a r3t
  1691. *
  1692. * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  1693. * shadow table structure is incomplete, -ENOMEM if out of memory and
  1694. * -EFAULT if an address in the parent gmap could not be resolved.
  1695. *
  1696. * Called with sg->mm->mmap_lock in read.
  1697. */
  1698. int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
  1699. int fake)
  1700. {
  1701. unsigned long raddr, origin, offset, len;
  1702. unsigned long *s_r3t, *table;
  1703. struct page *page;
  1704. int rc;
  1705. BUG_ON(!gmap_is_shadow(sg));
1706. /* Allocate a shadow region third table */
  1707. page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
  1708. if (!page)
  1709. return -ENOMEM;
  1710. page->index = r3t & _REGION_ENTRY_ORIGIN;
  1711. if (fake)
  1712. page->index |= GMAP_SHADOW_FAKE_TABLE;
  1713. s_r3t = (unsigned long *) page_to_phys(page);
1714. /* Install shadow region third table */
  1715. spin_lock(&sg->guest_table_lock);
  1716. table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
  1717. if (!table) {
  1718. rc = -EAGAIN; /* Race with unshadow */
  1719. goto out_free;
  1720. }
  1721. if (!(*table & _REGION_ENTRY_INVALID)) {
  1722. rc = 0; /* Already established */
  1723. goto out_free;
  1724. } else if (*table & _REGION_ENTRY_ORIGIN) {
  1725. rc = -EAGAIN; /* Race with shadow */
  1726. goto out_free;
  1727. }
  1728. crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
  1729. /* mark as invalid as long as the parent table is not protected */
  1730. *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
  1731. _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
  1732. if (sg->edat_level >= 1)
  1733. *table |= (r3t & _REGION_ENTRY_PROTECT);
  1734. list_add(&page->lru, &sg->crst_list);
  1735. if (fake) {
  1736. /* nothing to protect for fake tables */
  1737. *table &= ~_REGION_ENTRY_INVALID;
  1738. spin_unlock(&sg->guest_table_lock);
  1739. return 0;
  1740. }
  1741. spin_unlock(&sg->guest_table_lock);
  1742. /* Make r3t read-only in parent gmap page table */
  1743. raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
  1744. origin = r3t & _REGION_ENTRY_ORIGIN;
  1745. offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
  1746. len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
  1747. rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
  1748. spin_lock(&sg->guest_table_lock);
  1749. if (!rc) {
  1750. table = gmap_table_walk(sg, saddr, 3);
  1751. if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
  1752. (unsigned long) s_r3t)
  1753. rc = -EAGAIN; /* Race with unshadow */
  1754. else
  1755. *table &= ~_REGION_ENTRY_INVALID;
  1756. } else {
  1757. gmap_unshadow_r3t(sg, raddr);
  1758. }
  1759. spin_unlock(&sg->guest_table_lock);
  1760. return rc;
  1761. out_free:
  1762. spin_unlock(&sg->guest_table_lock);
  1763. __free_pages(page, CRST_ALLOC_ORDER);
  1764. return rc;
  1765. }
  1766. EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
  1767. /**
  1768. * gmap_shadow_sgt - create a shadow segment table
  1769. * @sg: pointer to the shadow guest address space structure
  1770. * @saddr: faulting address in the shadow gmap
  1771. * @sgt: parent gmap address of the segment table to get shadowed
  1772. * @fake: sgt references contiguous guest memory block, not a sgt
  1773. *
  1774. * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
  1775. * shadow table structure is incomplete, -ENOMEM if out of memory and
  1776. * -EFAULT if an address in the parent gmap could not be resolved.
  1777. *
  1778. * Called with sg->mm->mmap_lock in read.
  1779. */
  1780. int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
  1781. int fake)
  1782. {
  1783. unsigned long raddr, origin, offset, len;
  1784. unsigned long *s_sgt, *table;
  1785. struct page *page;
  1786. int rc;
  1787. BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
  1788. /* Allocate a shadow segment table */
  1789. page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
  1790. if (!page)
  1791. return -ENOMEM;
  1792. page->index = sgt & _REGION_ENTRY_ORIGIN;
  1793. if (fake)
  1794. page->index |= GMAP_SHADOW_FAKE_TABLE;
  1795. s_sgt = (unsigned long *) page_to_phys(page);
1796. /* Install shadow segment table */
  1797. spin_lock(&sg->guest_table_lock);
  1798. table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
  1799. if (!table) {
  1800. rc = -EAGAIN; /* Race with unshadow */
  1801. goto out_free;
  1802. }
  1803. if (!(*table & _REGION_ENTRY_INVALID)) {
  1804. rc = 0; /* Already established */
  1805. goto out_free;
  1806. } else if (*table & _REGION_ENTRY_ORIGIN) {
  1807. rc = -EAGAIN; /* Race with shadow */
  1808. goto out_free;
  1809. }
  1810. crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
  1811. /* mark as invalid as long as the parent table is not protected */
  1812. *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
  1813. _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
  1814. if (sg->edat_level >= 1)
  1815. *table |= sgt & _REGION_ENTRY_PROTECT;
  1816. list_add(&page->lru, &sg->crst_list);
  1817. if (fake) {
  1818. /* nothing to protect for fake tables */
  1819. *table &= ~_REGION_ENTRY_INVALID;
  1820. spin_unlock(&sg->guest_table_lock);
  1821. return 0;
  1822. }
  1823. spin_unlock(&sg->guest_table_lock);
  1824. /* Make sgt read-only in parent gmap page table */
  1825. raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
  1826. origin = sgt & _REGION_ENTRY_ORIGIN;
  1827. offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
  1828. len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
  1829. rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
  1830. spin_lock(&sg->guest_table_lock);
  1831. if (!rc) {
  1832. table = gmap_table_walk(sg, saddr, 2);
  1833. if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
  1834. (unsigned long) s_sgt)
  1835. rc = -EAGAIN; /* Race with unshadow */
  1836. else
  1837. *table &= ~_REGION_ENTRY_INVALID;
  1838. } else {
  1839. gmap_unshadow_sgt(sg, raddr);
  1840. }
  1841. spin_unlock(&sg->guest_table_lock);
  1842. return rc;
  1843. out_free:
  1844. spin_unlock(&sg->guest_table_lock);
  1845. __free_pages(page, CRST_ALLOC_ORDER);
  1846. return rc;
  1847. }
  1848. EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
  1849. /**
  1850. * gmap_shadow_pgt_lookup - find a shadow page table
  1851. * @sg: pointer to the shadow guest address space structure
1852. * @saddr: the address in the shadow guest address space
  1853. * @pgt: parent gmap address of the page table to get shadowed
  1854. * @dat_protection: if the pgtable is marked as protected by dat
  1855. * @fake: pgt references contiguous guest memory block, not a pgtable
  1856. *
  1857. * Returns 0 if the shadow page table was found and -EAGAIN if the page
  1858. * table was not found.
  1859. *
  1860. * Called with sg->mm->mmap_lock in read.
  1861. */
  1862. int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
  1863. unsigned long *pgt, int *dat_protection,
  1864. int *fake)
  1865. {
  1866. unsigned long *table;
  1867. struct page *page;
  1868. int rc;
  1869. BUG_ON(!gmap_is_shadow(sg));
  1870. spin_lock(&sg->guest_table_lock);
  1871. table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
  1872. if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
  1873. /* Shadow page tables are full pages (pte+pgste) */
  1874. page = pfn_to_page(*table >> PAGE_SHIFT);
  1875. *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
  1876. *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
  1877. *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
  1878. rc = 0;
  1879. } else {
  1880. rc = -EAGAIN;
  1881. }
  1882. spin_unlock(&sg->guest_table_lock);
  1883. return rc;
  1884. }
  1885. EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
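/*
 * Illustrative sketch only: gmap_shadow_pgt_lookup() is typically paired
 * with gmap_shadow_pgt() below. On -EAGAIN the caller fetches the guest's
 * segment table entry itself and instantiates the shadow page table;
 * read_guest_segment_entry() is a hypothetical placeholder for that step,
 * and handling of large (EDAT) segments via @fake is omitted.
 */
#if 0
static int example_get_shadow_pgt(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_prot, int *fake)
{
	unsigned long ste;
	int rc;

	rc = gmap_shadow_pgt_lookup(sg, saddr, pgt, dat_prot, fake);
	if (rc != -EAGAIN)
		return rc;				/* found (0) or other error */
	rc = read_guest_segment_entry(sg, saddr, &ste);	/* hypothetical */
	if (rc)
		return rc;
	rc = gmap_shadow_pgt(sg, saddr, ste, 0);
	if (rc)
		return rc;
	return gmap_shadow_pgt_lookup(sg, saddr, pgt, dat_prot, fake);
}
#endif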
  1886. /**
  1887. * gmap_shadow_pgt - instantiate a shadow page table
  1888. * @sg: pointer to the shadow guest address space structure
  1889. * @saddr: faulting address in the shadow gmap
  1890. * @pgt: parent gmap address of the page table to get shadowed
  1891. * @fake: pgt references contiguous guest memory block, not a pgtable
  1892. *
  1893. * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1894. * shadow table structure is incomplete, -ENOMEM if out of memory and
1895. * -EFAULT if an address in the parent gmap could not be resolved.
  1896. *
1897. * Called with sg->mm->mmap_lock in read.
  1898. */
  1899. int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
  1900. int fake)
  1901. {
  1902. unsigned long raddr, origin;
  1903. unsigned long *s_pgt, *table;
  1904. struct page *page;
  1905. int rc;
  1906. BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
  1907. /* Allocate a shadow page table */
  1908. page = page_table_alloc_pgste(sg->mm);
  1909. if (!page)
  1910. return -ENOMEM;
  1911. page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
  1912. if (fake)
  1913. page->index |= GMAP_SHADOW_FAKE_TABLE;
  1914. s_pgt = (unsigned long *) page_to_phys(page);
  1915. /* Install shadow page table */
  1916. spin_lock(&sg->guest_table_lock);
  1917. table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
  1918. if (!table) {
  1919. rc = -EAGAIN; /* Race with unshadow */
  1920. goto out_free;
  1921. }
  1922. if (!(*table & _SEGMENT_ENTRY_INVALID)) {
  1923. rc = 0; /* Already established */
  1924. goto out_free;
  1925. } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
  1926. rc = -EAGAIN; /* Race with shadow */
  1927. goto out_free;
  1928. }
  1929. /* mark as invalid as long as the parent table is not protected */
  1930. *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
  1931. (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
  1932. list_add(&page->lru, &sg->pt_list);
  1933. if (fake) {
  1934. /* nothing to protect for fake tables */
  1935. *table &= ~_SEGMENT_ENTRY_INVALID;
  1936. spin_unlock(&sg->guest_table_lock);
  1937. return 0;
  1938. }
  1939. spin_unlock(&sg->guest_table_lock);
  1940. /* Make pgt read-only in parent gmap page table (not the pgste) */
  1941. raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
  1942. origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
  1943. rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
  1944. spin_lock(&sg->guest_table_lock);
  1945. if (!rc) {
  1946. table = gmap_table_walk(sg, saddr, 1);
  1947. if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
  1948. (unsigned long) s_pgt)
  1949. rc = -EAGAIN; /* Race with unshadow */
  1950. else
  1951. *table &= ~_SEGMENT_ENTRY_INVALID;
  1952. } else {
  1953. gmap_unshadow_pgt(sg, raddr);
  1954. }
  1955. spin_unlock(&sg->guest_table_lock);
  1956. return rc;
  1957. out_free:
  1958. spin_unlock(&sg->guest_table_lock);
  1959. page_table_free_pgste(page);
  1960. return rc;
  1961. }
  1962. EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
  1963. /**
  1964. * gmap_shadow_page - create a shadow page mapping
  1965. * @sg: pointer to the shadow guest address space structure
  1966. * @saddr: faulting address in the shadow gmap
  1967. * @pte: pte in parent gmap address space to get shadowed
  1968. *
  1969. * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  1970. * shadow table structure is incomplete, -ENOMEM if out of memory and
  1971. * -EFAULT if an address in the parent gmap could not be resolved.
  1972. *
  1973. * Called with sg->mm->mmap_lock in read.
  1974. */
  1975. int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
  1976. {
  1977. struct gmap *parent;
  1978. struct gmap_rmap *rmap;
  1979. unsigned long vmaddr, paddr;
  1980. spinlock_t *ptl;
  1981. pte_t *sptep, *tptep;
  1982. int prot;
  1983. int rc;
  1984. BUG_ON(!gmap_is_shadow(sg));
  1985. parent = sg->parent;
  1986. prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
  1987. rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
  1988. if (!rmap)
  1989. return -ENOMEM;
  1990. rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
  1991. while (1) {
  1992. paddr = pte_val(pte) & PAGE_MASK;
  1993. vmaddr = __gmap_translate(parent, paddr);
  1994. if (IS_ERR_VALUE(vmaddr)) {
  1995. rc = vmaddr;
  1996. break;
  1997. }
  1998. rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
  1999. if (rc)
  2000. break;
  2001. rc = -EAGAIN;
  2002. sptep = gmap_pte_op_walk(parent, paddr, &ptl);
  2003. if (sptep) {
  2004. spin_lock(&sg->guest_table_lock);
  2005. /* Get page table pointer */
  2006. tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
  2007. if (!tptep) {
  2008. spin_unlock(&sg->guest_table_lock);
  2009. gmap_pte_op_end(ptl);
  2010. radix_tree_preload_end();
  2011. break;
  2012. }
  2013. rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
  2014. if (rc > 0) {
  2015. /* Success and a new mapping */
  2016. gmap_insert_rmap(sg, vmaddr, rmap);
  2017. rmap = NULL;
  2018. rc = 0;
  2019. }
  2020. gmap_pte_op_end(ptl);
  2021. spin_unlock(&sg->guest_table_lock);
  2022. }
  2023. radix_tree_preload_end();
  2024. if (!rc)
  2025. break;
  2026. rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
  2027. if (rc)
  2028. break;
  2029. }
  2030. kfree(rmap);
  2031. return rc;
  2032. }
  2033. EXPORT_SYMBOL_GPL(gmap_shadow_page);
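/*
 * Illustrative sketch only: the final step of a shadow fault. The caller
 * obtains the pte for the faulting address from the guest's own page table
 * (read_guest_pte() is a hypothetical placeholder; the address in the pte
 * is a parent gmap address) and lets gmap_shadow_page() map it.
 */
#if 0
static int example_shadow_fault_final(struct gmap *sg, unsigned long saddr)
{
	pte_t pte;
	int rc;

	rc = read_guest_pte(sg, saddr, &pte);		/* hypothetical */
	if (rc)
		return rc;
	return gmap_shadow_page(sg, saddr, pte);	/* 0, -EAGAIN, -ENOMEM, -EFAULT */
}
#endif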
  2034. /*
  2035. * gmap_shadow_notify - handle notifications for shadow gmap
  2036. *
  2037. * Called with sg->parent->shadow_lock.
  2038. */
  2039. static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
  2040. unsigned long gaddr)
  2041. {
  2042. struct gmap_rmap *rmap, *rnext, *head;
  2043. unsigned long start, end, bits, raddr;
  2044. BUG_ON(!gmap_is_shadow(sg));
  2045. spin_lock(&sg->guest_table_lock);
  2046. if (sg->removed) {
  2047. spin_unlock(&sg->guest_table_lock);
  2048. return;
  2049. }
  2050. /* Check for top level table */
  2051. start = sg->orig_asce & _ASCE_ORIGIN;
  2052. end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
  2053. if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
  2054. gaddr < end) {
  2055. /* The complete shadow table has to go */
  2056. gmap_unshadow(sg);
  2057. spin_unlock(&sg->guest_table_lock);
  2058. list_del(&sg->list);
  2059. gmap_put(sg);
  2060. return;
  2061. }
2062. /* Remove the page table tree from one specific entry */
  2063. head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
  2064. gmap_for_each_rmap_safe(rmap, rnext, head) {
  2065. bits = rmap->raddr & _SHADOW_RMAP_MASK;
  2066. raddr = rmap->raddr ^ bits;
  2067. switch (bits) {
  2068. case _SHADOW_RMAP_REGION1:
  2069. gmap_unshadow_r2t(sg, raddr);
  2070. break;
  2071. case _SHADOW_RMAP_REGION2:
  2072. gmap_unshadow_r3t(sg, raddr);
  2073. break;
  2074. case _SHADOW_RMAP_REGION3:
  2075. gmap_unshadow_sgt(sg, raddr);
  2076. break;
  2077. case _SHADOW_RMAP_SEGMENT:
  2078. gmap_unshadow_pgt(sg, raddr);
  2079. break;
  2080. case _SHADOW_RMAP_PGTABLE:
  2081. gmap_unshadow_page(sg, raddr);
  2082. break;
  2083. }
  2084. kfree(rmap);
  2085. }
  2086. spin_unlock(&sg->guest_table_lock);
  2087. }
  2088. /**
  2089. * ptep_notify - call all invalidation callbacks for a specific pte.
  2090. * @mm: pointer to the process mm_struct
  2091. * @vmaddr: virtual address in the process address space
  2092. * @pte: pointer to the page table entry
  2093. * @bits: bits from the pgste that caused the notify call
  2094. *
  2095. * This function is assumed to be called with the page table lock held
  2096. * for the pte to notify.
  2097. */
  2098. void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
  2099. pte_t *pte, unsigned long bits)
  2100. {
  2101. unsigned long offset, gaddr = 0;
  2102. unsigned long *table;
  2103. struct gmap *gmap, *sg, *next;
  2104. offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
  2105. offset = offset * (PAGE_SIZE / sizeof(pte_t));
  2106. rcu_read_lock();
  2107. list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
  2108. spin_lock(&gmap->guest_table_lock);
  2109. table = radix_tree_lookup(&gmap->host_to_guest,
  2110. vmaddr >> PMD_SHIFT);
  2111. if (table)
  2112. gaddr = __gmap_segment_gaddr(table) + offset;
  2113. spin_unlock(&gmap->guest_table_lock);
  2114. if (!table)
  2115. continue;
  2116. if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
  2117. spin_lock(&gmap->shadow_lock);
  2118. list_for_each_entry_safe(sg, next,
  2119. &gmap->children, list)
  2120. gmap_shadow_notify(sg, vmaddr, gaddr);
  2121. spin_unlock(&gmap->shadow_lock);
  2122. }
  2123. if (bits & PGSTE_IN_BIT)
  2124. gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
  2125. }
  2126. rcu_read_unlock();
  2127. }
  2128. EXPORT_SYMBOL_GPL(ptep_notify);
  2129. static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
  2130. unsigned long gaddr)
  2131. {
  2132. set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
  2133. gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
  2134. }
  2135. /**
  2136. * gmap_pmdp_xchg - exchange a gmap pmd with another
  2137. * @gmap: pointer to the guest address space structure
  2138. * @pmdp: pointer to the pmd entry
  2139. * @new: replacement entry
  2140. * @gaddr: the affected guest address
  2141. *
  2142. * This function is assumed to be called with the guest_table_lock
  2143. * held.
  2144. */
  2145. static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
  2146. unsigned long gaddr)
  2147. {
  2148. gaddr &= HPAGE_MASK;
  2149. pmdp_notify_gmap(gmap, pmdp, gaddr);
  2150. new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN));
  2151. if (MACHINE_HAS_TLB_GUEST)
  2152. __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
  2153. IDTE_GLOBAL);
  2154. else if (MACHINE_HAS_IDTE)
  2155. __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
  2156. else
  2157. __pmdp_csp(pmdp);
  2158. set_pmd(pmdp, new);
  2159. }
  2160. static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
  2161. int purge)
  2162. {
  2163. pmd_t *pmdp;
  2164. struct gmap *gmap;
  2165. unsigned long gaddr;
  2166. rcu_read_lock();
  2167. list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
  2168. spin_lock(&gmap->guest_table_lock);
  2169. pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
  2170. vmaddr >> PMD_SHIFT);
  2171. if (pmdp) {
  2172. gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
  2173. pmdp_notify_gmap(gmap, pmdp, gaddr);
  2174. WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
  2175. _SEGMENT_ENTRY_GMAP_UC));
  2176. if (purge)
  2177. __pmdp_csp(pmdp);
  2178. set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
  2179. }
  2180. spin_unlock(&gmap->guest_table_lock);
  2181. }
  2182. rcu_read_unlock();
  2183. }
  2184. /**
  2185. * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
  2186. * flushing
  2187. * @mm: pointer to the process mm_struct
  2188. * @vmaddr: virtual address in the process address space
  2189. */
  2190. void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
  2191. {
  2192. gmap_pmdp_clear(mm, vmaddr, 0);
  2193. }
  2194. EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
  2195. /**
  2196. * gmap_pmdp_csp - csp all affected guest pmd entries
  2197. * @mm: pointer to the process mm_struct
  2198. * @vmaddr: virtual address in the process address space
  2199. */
  2200. void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
  2201. {
  2202. gmap_pmdp_clear(mm, vmaddr, 1);
  2203. }
  2204. EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
  2205. /**
  2206. * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
  2207. * @mm: pointer to the process mm_struct
  2208. * @vmaddr: virtual address in the process address space
  2209. */
  2210. void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
  2211. {
  2212. unsigned long *entry, gaddr;
  2213. struct gmap *gmap;
  2214. pmd_t *pmdp;
  2215. rcu_read_lock();
  2216. list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
  2217. spin_lock(&gmap->guest_table_lock);
  2218. entry = radix_tree_delete(&gmap->host_to_guest,
  2219. vmaddr >> PMD_SHIFT);
  2220. if (entry) {
  2221. pmdp = (pmd_t *)entry;
  2222. gaddr = __gmap_segment_gaddr(entry);
  2223. pmdp_notify_gmap(gmap, pmdp, gaddr);
  2224. WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
  2225. _SEGMENT_ENTRY_GMAP_UC));
  2226. if (MACHINE_HAS_TLB_GUEST)
  2227. __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
  2228. gmap->asce, IDTE_LOCAL);
  2229. else if (MACHINE_HAS_IDTE)
  2230. __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
  2231. *entry = _SEGMENT_ENTRY_EMPTY;
  2232. }
  2233. spin_unlock(&gmap->guest_table_lock);
  2234. }
  2235. rcu_read_unlock();
  2236. }
  2237. EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
  2238. /**
  2239. * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
  2240. * @mm: pointer to the process mm_struct
  2241. * @vmaddr: virtual address in the process address space
  2242. */
  2243. void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
  2244. {
  2245. unsigned long *entry, gaddr;
  2246. struct gmap *gmap;
  2247. pmd_t *pmdp;
  2248. rcu_read_lock();
  2249. list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
  2250. spin_lock(&gmap->guest_table_lock);
  2251. entry = radix_tree_delete(&gmap->host_to_guest,
  2252. vmaddr >> PMD_SHIFT);
  2253. if (entry) {
  2254. pmdp = (pmd_t *)entry;
  2255. gaddr = __gmap_segment_gaddr(entry);
  2256. pmdp_notify_gmap(gmap, pmdp, gaddr);
  2257. WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
  2258. _SEGMENT_ENTRY_GMAP_UC));
  2259. if (MACHINE_HAS_TLB_GUEST)
  2260. __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
  2261. gmap->asce, IDTE_GLOBAL);
  2262. else if (MACHINE_HAS_IDTE)
  2263. __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
  2264. else
  2265. __pmdp_csp(pmdp);
  2266. *entry = _SEGMENT_ENTRY_EMPTY;
  2267. }
  2268. spin_unlock(&gmap->guest_table_lock);
  2269. }
  2270. rcu_read_unlock();
  2271. }
  2272. EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
  2273. /**
  2274. * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
  2275. * @gmap: pointer to guest address space
  2276. * @pmdp: pointer to the pmd to be tested
  2277. * @gaddr: virtual address in the guest address space
  2278. *
  2279. * This function is assumed to be called with the guest_table_lock
  2280. * held.
  2281. */
  2282. static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
  2283. unsigned long gaddr)
  2284. {
  2285. if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
  2286. return false;
2287. /* Already protected memory, which did not change, is clean */
  2288. if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
  2289. !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
  2290. return false;
  2291. /* Clear UC indication and reset protection */
  2292. set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
  2293. gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
  2294. return true;
  2295. }
  2296. /**
  2297. * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
  2298. * @gmap: pointer to guest address space
  2299. * @bitmap: dirty bitmap for this pmd
  2300. * @gaddr: virtual address in the guest address space
  2301. * @vmaddr: virtual address in the host address space
  2302. *
  2303. * This function is assumed to be called with the guest_table_lock
  2304. * held.
  2305. */
  2306. void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
  2307. unsigned long gaddr, unsigned long vmaddr)
  2308. {
  2309. int i;
  2310. pmd_t *pmdp;
  2311. pte_t *ptep;
  2312. spinlock_t *ptl;
  2313. pmdp = gmap_pmd_op_walk(gmap, gaddr);
  2314. if (!pmdp)
  2315. return;
  2316. if (pmd_large(*pmdp)) {
  2317. if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
  2318. bitmap_fill(bitmap, _PAGE_ENTRIES);
  2319. } else {
  2320. for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
  2321. ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
  2322. if (!ptep)
  2323. continue;
  2324. if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
  2325. set_bit(i, bitmap);
  2326. spin_unlock(ptl);
  2327. }
  2328. }
  2329. gmap_pmd_op_end(gmap, pmdp);
  2330. }
  2331. EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
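/*
 * Illustrative sketch only: collecting the dirty log for one 1 MB guest
 * segment. The caller translates the guest address to the corresponding
 * userspace address first; locking (mmap_lock etc.) is omitted for brevity
 * and set_guest_dirty_bit() is a hypothetical consumer of the 256-bit result.
 */
#if 0
static void example_collect_dirty_segment(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long bitmap[4] = { 0 };	/* one bit per 4K page in the segment */
	unsigned long vmaddr;
	unsigned int i;

	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr))
		return;
	gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
	for_each_set_bit(i, bitmap, _PAGE_ENTRIES)
		set_guest_dirty_bit(gaddr + i * PAGE_SIZE);	/* hypothetical */
}
#endif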
  2332. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  2333. static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
  2334. unsigned long end, struct mm_walk *walk)
  2335. {
  2336. struct vm_area_struct *vma = walk->vma;
  2337. split_huge_pmd(vma, pmd, addr);
  2338. return 0;
  2339. }
  2340. static const struct mm_walk_ops thp_split_walk_ops = {
  2341. .pmd_entry = thp_split_walk_pmd_entry,
  2342. .walk_lock = PGWALK_WRLOCK_VERIFY,
  2343. };
  2344. static inline void thp_split_mm(struct mm_struct *mm)
  2345. {
  2346. struct vm_area_struct *vma;
  2347. VMA_ITERATOR(vmi, mm, 0);
  2348. for_each_vma(vmi, vma) {
  2349. vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
  2350. walk_page_vma(vma, &thp_split_walk_ops, NULL);
  2351. }
  2352. mm->def_flags |= VM_NOHUGEPAGE;
  2353. }
  2354. #else
  2355. static inline void thp_split_mm(struct mm_struct *mm)
  2356. {
  2357. }
  2358. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  2359. /*
  2360. * Remove all empty zero pages from the mapping for lazy refaulting
  2361. * - This must be called after mm->context.has_pgste is set, to avoid
  2362. * future creation of zero pages
  2363. * - This must be called after THP was enabled
  2364. */
  2365. static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
  2366. unsigned long end, struct mm_walk *walk)
  2367. {
  2368. unsigned long addr;
  2369. for (addr = start; addr != end; addr += PAGE_SIZE) {
  2370. pte_t *ptep;
  2371. spinlock_t *ptl;
  2372. ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
  2373. if (is_zero_pfn(pte_pfn(*ptep)))
  2374. ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
  2375. pte_unmap_unlock(ptep, ptl);
  2376. }
  2377. return 0;
  2378. }
  2379. static const struct mm_walk_ops zap_zero_walk_ops = {
  2380. .pmd_entry = __zap_zero_pages,
  2381. .walk_lock = PGWALK_WRLOCK,
  2382. };
  2383. /*
  2384. * switch on pgstes for its userspace process (for kvm)
  2385. */
  2386. int s390_enable_sie(void)
  2387. {
  2388. struct mm_struct *mm = current->mm;
  2389. /* Do we have pgstes? if yes, we are done */
  2390. if (mm_has_pgste(mm))
  2391. return 0;
  2392. /* Fail if the page tables are 2K */
  2393. if (!mm_alloc_pgste(mm))
  2394. return -EINVAL;
  2395. mmap_write_lock(mm);
  2396. mm->context.has_pgste = 1;
  2397. /* split thp mappings and disable thp for future mappings */
  2398. thp_split_mm(mm);
  2399. walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
  2400. mmap_write_unlock(mm);
  2401. return 0;
  2402. }
  2403. EXPORT_SYMBOL_GPL(s390_enable_sie);
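/*
 * Illustrative sketch only: s390_enable_sie() is called once per process
 * before any guest mappings are set up, typically from the VM creation
 * path. The surrounding function is hypothetical.
 */
#if 0
static int example_prepare_vm(void)
{
	int rc;

	rc = s390_enable_sie();	/* switch current->mm to pgste page tables */
	if (rc)
		return rc;	/* -EINVAL: mm uses 2K page tables, cannot do SIE */
	/* ... gmap_create() etc. may be used from here on ... */
	return 0;
}
#endif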
  2404. int gmap_mark_unmergeable(void)
  2405. {
  2406. struct mm_struct *mm = current->mm;
  2407. struct vm_area_struct *vma;
  2408. unsigned long vm_flags;
  2409. int ret;
  2410. VMA_ITERATOR(vmi, mm, 0);
  2411. for_each_vma(vmi, vma) {
  2412. /* Copy vm_flags to avoid partial modifications in ksm_madvise */
  2413. vm_flags = vma->vm_flags;
  2414. ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
  2415. MADV_UNMERGEABLE, &vm_flags);
  2416. if (ret)
  2417. return ret;
  2418. vm_flags_reset(vma, vm_flags);
  2419. }
  2420. mm->def_flags &= ~VM_MERGEABLE;
  2421. return 0;
  2422. }
  2423. EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
  2424. /*
  2425. * Enable storage key handling from now on and initialize the storage
  2426. * keys with the default key.
  2427. */
  2428. static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
  2429. unsigned long next, struct mm_walk *walk)
  2430. {
  2431. /* Clear storage key */
  2432. ptep_zap_key(walk->mm, addr, pte);
  2433. return 0;
  2434. }
  2435. /*
  2436. * Give a chance to schedule after setting a key to 256 pages.
  2437. * We only hold the mm lock, which is a rwsem and the kvm srcu.
  2438. * Both can sleep.
  2439. */
  2440. static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
  2441. unsigned long next, struct mm_walk *walk)
  2442. {
  2443. cond_resched();
  2444. return 0;
  2445. }
  2446. static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
  2447. unsigned long hmask, unsigned long next,
  2448. struct mm_walk *walk)
  2449. {
  2450. pmd_t *pmd = (pmd_t *)pte;
  2451. unsigned long start, end;
  2452. struct page *page = pmd_page(*pmd);
  2453. /*
  2454. * The write check makes sure we do not set a key on shared
  2455. * memory. This is needed as the walker does not differentiate
  2456. * between actual guest memory and the process executable or
  2457. * shared libraries.
  2458. */
  2459. if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
  2460. !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
  2461. return 0;
  2462. start = pmd_val(*pmd) & HPAGE_MASK;
  2463. end = start + HPAGE_SIZE - 1;
  2464. __storage_key_init_range(start, end);
  2465. set_bit(PG_arch_1, &page->flags);
  2466. cond_resched();
  2467. return 0;
  2468. }
  2469. static const struct mm_walk_ops enable_skey_walk_ops = {
  2470. .hugetlb_entry = __s390_enable_skey_hugetlb,
  2471. .pte_entry = __s390_enable_skey_pte,
  2472. .pmd_entry = __s390_enable_skey_pmd,
  2473. .walk_lock = PGWALK_WRLOCK,
  2474. };
  2475. int s390_enable_skey(void)
  2476. {
  2477. struct mm_struct *mm = current->mm;
  2478. int rc = 0;
  2479. mmap_write_lock(mm);
  2480. if (mm_uses_skeys(mm))
  2481. goto out_up;
  2482. mm->context.uses_skeys = 1;
  2483. rc = gmap_mark_unmergeable();
  2484. if (rc) {
  2485. mm->context.uses_skeys = 0;
  2486. goto out_up;
  2487. }
  2488. walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
  2489. out_up:
  2490. mmap_write_unlock(mm);
  2491. return rc;
  2492. }
  2493. EXPORT_SYMBOL_GPL(s390_enable_skey);
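/*
 * Illustrative sketch only: storage key handling is enabled lazily, e.g.
 * from a hypothetical instruction-intercept handler, the first time the
 * guest actually uses storage keys.
 */
#if 0
static int example_handle_skey_intercept(void)
{
	if (!mm_uses_skeys(current->mm))
		return s390_enable_skey();
	return 0;
}
#endif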
  2494. /*
  2495. * Reset CMMA state, make all pages stable again.
  2496. */
  2497. static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
  2498. unsigned long next, struct mm_walk *walk)
  2499. {
  2500. ptep_zap_unused(walk->mm, addr, pte, 1);
  2501. return 0;
  2502. }
  2503. static const struct mm_walk_ops reset_cmma_walk_ops = {
  2504. .pte_entry = __s390_reset_cmma,
  2505. .walk_lock = PGWALK_WRLOCK,
  2506. };
  2507. void s390_reset_cmma(struct mm_struct *mm)
  2508. {
  2509. mmap_write_lock(mm);
  2510. walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
  2511. mmap_write_unlock(mm);
  2512. }
  2513. EXPORT_SYMBOL_GPL(s390_reset_cmma);
  2514. #define GATHER_GET_PAGES 32
  2515. struct reset_walk_state {
  2516. unsigned long next;
  2517. unsigned long count;
  2518. unsigned long pfns[GATHER_GET_PAGES];
  2519. };
  2520. static int s390_gather_pages(pte_t *ptep, unsigned long addr,
  2521. unsigned long next, struct mm_walk *walk)
  2522. {
  2523. struct reset_walk_state *p = walk->private;
  2524. pte_t pte = READ_ONCE(*ptep);
  2525. if (pte_present(pte)) {
  2526. /* we have a reference from the mapping, take an extra one */
  2527. get_page(phys_to_page(pte_val(pte)));
  2528. p->pfns[p->count] = phys_to_pfn(pte_val(pte));
  2529. p->next = next;
  2530. p->count++;
  2531. }
  2532. return p->count >= GATHER_GET_PAGES;
  2533. }
  2534. static const struct mm_walk_ops gather_pages_ops = {
  2535. .pte_entry = s390_gather_pages,
  2536. .walk_lock = PGWALK_RDLOCK,
  2537. };
  2538. /*
  2539. * Call the Destroy secure page UVC on each page in the given array of PFNs.
  2540. * Each page needs to have an extra reference, which will be released here.
  2541. */
  2542. void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
  2543. {
  2544. unsigned long i;
  2545. for (i = 0; i < count; i++) {
  2546. /* we always have an extra reference */
  2547. uv_destroy_owned_page(pfn_to_phys(pfns[i]));
  2548. /* get rid of the extra reference */
  2549. put_page(pfn_to_page(pfns[i]));
  2550. cond_resched();
  2551. }
  2552. }
  2553. EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns);
  2554. /**
  2555. * __s390_uv_destroy_range - Call the destroy secure page UVC on each page
  2556. * in the given range of the given address space.
  2557. * @mm: the mm to operate on
  2558. * @start: the start of the range
  2559. * @end: the end of the range
  2560. * @interruptible: if not 0, stop when a fatal signal is received
  2561. *
  2562. * Walk the given range of the given address space and call the destroy
  2563. * secure page UVC on each page. Optionally exit early if a fatal signal is
  2564. * pending.
  2565. *
  2566. * Return: 0 on success, -EINTR if the function stopped before completing
  2567. */
  2568. int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
  2569. unsigned long end, bool interruptible)
  2570. {
  2571. struct reset_walk_state state = { .next = start };
  2572. int r = 1;
  2573. while (r > 0) {
  2574. state.count = 0;
  2575. mmap_read_lock(mm);
  2576. r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
  2577. mmap_read_unlock(mm);
  2578. cond_resched();
  2579. s390_uv_destroy_pfns(state.count, state.pfns);
  2580. if (interruptible && fatal_signal_pending(current))
  2581. return -EINTR;
  2582. }
  2583. return 0;
  2584. }
  2585. EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
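/*
 * Illustrative sketch only: destroying all secure pages of a guest's
 * address space, e.g. before the mm goes away. The wrapper name is
 * hypothetical; passing true instead of false makes the walk abort on a
 * fatal signal.
 */
#if 0
static int example_destroy_all_secure_pages(struct mm_struct *mm)
{
	return __s390_uv_destroy_range(mm, 0, TASK_SIZE, false);
}
#endif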
  2586. /**
  2587. * s390_unlist_old_asce - Remove the topmost level of page tables from the
  2588. * list of page tables of the gmap.
  2589. * @gmap: the gmap whose table is to be removed
  2590. *
  2591. * On s390x, KVM keeps a list of all pages containing the page tables of the
  2592. * gmap (the CRST list). This list is used at tear down time to free all
  2593. * pages that are now not needed anymore.
  2594. *
  2595. * This function removes the topmost page of the tree (the one pointed to by
  2596. * the ASCE) from the CRST list.
  2597. *
  2598. * This means that it will not be freed when the VM is torn down, and needs
  2599. * to be handled separately by the caller, unless a leak is actually
  2600. * intended. Notice that this function will only remove the page from the
  2601. * list, the page will still be used as a top level page table (and ASCE).
  2602. */
  2603. void s390_unlist_old_asce(struct gmap *gmap)
  2604. {
  2605. struct page *old;
  2606. old = virt_to_page(gmap->table);
  2607. spin_lock(&gmap->guest_table_lock);
  2608. list_del(&old->lru);
  2609. /*
  2610. * Sometimes the topmost page might need to be "removed" multiple
  2611. * times, for example if the VM is rebooted into secure mode several
  2612. * times concurrently, or if s390_replace_asce fails after calling
2613. * s390_unlist_old_asce and is attempted again later. In that case
  2614. * the old asce has been removed from the list, and therefore it
  2615. * will not be freed when the VM terminates, but the ASCE is still
  2616. * in use and still pointed to.
  2617. * A subsequent call to replace_asce will follow the pointer and try
  2618. * to remove the same page from the list again.
  2619. * Therefore it's necessary that the page of the ASCE has valid
  2620. * pointers, so list_del can work (and do nothing) without
  2621. * dereferencing stale or invalid pointers.
  2622. */
  2623. INIT_LIST_HEAD(&old->lru);
  2624. spin_unlock(&gmap->guest_table_lock);
  2625. }
  2626. EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
  2627. /**
  2628. * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
  2629. * @gmap: the gmap whose ASCE needs to be replaced
  2630. *
  2631. * If the allocation of the new top level page table fails, the ASCE is not
  2632. * replaced.
  2633. * In any case, the old ASCE is always removed from the gmap CRST list.
  2634. * Therefore the caller has to make sure to save a pointer to it
  2635. * beforehand, unless a leak is actually intended.
  2636. */
  2637. int s390_replace_asce(struct gmap *gmap)
  2638. {
  2639. unsigned long asce;
  2640. struct page *page;
  2641. void *table;
  2642. s390_unlist_old_asce(gmap);
  2643. page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
  2644. if (!page)
  2645. return -ENOMEM;
  2646. page->index = 0;
  2647. table = page_to_virt(page);
  2648. memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
  2649. /*
  2650. * The caller has to deal with the old ASCE, but here we make sure
  2651. * the new one is properly added to the CRST list, so that
  2652. * it will be freed when the VM is torn down.
  2653. */
  2654. spin_lock(&gmap->guest_table_lock);
  2655. list_add(&page->lru, &gmap->crst_list);
  2656. spin_unlock(&gmap->guest_table_lock);
  2657. /* Set new table origin while preserving existing ASCE control bits */
  2658. asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
  2659. WRITE_ONCE(gmap->asce, asce);
  2660. WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
  2661. WRITE_ONCE(gmap->table, table);
  2662. return 0;
  2663. }
  2664. EXPORT_SYMBOL_GPL(s390_replace_asce);
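/*
 * Illustrative sketch only: replacing the ASCE while keeping track of the
 * old top level table, as required by the comment above. The wrapper name
 * is hypothetical; what the caller later does with *old_table is up to it.
 */
#if 0
static int example_replace_asce(struct gmap *gmap, void **old_table)
{
	int rc;

	*old_table = gmap->table;	/* save before it gets unlisted */
	rc = s390_replace_asce(gmap);
	if (rc)
		return rc;	/* -ENOMEM: ASCE unchanged, but already unlisted */
	/* *old_table is no longer on the CRST list and must be handled by us */
	return 0;
}
#endif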