// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#ifdef CONFIG_KDP_NS
#include <linux/kdp.h>
#endif

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
        if (!str)
                return 0;
        mhash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
        if (!str)
                return 0;
        mphash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("mphash_entries=", set_mphash_entries);

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */

struct mount_kattr {
        unsigned int attr_set;
        unsigned int attr_clr;
        unsigned int propagation;
        unsigned int lookup_flags;
        bool recurse;
        struct user_namespace *mnt_userns;
};

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline void lock_mount_hash(void)
{
        write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
        write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
        tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> m_hash_shift);
        return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
        unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> mp_hash_shift);
        return &mountpoint_hashtable[tmp & mp_hash_mask];
}
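
/*
 * Note on the two hash functions above: a child mount is keyed by the
 * (parent vfsmount, mountpoint dentry) pointer pair, so both pointers
 * are folded into one hash value. Dividing by L1_CACHE_BYTES discards
 * the low bits that are identical for cache-line-aligned allocations,
 * and the extra "tmp >> shift" term mixes high-order bits into the
 * table index.
 */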
static int mnt_alloc_id(struct mount *mnt)
{
        int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

        if (res < 0)
                return res;
        mnt->mnt_id = res;
        return 0;
}

static void mnt_free_id(struct mount *mnt)
{
        ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
        int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

        if (res < 0)
                return res;
        mnt->mnt_group_id = res;
        return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
        ida_free(&mnt_group_ida, mnt->mnt_group_id);
        mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
        this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
        preempt_disable();
        mnt->mnt_count += n;
        preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
        int count = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
        }

        return count;
#else
        return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
        struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
        if (mnt) {
                int err;

                err = mnt_alloc_id(mnt);
                if (err)
                        goto out_free_cache;

#ifdef CONFIG_KDP_NS
                err = kdp_mnt_alloc_vfsmount(mnt);
                if (err)
                        goto out_free_cache;
#endif
                if (name) {
                        mnt->mnt_devname = kstrdup_const(name,
                                                         GFP_KERNEL_ACCOUNT);
                        if (!mnt->mnt_devname)
                                goto out_free_id;
                }

#ifdef CONFIG_SMP
                mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
                if (!mnt->mnt_pcp)
                        goto out_free_devname;

                this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
                mnt->mnt_count = 1;
                mnt->mnt_writers = 0;
#endif

                INIT_HLIST_NODE(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
                INIT_LIST_HEAD(&mnt->mnt_mounts);
                INIT_LIST_HEAD(&mnt->mnt_list);
                INIT_LIST_HEAD(&mnt->mnt_expire);
                INIT_LIST_HEAD(&mnt->mnt_share);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
                INIT_LIST_HEAD(&mnt->mnt_slave);
                INIT_HLIST_NODE(&mnt->mnt_mp_list);
                INIT_LIST_HEAD(&mnt->mnt_umounting);
                INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
#ifdef CONFIG_KDP_NS
                kdp_set_mnt_userns(((struct kdp_mount *)mnt)->mnt, &init_user_ns);
#else
                mnt->mnt.mnt_userns = &init_user_ns;
#endif
        }
        return mnt;

#ifdef CONFIG_SMP
out_free_devname:
        kfree_const(mnt->mnt_devname);
#endif
out_free_id:
        mnt_free_id(mnt);
out_free_cache:
        kmem_cache_free(mnt_cache, mnt);
        return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
        return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
        this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
        mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
        this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
        mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
        unsigned int count = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
        }

        return count;
#else
        return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
        if (mnt->mnt_sb->s_readonly_remount)
                return 1;
        /* Order wrt setting s_flags/s_readonly_remount in do_remount() */
        smp_rmb();
        return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
        struct mount *mnt = real_mount(m);
        int ret = 0;

        preempt_disable();
        mnt_inc_writers(mnt);
        /*
         * The store to mnt_inc_writers must be visible before we pass
         * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
         * incremented count after it has set MNT_WRITE_HOLD.
         */
        smp_mb();
        might_lock(&mount_lock.lock);
#ifdef CONFIG_KDP_NS
        while (READ_ONCE(((struct kdp_mount *)mnt)->mnt->mnt_flags) & MNT_WRITE_HOLD) {
#else
        while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
#endif
                if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
                        cpu_relax();
                } else {
                        /*
                         * This prevents priority inversion if the task
                         * setting MNT_WRITE_HOLD got preempted on a remote
                         * CPU, and it prevents livelock if the task setting
                         * MNT_WRITE_HOLD has a lower priority and is bound to
                         * the same CPU as the task that is spinning here.
                         */
                        preempt_enable();
                        lock_mount_hash();
                        unlock_mount_hash();
                        preempt_disable();
                }
        }
        /*
         * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
         * be set to match its requirements. So we must not load that until
         * MNT_WRITE_HOLD is cleared.
         */
        smp_rmb();
        if (mnt_is_readonly(m)) {
                mnt_dec_writers(mnt);
                ret = -EROFS;
        }
        preempt_enable();

        return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
        int ret;

        sb_start_write(m->mnt_sb);
        ret = __mnt_want_write(m);
        if (ret)
                sb_end_write(m->mnt_sb);
        return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
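
/*
 * A minimal sketch of how a caller is expected to pair these helpers
 * (illustrative only; the modifying operation is a stand-in):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = vfs_unlink(...);		// or any other modifying operation
 *	mnt_drop_write(path->mnt);
 *	return err;
 */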
/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like __mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with __mnt_drop_write_file.
 */
int __mnt_want_write_file(struct file *file)
{
        if (file->f_mode & FMODE_WRITER) {
                /*
                 * Superblock may have become readonly while there are still
                 * writable fd's, e.g. due to a fs error with errors=remount-ro
                 */
                if (__mnt_is_readonly(file->f_path.mnt))
                        return -EROFS;
                return 0;
        }
        return __mnt_want_write(file->f_path.mnt);
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
        int ret;

        sb_start_write(file_inode(file)->i_sb);
        ret = __mnt_want_write_file(file);
        if (ret)
                sb_end_write(file_inode(file)->i_sb);
        return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
        preempt_disable();
        mnt_dec_writers(real_mount(mnt));
        preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
        __mnt_drop_write(mnt);
        sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
        if (!(file->f_mode & FMODE_WRITER))
                __mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
        __mnt_drop_write_file(file);
        sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
#ifdef CONFIG_KDP_NS
        kdp_set_mnt_flags(((struct kdp_mount *)mnt)->mnt, MNT_WRITE_HOLD);
#else
        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
#endif
        /*
         * After storing MNT_WRITE_HOLD, we'll read the counters. This store
         * should be visible before we do.
         */
        smp_mb();

        /*
         * With writers on hold, if this value is zero, then there are
         * definitely no active writers (although held writers may subsequently
         * increment the count, they'll have to wait, and decrement it after
         * seeing MNT_READONLY).
         *
         * It is OK to have counter incremented on one CPU and decremented on
         * another: the sum will add up correctly. The danger would be when we
         * sum up each counter, if we read a counter before it is incremented,
         * but then read another CPU's count which it has been subsequently
         * decremented from -- we would see more decrements than we should.
         * MNT_WRITE_HOLD protects against this scenario, because
         * mnt_want_write first increments count, then smp_mb, then spins on
         * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
         * we're counting up here.
         */
        if (mnt_get_writers(mnt) > 0)
                return -EBUSY;

        return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
        /*
         * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
         * that become unheld will see MNT_READONLY.
         */
        smp_wmb();
#ifdef CONFIG_KDP_NS
        kdp_clear_mnt_flags(((struct kdp_mount *)mnt)->mnt, MNT_WRITE_HOLD);
#else
        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
#endif
}
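
/*
 * Both helpers above must be called under lock_mount_hash(); see
 * mnt_make_readonly() and sb_prepare_remount_readonly() below for the
 * two calling patterns: per-mount hold/update/unhold, and holding every
 * mount on a superblock before flipping s_readonly_remount.
 */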
static int mnt_make_readonly(struct mount *mnt)
{
        int ret;

        ret = mnt_hold_writers(mnt);
        if (!ret)
#ifdef CONFIG_KDP_NS
                kdp_set_mnt_flags(((struct kdp_mount *)mnt)->mnt, MNT_READONLY);
#else
                mnt->mnt.mnt_flags |= MNT_READONLY;
#endif
        mnt_unhold_writers(mnt);
        return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
        struct mount *mnt;
        int err = 0;

        /* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
        if (atomic_long_read(&sb->s_remove_count))
                return -EBUSY;

        lock_mount_hash();
        list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
#ifdef CONFIG_KDP_NS
                if (!(((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_READONLY)) {
#else
                if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
#endif
                        err = mnt_hold_writers(mnt);
                        if (err)
                                break;
                }
        }
        if (!err && atomic_long_read(&sb->s_remove_count))
                err = -EBUSY;

        if (!err) {
                sb->s_readonly_remount = 1;
                smp_wmb();
        }
        list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
#ifdef CONFIG_KDP_NS
                if (((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_WRITE_HOLD)
                        kdp_clear_mnt_flags(((struct kdp_mount *)mnt)->mnt, MNT_WRITE_HOLD);
#else
                if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
                        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
#endif
        }
        unlock_mount_hash();

        return err;
}

static void free_vfsmnt(struct mount *mnt)
{
        struct user_namespace *mnt_userns;

#ifdef CONFIG_KDP_NS
        mnt_userns = mnt_user_ns(((struct kdp_mount *)mnt)->mnt);
#else
        mnt_userns = mnt_user_ns(&mnt->mnt);
#endif
        if (!initial_idmapping(mnt_userns))
                put_user_ns(mnt_userns);
        kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
        free_percpu(mnt->mnt_pcp);
#endif
#ifdef CONFIG_KDP_NS
        if (((struct kdp_mount *)mnt)->mnt && is_kdp_vfsmnt_cache((unsigned long)((struct kdp_mount *)mnt)->mnt))
                kdp_free_vfsmount(((struct kdp_mount *)mnt)->mnt);
#endif
        kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
        free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
        struct mount *mnt;
        if (read_seqretry(&mount_lock, seq))
                return 1;
        if (bastard == NULL)
                return 0;
        mnt = real_mount(bastard);
        mnt_add_count(mnt, 1);
        smp_mb();			// see mntput_no_expire()
        if (likely(!read_seqretry(&mount_lock, seq)))
                return 0;
        if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
                mnt_add_count(mnt, -1);
                return 1;
        }
        lock_mount_hash();
        if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
                mnt_add_count(mnt, -1);
                unlock_mount_hash();
                return 1;
        }
        unlock_mount_hash();
        /* caller will mntput() */
        return -1;
}
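
/*
 * Return values of __legitimize_mnt(), as consumed by legitimize_mnt()
 * below: 0 means the reference was grabbed and the mount_lock sequence
 * still matches; 1 means the attempt failed but nothing needs undoing
 * (the caller should retry the lookup); -1 means we raced with an
 * unmount and the caller must drop rcu_read_lock and mntput() the
 * reference we took.
 */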
/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
        int res = __legitimize_mnt(bastard, seq);
        if (likely(!res))
                return true;
        if (unlikely(res < 0)) {
                rcu_read_unlock();
                mntput(bastard);
                rcu_read_lock();
        }
        return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
        struct hlist_head *head = m_hash(mnt, dentry);
        struct mount *p;

        hlist_for_each_entry_rcu(p, head, mnt_hash)
#ifdef CONFIG_KDP_NS
                if (((struct kdp_mount *)(p->mnt_parent))->mnt == mnt && p->mnt_mountpoint == dentry)
#else
                if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
#endif
                        return p;
        return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
        struct mount *child_mnt;
        struct vfsmount *m;
        unsigned seq;

        rcu_read_lock();
        do {
                seq = read_seqbegin(&mount_lock);
                child_mnt = __lookup_mnt(path->mnt, path->dentry);
#ifdef CONFIG_KDP_NS
                m = child_mnt ? ((struct kdp_mount *)child_mnt)->mnt : NULL;
#else
                m = child_mnt ? &child_mnt->mnt : NULL;
#endif
        } while (!legitimize_mnt(m, seq));
        rcu_read_unlock();
        return m;
}

static inline void lock_ns_list(struct mnt_namespace *ns)
{
        spin_lock(&ns->ns_lock);
}

static inline void unlock_ns_list(struct mnt_namespace *ns)
{
        spin_unlock(&ns->ns_lock);
}

static inline bool mnt_is_cursor(struct mount *mnt)
{
        return mnt->mnt.mnt_flags & MNT_CURSOR;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
        struct mnt_namespace *ns = current->nsproxy->mnt_ns;
        struct mount *mnt;
        bool is_covered = false;

        down_read(&namespace_sem);
        lock_ns_list(ns);
        list_for_each_entry(mnt, &ns->list, mnt_list) {
                if (mnt_is_cursor(mnt))
                        continue;
                is_covered = (mnt->mnt_mountpoint == dentry);
                if (is_covered)
                        break;
        }
        unlock_ns_list(ns);
        up_read(&namespace_sem);

        return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
        struct hlist_head *chain = mp_hash(dentry);
        struct mountpoint *mp;

        hlist_for_each_entry(mp, chain, m_hash) {
                if (mp->m_dentry == dentry) {
                        mp->m_count++;
                        return mp;
                }
        }
        return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
        struct mountpoint *mp, *new = NULL;
        int ret;

        if (d_mountpoint(dentry)) {
                /* might be worth a WARN_ON() */
                if (d_unlinked(dentry))
                        return ERR_PTR(-ENOENT);
mountpoint:
                read_seqlock_excl(&mount_lock);
                mp = lookup_mountpoint(dentry);
                read_sequnlock_excl(&mount_lock);
                if (mp)
                        goto done;
        }

        if (!new)
                new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        /* Exactly one process may set d_mounted */
        ret = d_set_mounted(dentry);

        /* Someone else set d_mounted? */
        if (ret == -EBUSY)
                goto mountpoint;

        /* The dentry is not available as a mountpoint? */
        mp = ERR_PTR(ret);
        if (ret)
                goto done;

        /* Add the new mountpoint to the hash table */
        read_seqlock_excl(&mount_lock);
        new->m_dentry = dget(dentry);
        new->m_count = 1;
        hlist_add_head(&new->m_hash, mp_hash(dentry));
        INIT_HLIST_HEAD(&new->m_list);
        read_sequnlock_excl(&mount_lock);

        mp = new;
        new = NULL;
done:
        kfree(new);
        return mp;
}
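
/*
 * Note the control flow in get_mountpoint() above: the fast path reuses
 * an existing struct mountpoint via lookup_mountpoint(); otherwise we
 * allocate one and race to claim the dentry with d_set_mounted(). A
 * -EBUSY result means another task claimed it first, so we jump back to
 * the "mountpoint" label and pick up the winner's entry instead; the
 * now-unneeded allocation is freed at "done".
 */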
/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
        if (!--mp->m_count) {
                struct dentry *dentry = mp->m_dentry;
                BUG_ON(!hlist_empty(&mp->m_list));
                spin_lock(&dentry->d_lock);
                dentry->d_flags &= ~DCACHE_MOUNTED;
                spin_unlock(&dentry->d_lock);
                dput_to_list(dentry, list);
                hlist_del(&mp->m_hash);
                kfree(mp);
        }
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
        __put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
        return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
        if (ns) {
                ns->event = ++event;
                wake_up_interruptible(&ns->poll);
        }
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
        if (ns && ns->event != event) {
                ns->event = event;
                wake_up_interruptible(&ns->poll);
        }
}

/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
        struct mountpoint *mp;
        mnt->mnt_parent = mnt;
#ifdef CONFIG_KDP_NS
        mnt->mnt_mountpoint = ((struct kdp_mount *)mnt)->mnt->mnt_root;
#else
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
#endif
        list_del_init(&mnt->mnt_child);
        hlist_del_init_rcu(&mnt->mnt_hash);
        hlist_del_init(&mnt->mnt_mp_list);
        mp = mnt->mnt_mp;
        mnt->mnt_mp = NULL;
        return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
        put_mountpoint(unhash_mnt(mnt));
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
                        struct mountpoint *mp,
                        struct mount *child_mnt)
{
        mp->m_count++;
        mnt_add_count(mnt, 1);	/* essentially, that's mntget */
        child_mnt->mnt_mountpoint = mp->m_dentry;
        child_mnt->mnt_parent = mnt;
        child_mnt->mnt_mp = mp;
        hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
#ifdef CONFIG_KDP_NS
        hlist_add_head_rcu(&mnt->mnt_hash,
                           m_hash(((struct kdp_mount *)parent)->mnt, mnt->mnt_mountpoint));
#else
        hlist_add_head_rcu(&mnt->mnt_hash,
                           m_hash(&parent->mnt, mnt->mnt_mountpoint));
#endif
        list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
                       struct mount *parent,
                       struct mountpoint *mp)
{
        mnt_set_mountpoint(parent, mp, mnt);
        __attach_mnt(mnt, parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
        struct mountpoint *old_mp = mnt->mnt_mp;
        struct mount *old_parent = mnt->mnt_parent;

        list_del_init(&mnt->mnt_child);
        hlist_del_init(&mnt->mnt_mp_list);
        hlist_del_init_rcu(&mnt->mnt_hash);

        attach_mnt(mnt, parent, mp);

        put_mountpoint(old_mp);
        mnt_add_count(old_parent, -1);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;
        LIST_HEAD(head);
        struct mnt_namespace *n = parent->mnt_ns;

        BUG_ON(parent == mnt);

        list_add_tail(&head, &mnt->mnt_list);
        list_for_each_entry(m, &head, mnt_list)
                m->mnt_ns = n;

        list_splice(&head, n->list.prev);

        n->mounts += n->pending_mounts;
        n->pending_mounts = 0;

        __attach_mnt(mnt, parent);
        touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
        struct list_head *next = p->mnt_mounts.next;
        if (next == &p->mnt_mounts) {
                while (1) {
                        if (p == root)
                                return NULL;
                        next = p->mnt_child.next;
                        if (next != &p->mnt_parent->mnt_mounts)
                                break;
                        p = p->mnt_parent;
                }
        }
        return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
        struct list_head *prev = p->mnt_mounts.prev;
        while (prev != &p->mnt_mounts) {
                p = list_entry(prev, struct mount, mnt_child);
                prev = p->mnt_mounts.prev;
        }
        return p;
}
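
/*
 * next_mnt() walks a mount tree in depth-first order: it descends into
 * the first child if there is one, otherwise it moves to the next
 * sibling, climbing back toward @root until a sibling is found.
 * skip_mnt_tree() complements it by returning the last mount in @p's
 * subtree, so that a subsequent next_mnt() call steps over the whole
 * subtree.
 */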
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
        struct mount *mnt;
        struct user_namespace *fs_userns;
#ifdef CONFIG_KDP_NS
        struct user_namespace *userns;
#endif

        if (!fc->root)
                return ERR_PTR(-EINVAL);

        mnt = alloc_vfsmnt(fc->source ?: "none");
        if (!mnt)
                return ERR_PTR(-ENOMEM);

        if (fc->sb_flags & SB_KERNMOUNT)
#ifdef CONFIG_KDP_NS
                kdp_set_mnt_flags(((struct kdp_mount *)mnt)->mnt, MNT_INTERNAL);
#else
                mnt->mnt.mnt_flags = MNT_INTERNAL;
#endif

        atomic_inc(&fc->root->d_sb->s_active);
#ifdef CONFIG_KDP_NS
        kdp_set_mnt_root_sb(((struct kdp_mount *)mnt)->mnt, dget(fc->root), fc->root->d_sb);
        mnt->mnt_mountpoint = ((struct kdp_mount *)mnt)->mnt->mnt_root;
#else
        mnt->mnt.mnt_sb = fc->root->d_sb;
        mnt->mnt.mnt_root = dget(fc->root);
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
#endif
        mnt->mnt_parent = mnt;

#ifdef CONFIG_KDP_NS
        fs_userns = ((struct kdp_mount *)mnt)->mnt->mnt_sb->s_user_ns;
        if (!initial_idmapping(fs_userns)) {
                userns = get_user_ns(fs_userns);
                kdp_set_mnt_userns(((struct kdp_mount *)mnt)->mnt, userns);
        }
#else
        fs_userns = mnt->mnt.mnt_sb->s_user_ns;
        if (!initial_idmapping(fs_userns))
                mnt->mnt.mnt_userns = get_user_ns(fs_userns);
#endif

        lock_mount_hash();
#ifdef CONFIG_KDP_NS
        list_add_tail(&mnt->mnt_instance, &((struct kdp_mount *)mnt)->mnt->mnt_sb->s_mounts);
#else
        list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
#endif
        unlock_mount_hash();
#ifdef CONFIG_KDP_NS
        return ((struct kdp_mount *)mnt)->mnt;
#else
        return &mnt->mnt;
#endif
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
        int err = vfs_get_tree(fc);
        if (!err) {
                up_write(&fc->root->d_sb->s_umount);
                return vfs_create_mount(fc);
        }
        return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
                                int flags, const char *name,
                                void *data)
{
        struct fs_context *fc;
        struct vfsmount *mnt;
        int ret = 0;

        if (!type)
                return ERR_PTR(-EINVAL);

        fc = fs_context_for_mount(type, flags);
        if (IS_ERR(fc))
                return ERR_CAST(fc);

        if (name)
                ret = vfs_parse_fs_string(fc, "source",
                                          name, strlen(name));
        if (!ret)
                ret = parse_monolithic_mount_data(fc, data);
        if (!ret)
                mnt = fc_mount(fc);
        else
                mnt = ERR_PTR(ret);

        put_fs_context(fc);
        return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
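
/*
 * A minimal sketch of how a kernel-internal mount is typically obtained
 * through the helper above (illustrative only; "my_fs_type" stands in
 * for whatever file_system_type the caller has registered):
 *
 *	struct vfsmount *mnt = vfs_kern_mount(&my_fs_type, SB_KERNMOUNT,
 *					      "none", NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	...
 *	mntput(mnt);	// drop the reference when done
 */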
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
             const char *name, void *data)
{
        /* Until it is worked out how to pass the user namespace
         * through from the parent mount to the submount don't support
         * unprivileged mounts with submounts.
         */
        if (mountpoint->d_sb->s_user_ns != &init_user_ns)
                return ERR_PTR(-EPERM);

        return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                               int flag)
{
#ifdef CONFIG_KDP_NS
        struct super_block *sb = ((struct kdp_mount *)old)->mnt->mnt_sb;
        int nsflags;
        struct user_namespace *userns;
#else
        struct super_block *sb = old->mnt.mnt_sb;
#endif
        struct mount *mnt;
        int err;

        mnt = alloc_vfsmnt(old->mnt_devname);
        if (!mnt)
                return ERR_PTR(-ENOMEM);

        if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
                mnt->mnt_group_id = 0; /* not a peer of original */
        else
                mnt->mnt_group_id = old->mnt_group_id;

        if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
                err = mnt_alloc_group_id(mnt);
                if (err)
                        goto out_free;
        }

#ifdef CONFIG_KDP_NS
        nsflags = ((struct kdp_mount *)old)->mnt->mnt_flags;
        nsflags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
        kdp_assign_mnt_flags(((struct kdp_mount *)mnt)->mnt, nsflags);

        atomic_inc(&sb->s_active);
        userns = mnt_user_ns(((struct kdp_mount *)old)->mnt);
        kdp_set_mnt_userns(((struct kdp_mount *)mnt)->mnt, userns);
        if (!initial_idmapping(((struct kdp_mount *)mnt)->mnt->mnt_userns)) {
                userns = get_user_ns(((struct kdp_mount *)mnt)->mnt->mnt_userns);
                kdp_set_mnt_userns(((struct kdp_mount *)mnt)->mnt, userns);
        }
        kdp_set_mnt_root_sb(((struct kdp_mount *)mnt)->mnt, dget(root), sb);
        mnt->mnt_mountpoint = ((struct kdp_mount *)mnt)->mnt->mnt_root;
#else
        mnt->mnt.mnt_flags = old->mnt.mnt_flags;
        mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

        atomic_inc(&sb->s_active);
        mnt->mnt.mnt_userns = mnt_user_ns(&old->mnt);
        if (!initial_idmapping(mnt->mnt.mnt_userns))
                mnt->mnt.mnt_userns = get_user_ns(mnt->mnt.mnt_userns);
        mnt->mnt.mnt_sb = sb;
        mnt->mnt.mnt_root = dget(root);
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
#endif
        mnt->mnt_parent = mnt;
        lock_mount_hash();
        list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
        unlock_mount_hash();

        if ((flag & CL_SLAVE) ||
            ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
                list_add(&mnt->mnt_slave, &old->mnt_slave_list);
                mnt->mnt_master = old;
                CLEAR_MNT_SHARED(mnt);
        } else if (!(flag & CL_PRIVATE)) {
                if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
                        list_add(&mnt->mnt_share, &old->mnt_share);
                if (IS_MNT_SLAVE(old))
                        list_add(&mnt->mnt_slave, &old->mnt_slave);
                mnt->mnt_master = old->mnt_master;
        } else {
                CLEAR_MNT_SHARED(mnt);
        }
        if (flag & CL_MAKE_SHARED)
                set_mnt_shared(mnt);

        /* stick the duplicate mount on the same expiry list
         * as the original if that was on one */
        if (flag & CL_EXPIRE) {
                if (!list_empty(&old->mnt_expire))
                        list_add(&mnt->mnt_expire, &old->mnt_expire);
        }

        return mnt;

 out_free:
        mnt_free_id(mnt);
        free_vfsmnt(mnt);
        return ERR_PTR(err);
}
  1141. static void cleanup_mnt(struct mount *mnt)
  1142. {
  1143. struct hlist_node *p;
  1144. struct mount *m;
  1145. /*
  1146. * The warning here probably indicates that somebody messed
  1147. * up a mnt_want/drop_write() pair. If this happens, the
  1148. * filesystem was probably unable to make r/w->r/o transitions.
  1149. * The locking used to deal with mnt_count decrement provides barriers,
  1150. * so mnt_get_writers() below is safe.
  1151. */
  1152. WARN_ON(mnt_get_writers(mnt));
  1153. if (unlikely(mnt->mnt_pins.first))
  1154. mnt_pin_kill(mnt);
  1155. #ifdef CONFIG_KDP_NS
  1156. hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
  1157. hlist_del(&m->mnt_umount);
  1158. mntput(((struct kdp_mount *)m)->mnt);
  1159. }
  1160. fsnotify_vfsmount_delete(((struct kdp_mount *)mnt)->mnt);
  1161. dput(((struct kdp_mount *)mnt)->mnt->mnt_root);
  1162. deactivate_super(((struct kdp_mount *)mnt)->mnt->mnt_sb);
  1163. #else
  1164. hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
  1165. hlist_del(&m->mnt_umount);
  1166. mntput(&m->mnt);
  1167. }
  1168. fsnotify_vfsmount_delete(&mnt->mnt);
  1169. dput(mnt->mnt.mnt_root);
  1170. deactivate_super(mnt->mnt.mnt_sb);
  1171. #endif
  1172. mnt_free_id(mnt);
  1173. call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
  1174. }
  1175. static void __cleanup_mnt(struct rcu_head *head)
  1176. {
  1177. cleanup_mnt(container_of(head, struct mount, mnt_rcu));
  1178. }
  1179. static LLIST_HEAD(delayed_mntput_list);
  1180. static void delayed_mntput(struct work_struct *unused)
  1181. {
  1182. struct llist_node *node = llist_del_all(&delayed_mntput_list);
  1183. struct mount *m, *t;
  1184. llist_for_each_entry_safe(m, t, node, mnt_llist)
  1185. cleanup_mnt(m);
  1186. }
  1187. static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
  1188. static void mntput_no_expire(struct mount *mnt)
  1189. {
  1190. LIST_HEAD(list);
  1191. int count;
  1192. rcu_read_lock();
  1193. if (likely(READ_ONCE(mnt->mnt_ns))) {
  1194. /*
  1195. * Since we don't do lock_mount_hash() here,
  1196. * ->mnt_ns can change under us. However, if it's
  1197. * non-NULL, then there's a reference that won't
  1198. * be dropped until after an RCU delay done after
  1199. * turning ->mnt_ns NULL. So if we observe it
  1200. * non-NULL under rcu_read_lock(), the reference
  1201. * we are dropping is not the final one.
  1202. */
  1203. mnt_add_count(mnt, -1);
  1204. rcu_read_unlock();
  1205. return;
  1206. }
  1207. lock_mount_hash();
  1208. /*
  1209. * make sure that if __legitimize_mnt() has not seen us grab
  1210. * mount_lock, we'll see their refcount increment here.
  1211. */
  1212. smp_mb();
  1213. mnt_add_count(mnt, -1);
  1214. count = mnt_get_count(mnt);
  1215. if (count != 0) {
  1216. WARN_ON(count < 0);
  1217. rcu_read_unlock();
  1218. unlock_mount_hash();
  1219. return;
  1220. }
  1221. #ifdef CONFIG_KDP_NS
  1222. if (unlikely(((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_DOOMED)) {
  1223. #else
  1224. if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
  1225. #endif
  1226. rcu_read_unlock();
  1227. unlock_mount_hash();
  1228. return;
  1229. }
  1230. #ifdef CONFIG_KDP_NS
  1231. kdp_set_mnt_flags(((struct kdp_mount *)mnt)->mnt, MNT_DOOMED);
  1232. #else
  1233. mnt->mnt.mnt_flags |= MNT_DOOMED;
  1234. #endif
  1235. rcu_read_unlock();
  1236. list_del(&mnt->mnt_instance);
  1237. if (unlikely(!list_empty(&mnt->mnt_mounts))) {
  1238. struct mount *p, *tmp;
  1239. list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
  1240. __put_mountpoint(unhash_mnt(p), &list);
  1241. hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
  1242. }
  1243. }
  1244. unlock_mount_hash();
  1245. shrink_dentry_list(&list);
  1246. #ifdef CONFIG_KDP_NS
  1247. if (likely(!(((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_INTERNAL))) {
  1248. #else
  1249. if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
  1250. #endif
  1251. struct task_struct *task = current;
  1252. if (likely(!(task->flags & PF_KTHREAD))) {
  1253. init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
  1254. if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
  1255. return;
  1256. }
  1257. if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
  1258. schedule_delayed_work(&delayed_mntput_work, 1);
  1259. return;
  1260. }
  1261. cleanup_mnt(mnt);
  1262. }
  1263. void mntput(struct vfsmount *mnt)
  1264. {
  1265. if (mnt) {
  1266. struct mount *m = real_mount(mnt);
  1267. /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
  1268. if (unlikely(m->mnt_expiry_mark))
  1269. m->mnt_expiry_mark = 0;
  1270. mntput_no_expire(m);
  1271. }
  1272. }
  1273. EXPORT_SYMBOL(mntput);
  1274. struct vfsmount *mntget(struct vfsmount *mnt)
  1275. {
  1276. if (mnt)
  1277. mnt_add_count(real_mount(mnt), 1);
  1278. return mnt;
  1279. }
  1280. EXPORT_SYMBOL(mntget);
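/*
 * Editorial example (not part of the original source): mntget() and
 * mntput() must always pair. Code that stashes a vfsmount pointer beyond
 * its caller's reference takes its own; "example_keep" is a hypothetical
 * cache slot.
 */
#if 0	/* illustrative sketch, not compiled */
static struct vfsmount *example_keep;

static void example_cache_mount(struct vfsmount *mnt)
{
	example_keep = mntget(mnt);	/* take our own reference */
}

static void example_drop_mount(void)
{
	mntput(example_keep);		/* release it exactly once */
	example_keep = NULL;
}
#endif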
  1281. /**
  1282. * path_is_mountpoint() - Check if path is a mount in the current namespace.
  1283. * @path: path to check
  1284. *
  1285. * d_mountpoint() can only be used reliably to establish if a dentry is
  1286. * not mounted in any namespace and that common case is handled inline.
  1287. * d_mountpoint() isn't aware of the possibility there may be multiple
  1288. * mounts using a given dentry in a different namespace. This function
  1289. * checks if the passed in path is a mountpoint rather than the dentry
  1290. * alone.
  1291. */
  1292. bool path_is_mountpoint(const struct path *path)
  1293. {
  1294. unsigned seq;
  1295. bool res;
  1296. if (!d_mountpoint(path->dentry))
  1297. return false;
  1298. rcu_read_lock();
  1299. do {
  1300. seq = read_seqbegin(&mount_lock);
  1301. res = __path_is_mountpoint(path);
  1302. } while (read_seqretry(&mount_lock, seq));
  1303. rcu_read_unlock();
  1304. return res;
  1305. }
  1306. EXPORT_SYMBOL(path_is_mountpoint);
  1307. struct vfsmount *mnt_clone_internal(const struct path *path)
  1308. {
  1309. struct mount *p;
  1310. p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
  1311. if (IS_ERR(p))
  1312. return ERR_CAST(p);
  1313. #ifdef CONFIG_KDP_NS
  1314. kdp_set_mnt_flags(((struct kdp_mount *)p)->mnt, MNT_INTERNAL);
  1315. return ((struct kdp_mount *)p)->mnt;
  1316. #else
  1317. p->mnt.mnt_flags |= MNT_INTERNAL;
  1318. return &p->mnt;
  1319. #endif
  1320. }
  1321. #ifdef CONFIG_PROC_FS
  1322. static struct mount *mnt_list_next(struct mnt_namespace *ns,
  1323. struct list_head *p)
  1324. {
  1325. struct mount *mnt, *ret = NULL;
  1326. lock_ns_list(ns);
  1327. list_for_each_continue(p, &ns->list) {
  1328. mnt = list_entry(p, typeof(*mnt), mnt_list);
  1329. if (!mnt_is_cursor(mnt)) {
  1330. ret = mnt;
  1331. break;
  1332. }
  1333. }
  1334. unlock_ns_list(ns);
  1335. return ret;
  1336. }
  1337. /* iterator; we want it to have access to namespace_sem, thus here... */
  1338. static void *m_start(struct seq_file *m, loff_t *pos)
  1339. {
  1340. struct proc_mounts *p = m->private;
  1341. struct list_head *prev;
  1342. down_read(&namespace_sem);
  1343. if (!*pos) {
  1344. prev = &p->ns->list;
  1345. } else {
  1346. prev = &p->cursor.mnt_list;
  1347. /* Read after we'd reached the end? */
  1348. if (list_empty(prev))
  1349. return NULL;
  1350. }
  1351. return mnt_list_next(p->ns, prev);
  1352. }
  1353. static void *m_next(struct seq_file *m, void *v, loff_t *pos)
  1354. {
  1355. struct proc_mounts *p = m->private;
  1356. struct mount *mnt = v;
  1357. ++*pos;
  1358. return mnt_list_next(p->ns, &mnt->mnt_list);
  1359. }
  1360. static void m_stop(struct seq_file *m, void *v)
  1361. {
  1362. struct proc_mounts *p = m->private;
  1363. struct mount *mnt = v;
  1364. lock_ns_list(p->ns);
  1365. if (mnt)
  1366. list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
  1367. else
  1368. list_del_init(&p->cursor.mnt_list);
  1369. unlock_ns_list(p->ns);
  1370. up_read(&namespace_sem);
  1371. }
  1372. static int m_show(struct seq_file *m, void *v)
  1373. {
  1374. struct proc_mounts *p = m->private;
  1375. struct mount *r = v;
  1376. #ifdef CONFIG_KDP_NS
  1377. return p->show(m, ((struct kdp_mount *)r)->mnt);
  1378. #else
  1379. return p->show(m, &r->mnt);
  1380. #endif
  1381. }
  1382. const struct seq_operations mounts_op = {
  1383. .start = m_start,
  1384. .next = m_next,
  1385. .stop = m_stop,
  1386. .show = m_show,
  1387. };
  1388. void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
  1389. {
  1390. down_read(&namespace_sem);
  1391. lock_ns_list(ns);
  1392. list_del(&cursor->mnt_list);
  1393. unlock_ns_list(ns);
  1394. up_read(&namespace_sem);
  1395. }
  1396. #endif /* CONFIG_PROC_FS */
  1397. /**
  1398. * may_umount_tree - check if a mount tree is busy
  1399. * @m: root of mount tree
  1400. *
  1401. * This is called to check if a tree of mounts has any
  1402. * open files, pwds, chroots or sub mounts that are
  1403. * busy.
  1404. */
  1405. int may_umount_tree(struct vfsmount *m)
  1406. {
  1407. struct mount *mnt = real_mount(m);
  1408. int actual_refs = 0;
  1409. int minimum_refs = 0;
  1410. struct mount *p;
  1411. BUG_ON(!m);
  1412. /* write lock needed for mnt_get_count */
  1413. lock_mount_hash();
  1414. for (p = mnt; p; p = next_mnt(p, mnt)) {
  1415. actual_refs += mnt_get_count(p);
  1416. minimum_refs += 2;
  1417. }
  1418. unlock_mount_hash();
  1419. if (actual_refs > minimum_refs)
  1420. return 0;
  1421. return 1;
  1422. }
  1423. EXPORT_SYMBOL(may_umount_tree);
  1424. /**
  1425. * may_umount - check if a mount point is busy
  1426. * @mnt: root of mount
  1427. *
  1428. * This is called to check if a mount point has any
  1429. * open files, pwds, chroots or sub mounts. If the
  1430. * mount has sub mounts this will return busy
  1431. * regardless of whether the sub mounts are busy.
  1432. *
  1433. * Doesn't take quota and stuff into account. IOW, in some cases it will
  1434. * give false negatives. The main reason why it's here is that we need
  1435. * a non-destructive way to look for easily umountable filesystems.
  1436. */
  1437. int may_umount(struct vfsmount *mnt)
  1438. {
  1439. int ret = 1;
  1440. down_read(&namespace_sem);
  1441. lock_mount_hash();
  1442. if (propagate_mount_busy(real_mount(mnt), 2))
  1443. ret = 0;
  1444. unlock_mount_hash();
  1445. up_read(&namespace_sem);
  1446. return ret;
  1447. }
  1448. EXPORT_SYMBOL(may_umount);
  1449. static void namespace_unlock(void)
  1450. {
  1451. struct hlist_head head;
  1452. struct hlist_node *p;
  1453. struct mount *m;
  1454. LIST_HEAD(list);
  1455. hlist_move_list(&unmounted, &head);
  1456. list_splice_init(&ex_mountpoints, &list);
  1457. up_write(&namespace_sem);
  1458. shrink_dentry_list(&list);
  1459. if (likely(hlist_empty(&head)))
  1460. return;
  1461. synchronize_rcu_expedited();
  1462. hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
  1463. hlist_del(&m->mnt_umount);
  1464. #ifdef CONFIG_KDP_NS
  1465. mntput(((struct kdp_mount *)m)->mnt);
  1466. #else
  1467. mntput(&m->mnt);
  1468. #endif
  1469. }
  1470. }
  1471. static inline void namespace_lock(void)
  1472. {
  1473. down_write(&namespace_sem);
  1474. }
  1475. enum umount_tree_flags {
  1476. UMOUNT_SYNC = 1,
  1477. UMOUNT_PROPAGATE = 2,
  1478. UMOUNT_CONNECTED = 4,
  1479. };
  1480. static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
  1481. {
  1482. /* Leaving mounts connected is only valid for lazy umounts */
  1483. if (how & UMOUNT_SYNC)
  1484. return true;
  1485. /* A mount without a parent has nothing to be connected to */
  1486. if (!mnt_has_parent(mnt))
  1487. return true;
  1488. /* Because the reference counting rules change when mounts are
  1489. * unmounted and connected, umounted mounts may not be
  1490. * connected to mounted mounts.
  1491. */
  1492. #ifdef CONFIG_KDP_NS
  1493. if (!(((struct kdp_mount *)mnt->mnt_parent)->mnt->mnt_flags & MNT_UMOUNT))
  1494. #else
  1495. if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
  1496. #endif
  1497. return true;
  1498. /* Has it been requested that the mount remain connected? */
  1499. if (how & UMOUNT_CONNECTED)
  1500. return false;
  1501. /* Is the mount locked such that it needs to remain connected? */
  1502. if (IS_MNT_LOCKED(mnt))
  1503. return false;
  1504. /* By default disconnect the mount */
  1505. return true;
  1506. }
  1507. /*
  1508. * mount_lock must be held
  1509. * namespace_sem must be held for write
  1510. */
  1511. static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
  1512. {
  1513. LIST_HEAD(tmp_list);
  1514. struct mount *p;
  1515. if (how & UMOUNT_PROPAGATE)
  1516. propagate_mount_unlock(mnt);
  1517. /* Gather the mounts to umount */
  1518. for (p = mnt; p; p = next_mnt(p, mnt)) {
  1519. #ifdef CONFIG_KDP_NS
  1520. kdp_set_mnt_flags(((struct kdp_mount *)p)->mnt, MNT_UMOUNT);
  1521. #else
  1522. p->mnt.mnt_flags |= MNT_UMOUNT;
  1523. #endif
  1524. list_move(&p->mnt_list, &tmp_list);
  1525. }
  1526. /* Hide the mounts from mnt_mounts */
  1527. list_for_each_entry(p, &tmp_list, mnt_list) {
  1528. list_del_init(&p->mnt_child);
  1529. }
1530. /* Add propagated mounts to the tmp_list */
  1531. if (how & UMOUNT_PROPAGATE)
  1532. propagate_umount(&tmp_list);
  1533. while (!list_empty(&tmp_list)) {
  1534. struct mnt_namespace *ns;
  1535. bool disconnect;
  1536. p = list_first_entry(&tmp_list, struct mount, mnt_list);
  1537. list_del_init(&p->mnt_expire);
  1538. list_del_init(&p->mnt_list);
  1539. ns = p->mnt_ns;
  1540. if (ns) {
  1541. ns->mounts--;
  1542. __touch_mnt_namespace(ns);
  1543. }
  1544. p->mnt_ns = NULL;
  1545. if (how & UMOUNT_SYNC)
  1546. #ifdef CONFIG_KDP_NS
  1547. kdp_set_mnt_flags(((struct kdp_mount *)p)->mnt, MNT_SYNC_UMOUNT);
  1548. #else
  1549. p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
  1550. #endif
  1551. disconnect = disconnect_mount(p, how);
  1552. if (mnt_has_parent(p)) {
  1553. mnt_add_count(p->mnt_parent, -1);
  1554. if (!disconnect) {
  1555. /* Don't forget about p */
  1556. list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
  1557. } else {
  1558. umount_mnt(p);
  1559. }
  1560. }
  1561. change_mnt_propagation(p, MS_PRIVATE);
  1562. if (disconnect)
  1563. hlist_add_head(&p->mnt_umount, &unmounted);
  1564. }
  1565. }
  1566. static void shrink_submounts(struct mount *mnt);
  1567. static int do_umount_root(struct super_block *sb)
  1568. {
  1569. int ret = 0;
  1570. down_write(&sb->s_umount);
  1571. if (!sb_rdonly(sb)) {
  1572. struct fs_context *fc;
  1573. fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
  1574. SB_RDONLY);
  1575. if (IS_ERR(fc)) {
  1576. ret = PTR_ERR(fc);
  1577. } else {
  1578. ret = parse_monolithic_mount_data(fc, NULL);
  1579. if (!ret)
  1580. ret = reconfigure_super(fc);
  1581. put_fs_context(fc);
  1582. }
  1583. }
  1584. up_write(&sb->s_umount);
  1585. return ret;
  1586. }
  1587. static int do_umount(struct mount *mnt, int flags)
  1588. {
  1589. #ifdef CONFIG_KDP_NS
  1590. struct super_block *sb = ((struct kdp_mount *)mnt)->mnt->mnt_sb;
  1591. #else
  1592. struct super_block *sb = mnt->mnt.mnt_sb;
  1593. #endif
  1594. int retval;
  1595. #ifdef CONFIG_KDP_NS
  1596. retval = security_sb_umount(((struct kdp_mount *)mnt)->mnt, flags);
  1597. #else
  1598. retval = security_sb_umount(&mnt->mnt, flags);
  1599. #endif
  1600. if (retval)
  1601. return retval;
  1602. /*
  1603. * Allow userspace to request a mountpoint be expired rather than
  1604. * unmounting unconditionally. Unmount only happens if:
  1605. * (1) the mark is already set (the mark is cleared by mntput())
  1606. * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
  1607. */
  1608. if (flags & MNT_EXPIRE) {
  1609. #ifdef CONFIG_KDP_NS
  1610. if (((struct kdp_mount *)mnt)->mnt == current->fs->root.mnt ||
  1611. #else
  1612. if (&mnt->mnt == current->fs->root.mnt ||
  1613. #endif
  1614. flags & (MNT_FORCE | MNT_DETACH))
  1615. return -EINVAL;
  1616. /*
  1617. * probably don't strictly need the lock here if we examined
  1618. * all race cases, but it's a slowpath.
  1619. */
  1620. lock_mount_hash();
  1621. if (mnt_get_count(mnt) != 2) {
  1622. unlock_mount_hash();
  1623. return -EBUSY;
  1624. }
  1625. unlock_mount_hash();
  1626. if (!xchg(&mnt->mnt_expiry_mark, 1))
  1627. return -EAGAIN;
  1628. }
  1629. /*
  1630. * If we may have to abort operations to get out of this
  1631. * mount, and they will themselves hold resources we must
  1632. * allow the fs to do things. In the Unix tradition of
1633. * 'Gee, that's tricky, let's do it in userspace' the umount_begin
1634. * might fail to complete on the first run through as other tasks
1635. * must return, and the like. That's for the mount program to worry
  1636. * about for the moment.
  1637. */
  1638. if (flags & MNT_FORCE && sb->s_op->umount_begin) {
  1639. sb->s_op->umount_begin(sb);
  1640. }
  1641. /*
  1642. * No sense to grab the lock for this test, but test itself looks
  1643. * somewhat bogus. Suggestions for better replacement?
  1644. * Ho-hum... In principle, we might treat that as umount + switch
  1645. * to rootfs. GC would eventually take care of the old vfsmount.
  1646. * Actually it makes sense, especially if rootfs would contain a
  1647. * /reboot - static binary that would close all descriptors and
1648. * call reboot(2). Then init(8) could umount root and exec /reboot.
  1649. */
  1650. #ifdef CONFIG_KDP_NS
  1651. if (((struct kdp_mount *)mnt)->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
  1652. #else
  1653. if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
  1654. #endif
  1655. /*
  1656. * Special case for "unmounting" root ...
  1657. * we just try to remount it readonly.
  1658. */
  1659. if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
  1660. return -EPERM;
  1661. return do_umount_root(sb);
  1662. }
  1663. namespace_lock();
  1664. lock_mount_hash();
  1665. /* Recheck MNT_LOCKED with the locks held */
  1666. retval = -EINVAL;
  1667. #ifdef CONFIG_KDP_NS
  1668. if (((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_LOCKED)
  1669. #else
  1670. if (mnt->mnt.mnt_flags & MNT_LOCKED)
  1671. #endif
  1672. goto out;
  1673. event++;
  1674. if (flags & MNT_DETACH) {
  1675. if (!list_empty(&mnt->mnt_list))
  1676. umount_tree(mnt, UMOUNT_PROPAGATE);
  1677. retval = 0;
  1678. } else {
  1679. shrink_submounts(mnt);
  1680. retval = -EBUSY;
  1681. if (!propagate_mount_busy(mnt, 2)) {
  1682. if (!list_empty(&mnt->mnt_list))
  1683. umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
  1684. retval = 0;
  1685. }
  1686. }
  1687. out:
  1688. unlock_mount_hash();
  1689. namespace_unlock();
  1690. return retval;
  1691. }
  1692. /*
  1693. * __detach_mounts - lazily unmount all mounts on the specified dentry
  1694. *
1695. * During unlink, rmdir, and d_drop it is possible to lose the path
  1696. * to an existing mountpoint, and wind up leaking the mount.
  1697. * detach_mounts allows lazily unmounting those mounts instead of
  1698. * leaking them.
  1699. *
  1700. * The caller may hold dentry->d_inode->i_mutex.
  1701. */
  1702. void __detach_mounts(struct dentry *dentry)
  1703. {
  1704. struct mountpoint *mp;
  1705. struct mount *mnt;
  1706. namespace_lock();
  1707. lock_mount_hash();
  1708. mp = lookup_mountpoint(dentry);
  1709. if (!mp)
  1710. goto out_unlock;
  1711. event++;
  1712. while (!hlist_empty(&mp->m_list)) {
  1713. mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
  1714. #ifdef CONFIG_KDP_NS
  1715. if (((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_UMOUNT) {
  1716. #else
  1717. if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
  1718. #endif
  1719. umount_mnt(mnt);
  1720. hlist_add_head(&mnt->mnt_umount, &unmounted);
  1721. }
  1722. else umount_tree(mnt, UMOUNT_CONNECTED);
  1723. }
  1724. put_mountpoint(mp);
  1725. out_unlock:
  1726. unlock_mount_hash();
  1727. namespace_unlock();
  1728. }
  1729. /*
  1730. * Is the caller allowed to modify his namespace?
  1731. */
  1732. bool may_mount(void)
  1733. {
  1734. return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
  1735. }
  1736. static void warn_mandlock(void)
  1737. {
  1738. pr_warn_once("=======================================================\n"
  1739. "WARNING: The mand mount option has been deprecated and\n"
  1740. " and is ignored by this kernel. Remove the mand\n"
  1741. " option from the mount to silence this warning.\n"
  1742. "=======================================================\n");
  1743. }
  1744. static int can_umount(const struct path *path, int flags)
  1745. {
  1746. struct mount *mnt = real_mount(path->mnt);
  1747. if (!may_mount())
  1748. return -EPERM;
  1749. if (path->dentry != path->mnt->mnt_root)
  1750. return -EINVAL;
  1751. if (!check_mnt(mnt))
  1752. return -EINVAL;
  1753. #ifdef CONFIG_KDP_NS
  1754. if (((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_LOCKED)
  1755. #else
  1756. if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
  1757. #endif
  1758. return -EINVAL;
  1759. if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
  1760. return -EPERM;
  1761. return 0;
  1762. }
  1763. // caller is responsible for flags being sane
  1764. int path_umount(struct path *path, int flags)
  1765. {
  1766. struct mount *mnt = real_mount(path->mnt);
  1767. int ret;
  1768. ret = can_umount(path, flags);
  1769. if (!ret)
  1770. ret = do_umount(mnt, flags);
  1771. /* we mustn't call path_put() as that would clear mnt_expiry_mark */
  1772. dput(path->dentry);
  1773. mntput_no_expire(mnt);
  1774. return ret;
  1775. }
  1776. static int ksys_umount(char __user *name, int flags)
  1777. {
  1778. int lookup_flags = LOOKUP_MOUNTPOINT;
  1779. struct path path;
  1780. int ret;
  1781. // basic validity checks done first
  1782. if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
  1783. return -EINVAL;
  1784. if (!(flags & UMOUNT_NOFOLLOW))
  1785. lookup_flags |= LOOKUP_FOLLOW;
  1786. ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
  1787. if (ret)
  1788. return ret;
  1789. return path_umount(&path, flags);
  1790. }
  1791. SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
  1792. {
  1793. return ksys_umount(name, flags);
  1794. }
  1795. #ifdef __ARCH_WANT_SYS_OLDUMOUNT
  1796. /*
  1797. * The 2.0 compatible umount. No flags.
  1798. */
  1799. SYSCALL_DEFINE1(oldumount, char __user *, name)
  1800. {
  1801. return ksys_umount(name, 0);
  1802. }
  1803. #endif
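/*
 * Editorial example (not part of the original source): the userspace view
 * of the umount flags validated in ksys_umount() above, using the
 * umount2() wrapper from <sys/mount.h>. "/dst" is a placeholder path.
 */
#if 0	/* illustrative sketch, userspace C */
#include <sys/mount.h>

int example_lazy_umount(void)
{
	/* MNT_DETACH: disconnect the mount now, finish the teardown once
	 * the last user goes away (the UMOUNT_PROPAGATE-only path in
	 * do_umount()). */
	return umount2("/dst", MNT_DETACH);
}
#endif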
  1804. static bool is_mnt_ns_file(struct dentry *dentry)
  1805. {
  1806. /* Is this a proxy for a mount namespace? */
  1807. return dentry->d_op == &ns_dentry_operations &&
  1808. dentry->d_fsdata == &mntns_operations;
  1809. }
  1810. static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
  1811. {
  1812. return container_of(ns, struct mnt_namespace, ns);
  1813. }
  1814. struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
  1815. {
  1816. return &mnt->ns;
  1817. }
  1818. static bool mnt_ns_loop(struct dentry *dentry)
  1819. {
  1820. /* Could bind mounting the mount namespace inode cause a
  1821. * mount namespace loop?
  1822. */
  1823. struct mnt_namespace *mnt_ns;
  1824. if (!is_mnt_ns_file(dentry))
  1825. return false;
  1826. mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
  1827. return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
  1828. }
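/*
 * Editorial example (not part of the original source): what the sequence
 * number comparison above refuses. Bind-mounting a mount-namespace file
 * that refers to the current (or an earlier) namespace would let that
 * namespace pin itself; do_loopback() rejects it via mnt_ns_loop().
 * "/tmp/ns-keep" is a placeholder for an existing regular file.
 */
#if 0	/* illustrative sketch, userspace C */
#include <sys/mount.h>

int example_pin_own_ns(void)
{
	/* fails with -EINVAL because of mnt_ns_loop() */
	return mount("/proc/self/ns/mnt", "/tmp/ns-keep", NULL,
		     MS_BIND, NULL);
}
#endif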
  1829. struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
  1830. int flag)
  1831. {
  1832. struct mount *res, *p, *q, *r, *parent;
  1833. if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
  1834. return ERR_PTR(-EINVAL);
  1835. if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
  1836. return ERR_PTR(-EINVAL);
  1837. res = q = clone_mnt(mnt, dentry, flag);
  1838. if (IS_ERR(q))
  1839. return q;
  1840. q->mnt_mountpoint = mnt->mnt_mountpoint;
  1841. p = mnt;
  1842. list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
  1843. struct mount *s;
  1844. if (!is_subdir(r->mnt_mountpoint, dentry))
  1845. continue;
  1846. for (s = r; s; s = next_mnt(s, r)) {
  1847. if (!(flag & CL_COPY_UNBINDABLE) &&
  1848. IS_MNT_UNBINDABLE(s)) {
  1849. #ifdef CONFIG_KDP_NS
  1850. if (((struct kdp_mount *)s)->mnt->mnt_flags & MNT_LOCKED) {
  1851. #else
  1852. if (s->mnt.mnt_flags & MNT_LOCKED) {
  1853. #endif
  1854. /* Both unbindable and locked. */
  1855. q = ERR_PTR(-EPERM);
  1856. goto out;
  1857. } else {
  1858. s = skip_mnt_tree(s);
  1859. continue;
  1860. }
  1861. }
  1862. if (!(flag & CL_COPY_MNT_NS_FILE) &&
  1863. #ifdef CONFIG_KDP_NS
  1864. is_mnt_ns_file(((struct kdp_mount *)s)->mnt->mnt_root)) {
  1865. #else
  1866. is_mnt_ns_file(s->mnt.mnt_root)) {
  1867. #endif
  1868. s = skip_mnt_tree(s);
  1869. continue;
  1870. }
  1871. while (p != s->mnt_parent) {
  1872. p = p->mnt_parent;
  1873. q = q->mnt_parent;
  1874. }
  1875. p = s;
  1876. parent = q;
  1877. #ifdef CONFIG_KDP_NS
  1878. q = clone_mnt(p, ((struct kdp_mount *)p)->mnt->mnt_root, flag);
  1879. #else
  1880. q = clone_mnt(p, p->mnt.mnt_root, flag);
  1881. #endif
  1882. if (IS_ERR(q))
  1883. goto out;
  1884. lock_mount_hash();
  1885. list_add_tail(&q->mnt_list, &res->mnt_list);
  1886. attach_mnt(q, parent, p->mnt_mp);
  1887. unlock_mount_hash();
  1888. }
  1889. }
  1890. return res;
  1891. out:
  1892. if (res) {
  1893. lock_mount_hash();
  1894. umount_tree(res, UMOUNT_SYNC);
  1895. unlock_mount_hash();
  1896. }
  1897. return q;
  1898. }
  1899. /* Caller should check returned pointer for errors */
  1900. struct vfsmount *collect_mounts(const struct path *path)
  1901. {
  1902. struct mount *tree;
  1903. namespace_lock();
  1904. if (!check_mnt(real_mount(path->mnt)))
  1905. tree = ERR_PTR(-EINVAL);
  1906. else
  1907. tree = copy_tree(real_mount(path->mnt), path->dentry,
  1908. CL_COPY_ALL | CL_PRIVATE);
  1909. namespace_unlock();
  1910. if (IS_ERR(tree))
  1911. return ERR_CAST(tree);
  1912. #ifdef CONFIG_KDP_NS
  1913. return ((struct kdp_mount *)tree)->mnt;
  1914. #else
  1915. return &tree->mnt;
  1916. #endif
  1917. }
  1918. static void free_mnt_ns(struct mnt_namespace *);
  1919. static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
  1920. void dissolve_on_fput(struct vfsmount *mnt)
  1921. {
  1922. struct mnt_namespace *ns;
  1923. namespace_lock();
  1924. lock_mount_hash();
  1925. ns = real_mount(mnt)->mnt_ns;
  1926. if (ns) {
  1927. if (is_anon_ns(ns))
  1928. umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
  1929. else
  1930. ns = NULL;
  1931. }
  1932. unlock_mount_hash();
  1933. namespace_unlock();
  1934. if (ns)
  1935. free_mnt_ns(ns);
  1936. }
  1937. void drop_collected_mounts(struct vfsmount *mnt)
  1938. {
  1939. namespace_lock();
  1940. lock_mount_hash();
  1941. umount_tree(real_mount(mnt), 0);
  1942. unlock_mount_hash();
  1943. namespace_unlock();
  1944. }
  1945. static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
  1946. {
  1947. struct mount *child;
  1948. list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
  1949. if (!is_subdir(child->mnt_mountpoint, dentry))
  1950. continue;
  1951. #ifdef CONFIG_KDP_NS
  1952. if (((struct kdp_mount *)child)->mnt->mnt_flags & MNT_LOCKED)
  1953. #else
  1954. if (child->mnt.mnt_flags & MNT_LOCKED)
  1955. #endif
  1956. return true;
  1957. }
  1958. return false;
  1959. }
  1960. /**
  1961. * clone_private_mount - create a private clone of a path
  1962. * @path: path to clone
  1963. *
  1964. * This creates a new vfsmount, which will be the clone of @path. The new mount
  1965. * will not be attached anywhere in the namespace and will be private (i.e.
  1966. * changes to the originating mount won't be propagated into this).
  1967. *
1968. * Release with kern_unmount().
  1969. */
  1970. struct vfsmount *clone_private_mount(const struct path *path)
  1971. {
  1972. struct mount *old_mnt = real_mount(path->mnt);
  1973. struct mount *new_mnt;
  1974. down_read(&namespace_sem);
  1975. if (IS_MNT_UNBINDABLE(old_mnt))
  1976. goto invalid;
  1977. if (!check_mnt(old_mnt))
  1978. goto invalid;
  1979. if (has_locked_children(old_mnt, path->dentry))
  1980. goto invalid;
  1981. new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
  1982. up_read(&namespace_sem);
  1983. if (IS_ERR(new_mnt))
  1984. return ERR_CAST(new_mnt);
  1985. /* Longterm mount to be removed by kern_unmount*() */
  1986. #ifdef CONFIG_KDP_NS
  1987. ((struct kdp_mount *)new_mnt)->mount.mnt_ns = MNT_NS_INTERNAL;
  1988. return ((struct kdp_mount *)new_mnt)->mnt;
  1989. #else
  1990. new_mnt->mnt_ns = MNT_NS_INTERNAL;
  1991. return &new_mnt->mnt;
  1992. #endif
  1993. invalid:
  1994. up_read(&namespace_sem);
  1995. return ERR_PTR(-EINVAL);
  1996. }
  1997. EXPORT_SYMBOL_GPL(clone_private_mount);
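/*
 * Editorial example (not part of the original source): a sketch of the
 * typical clone_private_mount() pattern, where a filesystem keeps a
 * private, non-propagating clone of a path it was handed at mount time
 * (overlayfs treats its layers this way).
 */
#if 0	/* illustrative sketch, not compiled */
static struct vfsmount *example_clone_layer(const struct path *layer)
{
	struct vfsmount *m = clone_private_mount(layer);

	/* On success this is a longterm mount; the owner eventually
	 * releases it with kern_unmount(). */
	return m;
}
#endif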
  1998. int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
  1999. struct vfsmount *root)
  2000. {
  2001. struct mount *mnt;
  2002. int res = f(root, arg);
  2003. if (res)
  2004. return res;
  2005. list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
  2006. #ifdef CONFIG_KDP_NS
  2007. res = f(((struct kdp_mount *)mnt)->mnt, arg);
  2008. #else
  2009. res = f(&mnt->mnt, arg);
  2010. #endif
  2011. if (res)
  2012. return res;
  2013. }
  2014. return 0;
  2015. }
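/*
 * Editorial example (not part of the original source): iterate_mounts()
 * visits @root and every mount on its mnt_list until the callback returns
 * non-zero. A hypothetical counting callback:
 */
#if 0	/* illustrative sketch, not compiled */
static int example_count_one(struct vfsmount *mnt, void *arg)
{
	(*(int *)arg)++;
	return 0;		/* keep iterating */
}

static int example_count_tree(struct vfsmount *root)
{
	int n = 0;

	iterate_mounts(example_count_one, &n, root);
	return n;
}
#endif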
  2016. static void lock_mnt_tree(struct mount *mnt)
  2017. {
  2018. struct mount *p;
  2019. for (p = mnt; p; p = next_mnt(p, mnt)) {
  2020. #ifdef CONFIG_KDP_NS
  2021. int flags = ((struct kdp_mount *)p)->mnt->mnt_flags;
  2022. #else
  2023. int flags = p->mnt.mnt_flags;
  2024. #endif
  2025. /* Don't allow unprivileged users to change mount flags */
  2026. flags |= MNT_LOCK_ATIME;
  2027. if (flags & MNT_READONLY)
  2028. flags |= MNT_LOCK_READONLY;
  2029. if (flags & MNT_NODEV)
  2030. flags |= MNT_LOCK_NODEV;
  2031. if (flags & MNT_NOSUID)
  2032. flags |= MNT_LOCK_NOSUID;
  2033. if (flags & MNT_NOEXEC)
  2034. flags |= MNT_LOCK_NOEXEC;
  2035. /* Don't allow unprivileged users to reveal what is under a mount */
  2036. if (list_empty(&p->mnt_expire))
  2037. flags |= MNT_LOCKED;
  2038. #ifdef CONFIG_KDP_NS
  2039. kdp_assign_mnt_flags(((struct kdp_mount *)p)->mnt, flags);
  2040. #else
  2041. p->mnt.mnt_flags = flags;
  2042. #endif
  2043. }
  2044. }
  2045. static void cleanup_group_ids(struct mount *mnt, struct mount *end)
  2046. {
  2047. struct mount *p;
  2048. for (p = mnt; p != end; p = next_mnt(p, mnt)) {
  2049. if (p->mnt_group_id && !IS_MNT_SHARED(p))
  2050. mnt_release_group_id(p);
  2051. }
  2052. }
  2053. static int invent_group_ids(struct mount *mnt, bool recurse)
  2054. {
  2055. struct mount *p;
  2056. for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
  2057. if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
  2058. int err = mnt_alloc_group_id(p);
  2059. if (err) {
  2060. cleanup_group_ids(mnt, p);
  2061. return err;
  2062. }
  2063. }
  2064. }
  2065. return 0;
  2066. }
  2067. int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
  2068. {
  2069. unsigned int max = READ_ONCE(sysctl_mount_max);
  2070. unsigned int mounts = 0;
  2071. struct mount *p;
  2072. if (ns->mounts >= max)
  2073. return -ENOSPC;
  2074. max -= ns->mounts;
  2075. if (ns->pending_mounts >= max)
  2076. return -ENOSPC;
  2077. max -= ns->pending_mounts;
  2078. for (p = mnt; p; p = next_mnt(p, mnt))
  2079. mounts++;
  2080. if (mounts > max)
  2081. return -ENOSPC;
  2082. ns->pending_mounts += mounts;
  2083. return 0;
  2084. }
  2085. /*
2086. * @source_mnt : mount tree to be attached
2087. * @dest_mnt : mount under which @source_mnt is attached
2088. * @dest_mp : mountpoint on @dest_mnt at which @source_mnt is attached
2089. * @moving : true if @source_mnt is being moved from its current
2090. * location rather than freshly attached
  2091. *
2092. * NOTE: the table below explains the semantics when a source mount
  2093. * of a given type is attached to a destination mount of a given type.
2094. * ---------------------------------------------------------------------------
2095. * |         BIND MOUNT OPERATION                                            |
2096. * |**************************************************************************
2097. * | source-->| shared        |       private  |       slave    | unbindable |
2098. * | dest     |               |                |                |            |
2099. * |   |      |               |                |                |            |
2100. * |   v      |               |                |                |            |
2101. * |**************************************************************************
2102. * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
2103. * |          |               |                |                |            |
2104. * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
2105. * ***************************************************************************
  2106. * A bind operation clones the source mount and mounts the clone on the
  2107. * destination mount.
  2108. *
  2109. * (++) the cloned mount is propagated to all the mounts in the propagation
  2110. * tree of the destination mount and the cloned mount is added to
  2111. * the peer group of the source mount.
  2112. * (+) the cloned mount is created under the destination mount and is marked
  2113. * as shared. The cloned mount is added to the peer group of the source
  2114. * mount.
  2115. * (+++) the mount is propagated to all the mounts in the propagation tree
  2116. * of the destination mount and the cloned mount is made slave
  2117. * of the same master as that of the source mount. The cloned mount
  2118. * is marked as 'shared and slave'.
  2119. * (*) the cloned mount is made a slave of the same master as that of the
  2120. * source mount.
  2121. *
2122. * ---------------------------------------------------------------------------
2123. * |         MOVE MOUNT OPERATION                                            |
2124. * |**************************************************************************
2125. * | source-->| shared        |       private  |       slave    | unbindable |
2126. * | dest     |               |                |                |            |
2127. * |   |      |               |                |                |            |
2128. * |   v      |               |                |                |            |
2129. * |**************************************************************************
2130. * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
2131. * |          |               |                |                |            |
2132. * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
2133. * ***************************************************************************
  2134. *
  2135. * (+) the mount is moved to the destination. And is then propagated to
  2136. * all the mounts in the propagation tree of the destination mount.
  2137. * (+*) the mount is moved to the destination.
  2138. * (+++) the mount is moved to the destination and is then propagated to
  2139. * all the mounts belonging to the destination mount's propagation tree.
  2140. * the mount is marked as 'shared and slave'.
  2141. * (*) the mount continues to be a slave at the new location.
  2142. *
2143. * if the source mount is a tree, the operations explained above are
  2144. * applied to each mount in the tree.
  2145. * Must be called without spinlocks held, since this function can sleep
  2146. * in allocations.
  2147. */
  2148. static int attach_recursive_mnt(struct mount *source_mnt,
  2149. struct mount *dest_mnt,
  2150. struct mountpoint *dest_mp,
  2151. bool moving)
  2152. {
  2153. struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
  2154. HLIST_HEAD(tree_list);
  2155. struct mnt_namespace *ns = dest_mnt->mnt_ns;
  2156. struct mountpoint *smp;
  2157. struct mount *child, *p;
  2158. struct hlist_node *n;
  2159. int err;
  2160. /* Preallocate a mountpoint in case the new mounts need
  2161. * to be tucked under other mounts.
  2162. */
  2163. #ifdef CONFIG_KDP_NS
  2164. int nsflags;
  2165. smp = get_mountpoint(((struct kdp_mount *)source_mnt)->mnt->mnt_root);
  2166. #else
  2167. smp = get_mountpoint(source_mnt->mnt.mnt_root);
  2168. #endif
  2169. if (IS_ERR(smp))
  2170. return PTR_ERR(smp);
  2171. /* Is there space to add these mounts to the mount namespace? */
  2172. if (!moving) {
  2173. err = count_mounts(ns, source_mnt);
  2174. if (err)
  2175. goto out;
  2176. }
  2177. if (IS_MNT_SHARED(dest_mnt)) {
  2178. err = invent_group_ids(source_mnt, true);
  2179. if (err)
  2180. goto out;
  2181. err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
  2182. lock_mount_hash();
  2183. if (err)
  2184. goto out_cleanup_ids;
  2185. for (p = source_mnt; p; p = next_mnt(p, source_mnt))
  2186. set_mnt_shared(p);
  2187. } else {
  2188. lock_mount_hash();
  2189. }
  2190. if (moving) {
  2191. unhash_mnt(source_mnt);
  2192. attach_mnt(source_mnt, dest_mnt, dest_mp);
  2193. touch_mnt_namespace(source_mnt->mnt_ns);
  2194. } else {
  2195. if (source_mnt->mnt_ns) {
  2196. /* move from anon - the caller will destroy */
  2197. list_del_init(&source_mnt->mnt_ns->list);
  2198. }
  2199. mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
  2200. commit_tree(source_mnt);
  2201. }
  2202. hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
  2203. struct mount *q;
  2204. hlist_del_init(&child->mnt_hash);
  2205. #ifdef CONFIG_KDP_NS
  2206. q = __lookup_mnt(((struct kdp_mount *)child->mnt_parent)->mnt,
  2207. #else
  2208. q = __lookup_mnt(&child->mnt_parent->mnt,
  2209. #endif
  2210. child->mnt_mountpoint);
  2211. if (q)
  2212. mnt_change_mountpoint(child, smp, q);
  2213. /* Notice when we are propagating across user namespaces */
  2214. if (child->mnt_parent->mnt_ns->user_ns != user_ns)
  2215. lock_mnt_tree(child);
  2216. #ifdef CONFIG_KDP_NS
  2217. nsflags = ((struct kdp_mount *)child)->mnt->mnt_flags & ~MNT_LOCKED;
  2218. kdp_assign_mnt_flags(((struct kdp_mount *)child)->mnt, nsflags);
  2219. #else
  2220. child->mnt.mnt_flags &= ~MNT_LOCKED;
  2221. #endif
  2222. commit_tree(child);
  2223. }
  2224. put_mountpoint(smp);
  2225. unlock_mount_hash();
  2226. return 0;
  2227. out_cleanup_ids:
  2228. while (!hlist_empty(&tree_list)) {
  2229. child = hlist_entry(tree_list.first, struct mount, mnt_hash);
  2230. child->mnt_parent->mnt_ns->pending_mounts = 0;
  2231. umount_tree(child, UMOUNT_SYNC);
  2232. }
  2233. unlock_mount_hash();
  2234. cleanup_group_ids(source_mnt, NULL);
  2235. out:
  2236. ns->pending_mounts = 0;
  2237. read_seqlock_excl(&mount_lock);
  2238. put_mountpoint(smp);
  2239. read_sequnlock_excl(&mount_lock);
  2240. return err;
  2241. }
  2242. static struct mountpoint *lock_mount(struct path *path)
  2243. {
  2244. struct vfsmount *mnt;
  2245. struct dentry *dentry = path->dentry;
  2246. retry:
  2247. inode_lock(dentry->d_inode);
  2248. if (unlikely(cant_mount(dentry))) {
  2249. inode_unlock(dentry->d_inode);
  2250. return ERR_PTR(-ENOENT);
  2251. }
  2252. namespace_lock();
  2253. mnt = lookup_mnt(path);
  2254. if (likely(!mnt)) {
  2255. struct mountpoint *mp = get_mountpoint(dentry);
  2256. if (IS_ERR(mp)) {
  2257. namespace_unlock();
  2258. inode_unlock(dentry->d_inode);
  2259. return mp;
  2260. }
  2261. return mp;
  2262. }
  2263. namespace_unlock();
  2264. inode_unlock(path->dentry->d_inode);
  2265. path_put(path);
  2266. path->mnt = mnt;
  2267. dentry = path->dentry = dget(mnt->mnt_root);
  2268. goto retry;
  2269. }
  2270. static void unlock_mount(struct mountpoint *where)
  2271. {
  2272. struct dentry *dentry = where->m_dentry;
  2273. read_seqlock_excl(&mount_lock);
  2274. put_mountpoint(where);
  2275. read_sequnlock_excl(&mount_lock);
  2276. namespace_unlock();
  2277. inode_unlock(dentry->d_inode);
  2278. }
  2279. static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
  2280. {
  2281. #ifdef CONFIG_KDP_NS
  2282. if (((struct kdp_mount *)mnt)->mnt->mnt_sb->s_flags & SB_NOUSER)
  2283. #else
  2284. if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
  2285. #endif
  2286. return -EINVAL;
  2287. if (d_is_dir(mp->m_dentry) !=
  2288. #ifdef CONFIG_KDP_NS
  2289. d_is_dir(((struct kdp_mount *)mnt)->mnt->mnt_root))
  2290. #else
  2291. d_is_dir(mnt->mnt.mnt_root))
  2292. #endif
  2293. return -ENOTDIR;
  2294. return attach_recursive_mnt(mnt, p, mp, false);
  2295. }
  2296. /*
  2297. * Sanity check the flags to change_mnt_propagation.
  2298. */
  2299. static int flags_to_propagation_type(int ms_flags)
  2300. {
  2301. int type = ms_flags & ~(MS_REC | MS_SILENT);
  2302. /* Fail if any non-propagation flags are set */
  2303. if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
  2304. return 0;
  2305. /* Only one propagation flag should be set */
  2306. if (!is_power_of_2(type))
  2307. return 0;
  2308. return type;
  2309. }
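/*
 * Editorial example (not part of the original source): sample inputs and
 * results for the sanity check above.
 */
#if 0	/* illustrative sketch, not compiled */
flags_to_propagation_type(MS_SHARED | MS_REC);	  /* MS_SHARED */
flags_to_propagation_type(MS_PRIVATE);		  /* MS_PRIVATE */
flags_to_propagation_type(MS_SHARED | MS_SLAVE);  /* 0: two types set */
flags_to_propagation_type(MS_SHARED | MS_NOSUID); /* 0: stray flag */
#endif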
  2310. /*
  2311. * recursively change the type of the mountpoint.
  2312. */
  2313. static int do_change_type(struct path *path, int ms_flags)
  2314. {
  2315. struct mount *m;
  2316. struct mount *mnt = real_mount(path->mnt);
  2317. int recurse = ms_flags & MS_REC;
  2318. int type;
  2319. int err = 0;
  2320. if (path->dentry != path->mnt->mnt_root)
  2321. return -EINVAL;
  2322. type = flags_to_propagation_type(ms_flags);
  2323. if (!type)
  2324. return -EINVAL;
  2325. namespace_lock();
  2326. if (type == MS_SHARED) {
  2327. err = invent_group_ids(mnt, recurse);
  2328. if (err)
  2329. goto out_unlock;
  2330. }
  2331. lock_mount_hash();
  2332. for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
  2333. change_mnt_propagation(m, type);
  2334. unlock_mount_hash();
  2335. out_unlock:
  2336. namespace_unlock();
  2337. return err;
  2338. }
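/*
 * Editorial example (not part of the original source): the userspace
 * mount(2) call that reaches do_change_type(), e.g. recursively making a
 * tree private before manipulating mounts, as container runtimes commonly
 * do.
 */
#if 0	/* illustrative sketch, userspace C */
#include <sys/mount.h>

int example_make_rprivate(void)
{
	return mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
}
#endif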
  2339. static struct mount *__do_loopback(struct path *old_path, int recurse)
  2340. {
  2341. struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
  2342. #ifdef CONFIG_KDP_NS
  2343. int nsflags;
  2344. #endif
  2345. if (IS_MNT_UNBINDABLE(old))
  2346. return mnt;
  2347. if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
  2348. return mnt;
  2349. if (!recurse && has_locked_children(old, old_path->dentry))
  2350. return mnt;
  2351. if (recurse)
  2352. mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
  2353. else
  2354. mnt = clone_mnt(old, old_path->dentry, 0);
  2355. #ifdef CONFIG_KDP_NS
  2356. if (!IS_ERR(mnt)) {
  2357. nsflags = ((struct kdp_mount *)mnt)->mnt->mnt_flags & ~MNT_LOCKED;
  2358. kdp_assign_mnt_flags(((struct kdp_mount *)mnt)->mnt, nsflags);
  2359. }
  2360. #else
  2361. if (!IS_ERR(mnt))
  2362. mnt->mnt.mnt_flags &= ~MNT_LOCKED;
  2363. #endif
  2364. return mnt;
  2365. }
  2366. /*
  2367. * do loopback mount.
  2368. */
  2369. static int do_loopback(struct path *path, const char *old_name,
  2370. int recurse)
  2371. {
  2372. struct path old_path;
  2373. struct mount *mnt = NULL, *parent;
  2374. struct mountpoint *mp;
  2375. int err;
  2376. if (!old_name || !*old_name)
  2377. return -EINVAL;
  2378. err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
  2379. if (err)
  2380. return err;
  2381. err = -EINVAL;
  2382. if (mnt_ns_loop(old_path.dentry))
  2383. goto out;
  2384. mp = lock_mount(path);
  2385. if (IS_ERR(mp)) {
  2386. err = PTR_ERR(mp);
  2387. goto out;
  2388. }
  2389. parent = real_mount(path->mnt);
  2390. if (!check_mnt(parent))
  2391. goto out2;
  2392. mnt = __do_loopback(&old_path, recurse);
  2393. if (IS_ERR(mnt)) {
  2394. err = PTR_ERR(mnt);
  2395. goto out2;
  2396. }
  2397. err = graft_tree(mnt, parent, mp);
  2398. if (err) {
  2399. lock_mount_hash();
  2400. umount_tree(mnt, UMOUNT_SYNC);
  2401. unlock_mount_hash();
  2402. }
  2403. out2:
  2404. unlock_mount(mp);
  2405. out:
  2406. path_put(&old_path);
  2407. return err;
  2408. }
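/*
 * Editorial example (not part of the original source): the userspace
 * equivalent of do_loopback(). "/src" and "/dst" are placeholder paths.
 */
#if 0	/* illustrative sketch, userspace C */
#include <sys/mount.h>

int example_bind(void)
{
	/* "mount --bind /src /dst"; adding MS_REC gives --rbind,
	 * i.e. the recurse path through copy_tree() above. */
	return mount("/src", "/dst", NULL, MS_BIND, NULL);
}
#endif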
  2409. static struct file *open_detached_copy(struct path *path, bool recursive)
  2410. {
  2411. struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
  2412. struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
  2413. struct mount *mnt, *p;
  2414. struct file *file;
  2415. if (IS_ERR(ns))
  2416. return ERR_CAST(ns);
  2417. namespace_lock();
  2418. mnt = __do_loopback(path, recursive);
  2419. if (IS_ERR(mnt)) {
  2420. namespace_unlock();
  2421. free_mnt_ns(ns);
  2422. return ERR_CAST(mnt);
  2423. }
  2424. lock_mount_hash();
  2425. for (p = mnt; p; p = next_mnt(p, mnt)) {
  2426. p->mnt_ns = ns;
  2427. ns->mounts++;
  2428. }
  2429. ns->root = mnt;
  2430. list_add_tail(&ns->list, &mnt->mnt_list);
  2431. #ifdef CONFIG_KDP_NS
  2432. mntget(((struct kdp_mount *)mnt)->mnt);
  2433. unlock_mount_hash();
  2434. namespace_unlock();
  2435. mntput(path->mnt);
  2436. path->mnt = ((struct kdp_mount *)mnt)->mnt;
  2437. #else
  2438. mntget(&mnt->mnt);
  2439. unlock_mount_hash();
  2440. namespace_unlock();
  2441. mntput(path->mnt);
  2442. path->mnt = &mnt->mnt;
  2443. #endif
  2444. file = dentry_open(path, O_PATH, current_cred());
  2445. if (IS_ERR(file))
  2446. dissolve_on_fput(path->mnt);
  2447. else
  2448. file->f_mode |= FMODE_NEED_UNMOUNT;
  2449. return file;
  2450. }
  2451. SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
  2452. {
  2453. struct file *file;
  2454. struct path path;
  2455. int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
  2456. bool detached = flags & OPEN_TREE_CLONE;
  2457. int error;
  2458. int fd;
  2459. BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
  2460. if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
  2461. AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
  2462. OPEN_TREE_CLOEXEC))
  2463. return -EINVAL;
  2464. if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
  2465. return -EINVAL;
  2466. if (flags & AT_NO_AUTOMOUNT)
  2467. lookup_flags &= ~LOOKUP_AUTOMOUNT;
  2468. if (flags & AT_SYMLINK_NOFOLLOW)
  2469. lookup_flags &= ~LOOKUP_FOLLOW;
  2470. if (flags & AT_EMPTY_PATH)
  2471. lookup_flags |= LOOKUP_EMPTY;
  2472. if (detached && !may_mount())
  2473. return -EPERM;
  2474. fd = get_unused_fd_flags(flags & O_CLOEXEC);
  2475. if (fd < 0)
  2476. return fd;
  2477. error = user_path_at(dfd, filename, lookup_flags, &path);
  2478. if (unlikely(error)) {
  2479. file = ERR_PTR(error);
  2480. } else {
  2481. if (detached)
  2482. file = open_detached_copy(&path, flags & AT_RECURSIVE);
  2483. else
  2484. file = dentry_open(&path, O_PATH, current_cred());
  2485. path_put(&path);
  2486. }
  2487. if (IS_ERR(file)) {
  2488. put_unused_fd(fd);
  2489. return PTR_ERR(file);
  2490. }
  2491. fd_install(fd, file);
  2492. return fd;
  2493. }
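/*
 * Editorial example (not part of the original source): invoking
 * open_tree(2) from userspace. This assumes headers that provide
 * SYS_open_tree, AT_RECURSIVE and the OPEN_TREE_* constants; there is no
 * universal libc wrapper. "/src" is a placeholder path.
 */
#if 0	/* illustrative sketch, userspace C */
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/mount.h>

int example_open_detached_tree(void)
{
	/* A detached, recursive copy of the tree at /src; the returned fd
	 * can later be attached elsewhere with move_mount(2). */
	return syscall(SYS_open_tree, AT_FDCWD, "/src",
		       OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
}
#endif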
  2494. /*
  2495. * Don't allow locked mount flags to be cleared.
  2496. *
  2497. * No locks need to be held here while testing the various MNT_LOCK
  2498. * flags because those flags can never be cleared once they are set.
  2499. */
  2500. static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
  2501. {
  2502. #ifdef CONFIG_KDP_NS
  2503. unsigned int fl = ((struct kdp_mount *)mnt)->mnt->mnt_flags;
  2504. #else
  2505. unsigned int fl = mnt->mnt.mnt_flags;
  2506. #endif
  2507. if ((fl & MNT_LOCK_READONLY) &&
  2508. !(mnt_flags & MNT_READONLY))
  2509. return false;
  2510. if ((fl & MNT_LOCK_NODEV) &&
  2511. !(mnt_flags & MNT_NODEV))
  2512. return false;
  2513. if ((fl & MNT_LOCK_NOSUID) &&
  2514. !(mnt_flags & MNT_NOSUID))
  2515. return false;
  2516. if ((fl & MNT_LOCK_NOEXEC) &&
  2517. !(mnt_flags & MNT_NOEXEC))
  2518. return false;
  2519. if ((fl & MNT_LOCK_ATIME) &&
  2520. ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
  2521. return false;
  2522. return true;
  2523. }
  2524. static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
  2525. {
  2526. bool readonly_request = (mnt_flags & MNT_READONLY);
  2527. #ifdef CONFIG_KDP_NS
  2528. if (readonly_request == __mnt_is_readonly(((struct kdp_mount *)mnt)->mnt))
  2529. #else
  2530. if (readonly_request == __mnt_is_readonly(&mnt->mnt))
  2531. #endif
  2532. return 0;
  2533. if (readonly_request)
  2534. return mnt_make_readonly(mnt);
  2535. #ifdef CONFIG_KDP_NS
  2536. kdp_clear_mnt_flags(((struct kdp_mount *)mnt)->mnt, MNT_READONLY);
  2537. #else
  2538. mnt->mnt.mnt_flags &= ~MNT_READONLY;
  2539. #endif
  2540. return 0;
  2541. }
  2542. static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
  2543. {
  2544. #ifdef CONFIG_KDP_NS
  2545. mnt_flags |= ((struct kdp_mount *)mnt)->mnt->mnt_flags & ~MNT_USER_SETTABLE_MASK;
  2546. kdp_assign_mnt_flags(((struct kdp_mount *)mnt)->mnt, mnt_flags);
  2547. #else
  2548. mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
  2549. mnt->mnt.mnt_flags = mnt_flags;
  2550. #endif
  2551. touch_mnt_namespace(mnt->mnt_ns);
  2552. }
  2553. static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
  2554. {
  2555. struct super_block *sb = mnt->mnt_sb;
  2556. if (!__mnt_is_readonly(mnt) &&
  2557. (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
  2558. (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
  2559. char *buf = (char *)__get_free_page(GFP_KERNEL);
  2560. char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
  2561. struct tm tm;
  2562. time64_to_tm(sb->s_time_max, 0, &tm);
  2563. pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
  2564. sb->s_type->name,
  2565. is_mounted(mnt) ? "remounted" : "mounted",
  2566. mntpath,
  2567. tm.tm_year+1900, (unsigned long long)sb->s_time_max);
  2568. free_page((unsigned long)buf);
  2569. sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
  2570. }
  2571. }
  2572. /*
  2573. * Handle reconfiguration of the mountpoint only without alteration of the
  2574. * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
  2575. * to mount(2).
  2576. */
  2577. static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
  2578. {
  2579. struct super_block *sb = path->mnt->mnt_sb;
  2580. struct mount *mnt = real_mount(path->mnt);
  2581. int ret;
  2582. if (!check_mnt(mnt))
  2583. return -EINVAL;
  2584. #ifdef CONFIG_KDP_NS
  2585. if (path->dentry != ((struct kdp_mount *)mnt)->mnt->mnt_root)
  2586. #else
  2587. if (path->dentry != mnt->mnt.mnt_root)
  2588. #endif
  2589. return -EINVAL;
  2590. if (!can_change_locked_flags(mnt, mnt_flags))
  2591. return -EPERM;
  2592. /*
  2593. * We're only checking whether the superblock is read-only not
  2594. * changing it, so only take down_read(&sb->s_umount).
  2595. */
  2596. down_read(&sb->s_umount);
  2597. lock_mount_hash();
  2598. ret = change_mount_ro_state(mnt, mnt_flags);
  2599. if (ret == 0)
  2600. set_mount_attributes(mnt, mnt_flags);
  2601. unlock_mount_hash();
  2602. up_read(&sb->s_umount);
  2603. #ifdef CONFIG_KDP_NS
  2604. mnt_warn_timestamp_expiry(path, ((struct kdp_mount *)mnt)->mnt);
  2605. #else
  2606. mnt_warn_timestamp_expiry(path, &mnt->mnt);
  2607. #endif
  2608. return ret;
  2609. }
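/*
 * Editorial example (not part of the original source): the userspace
 * trigger for do_reconfigure_mnt(), i.e. MS_REMOUNT|MS_BIND, which
 * changes only the mountpoint and leaves the superblock alone. "/dst" is
 * a placeholder path.
 */
#if 0	/* illustrative sketch, userspace C */
#include <sys/mount.h>

int example_remount_ro_bind(void)
{
	/* make this one mountpoint read-only without touching the sb */
	return mount(NULL, "/dst", NULL,
		     MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
}
#endif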
  2610. /*
  2611. * change filesystem flags. dir should be a physical root of filesystem.
  2612. * If you've mounted a non-root directory somewhere and want to do remount
  2613. * on it - tough luck.
  2614. */
  2615. static int do_remount(struct path *path, int ms_flags, int sb_flags,
  2616. int mnt_flags, void *data)
  2617. {
  2618. int err;
  2619. struct super_block *sb = path->mnt->mnt_sb;
  2620. struct mount *mnt = real_mount(path->mnt);
  2621. struct fs_context *fc;
  2622. if (!check_mnt(mnt))
  2623. return -EINVAL;
  2624. if (path->dentry != path->mnt->mnt_root)
  2625. return -EINVAL;
  2626. if (!can_change_locked_flags(mnt, mnt_flags))
  2627. return -EPERM;
  2628. fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
  2629. if (IS_ERR(fc))
  2630. return PTR_ERR(fc);
  2631. fc->oldapi = true;
  2632. err = parse_monolithic_mount_data(fc, data);
  2633. if (!err) {
  2634. down_write(&sb->s_umount);
  2635. err = -EPERM;
  2636. if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
  2637. err = reconfigure_super(fc);
  2638. if (!err) {
  2639. lock_mount_hash();
  2640. set_mount_attributes(mnt, mnt_flags);
  2641. unlock_mount_hash();
  2642. }
  2643. }
  2644. up_write(&sb->s_umount);
  2645. }
  2646. #ifdef CONFIG_KDP_NS
  2647. mnt_warn_timestamp_expiry(path, ((struct kdp_mount *)mnt)->mnt);
  2648. #else
  2649. mnt_warn_timestamp_expiry(path, &mnt->mnt);
  2650. #endif
  2651. put_fs_context(fc);
  2652. return err;
  2653. }
  2654. static inline int tree_contains_unbindable(struct mount *mnt)
  2655. {
  2656. struct mount *p;
  2657. for (p = mnt; p; p = next_mnt(p, mnt)) {
  2658. if (IS_MNT_UNBINDABLE(p))
  2659. return 1;
  2660. }
  2661. return 0;
  2662. }
  2663. /*
  2664. * Check that there aren't references to earlier/same mount namespaces in the
  2665. * specified subtree. Such references can act as pins for mount namespaces
  2666. * that aren't checked by the mount-cycle checking code, thereby allowing
  2667. * cycles to be made.
  2668. */
  2669. static bool check_for_nsfs_mounts(struct mount *subtree)
  2670. {
  2671. struct mount *p;
  2672. bool ret = false;
  2673. lock_mount_hash();
  2674. for (p = subtree; p; p = next_mnt(p, subtree))
  2675. #ifdef CONFIG_KDP_NS
  2676. if (mnt_ns_loop(((struct kdp_mount *)p)->mnt->mnt_root))
  2677. #else
  2678. if (mnt_ns_loop(p->mnt.mnt_root))
  2679. #endif
  2680. goto out;
  2681. ret = true;
  2682. out:
  2683. unlock_mount_hash();
  2684. return ret;
  2685. }
  2686. static int do_set_group(struct path *from_path, struct path *to_path)
  2687. {
  2688. struct mount *from, *to;
  2689. int err;
  2690. from = real_mount(from_path->mnt);
  2691. to = real_mount(to_path->mnt);
  2692. namespace_lock();
  2693. err = -EINVAL;
  2694. /* To and From must be mounted */
  2695. #ifdef CONFIG_KDP_NS
  2696. if (!is_mounted(((struct kdp_mount *)from)->mnt))
  2697. goto out;
  2698. if (!is_mounted(((struct kdp_mount *)to)->mnt))
  2699. goto out;
  2700. #else
  2701. if (!is_mounted(&from->mnt))
  2702. goto out;
  2703. if (!is_mounted(&to->mnt))
  2704. goto out;
  2705. #endif
  2706. err = -EPERM;
  2707. /* We should be allowed to modify mount namespaces of both mounts */
  2708. if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
  2709. goto out;
  2710. if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
  2711. goto out;
  2712. err = -EINVAL;
  2713. /* To and From paths should be mount roots */
  2714. if (from_path->dentry != from_path->mnt->mnt_root)
  2715. goto out;
  2716. if (to_path->dentry != to_path->mnt->mnt_root)
  2717. goto out;
  2718. #ifdef CONFIG_KDP_NS
  2719. /* Setting sharing groups is only allowed across same superblock */
  2720. if (((struct kdp_mount *)from)->mnt->mnt_sb != ((struct kdp_mount *)to)->mnt->mnt_sb)
  2721. goto out;
  2722. /* From mount root should be wider than To mount root */
  2723. if (!is_subdir(((struct kdp_mount *)to)->mnt->mnt_root, ((struct kdp_mount *)from)->mnt->mnt_root))
  2724. goto out;
  2725. /* From mount should not have locked children in place of To's root */
  2726. if (has_locked_children(from, ((struct kdp_mount *)to)->mnt->mnt_root))
  2727. goto out;
  2728. #else
  2729. /* Setting sharing groups is only allowed across same superblock */
  2730. if (from->mnt.mnt_sb != to->mnt.mnt_sb)
  2731. goto out;
  2732. /* From mount root should be wider than To mount root */
  2733. if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
  2734. goto out;
  2735. /* From mount should not have locked children in place of To's root */
  2736. if (has_locked_children(from, to->mnt.mnt_root))
  2737. goto out;
  2738. #endif
  2739. /* Setting sharing groups is only allowed on private mounts */
  2740. if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
  2741. goto out;
  2742. /* From should not be private */
  2743. if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
  2744. goto out;
  2745. if (IS_MNT_SLAVE(from)) {
  2746. struct mount *m = from->mnt_master;
  2747. list_add(&to->mnt_slave, &m->mnt_slave_list);
  2748. to->mnt_master = m;
  2749. }
  2750. if (IS_MNT_SHARED(from)) {
  2751. to->mnt_group_id = from->mnt_group_id;
  2752. list_add(&to->mnt_share, &from->mnt_share);
  2753. lock_mount_hash();
  2754. set_mnt_shared(to);
  2755. unlock_mount_hash();
  2756. }
  2757. err = 0;
  2758. out:
  2759. namespace_unlock();
  2760. return err;
  2761. }
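/*
 * Illustrative sketch (not part of this file): do_set_group() is reached via
 * move_mount(2) with MOVE_MOUNT_SET_GROUP, which copies the sharing group
 * (peer or slave state) of one mount onto a private mount of the same
 * superblock. Requires Linux >= 5.15; older glibc has no wrapper, so raw
 * syscall(2) is used. Both paths are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int set_group_example(void)
{
	/* /mnt/src must be shared or slave; /mnt/dst must be private. */
	return syscall(SYS_move_mount, AT_FDCWD, "/mnt/src",
		       AT_FDCWD, "/mnt/dst", MOVE_MOUNT_SET_GROUP);
}
#endif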
  2762. static int do_move_mount(struct path *old_path, struct path *new_path)
  2763. {
  2764. struct mnt_namespace *ns;
  2765. struct mount *p;
  2766. struct mount *old;
  2767. struct mount *parent;
  2768. struct mountpoint *mp, *old_mp;
  2769. int err;
  2770. bool attached;
  2771. mp = lock_mount(new_path);
  2772. if (IS_ERR(mp))
  2773. return PTR_ERR(mp);
  2774. old = real_mount(old_path->mnt);
  2775. p = real_mount(new_path->mnt);
  2776. parent = old->mnt_parent;
  2777. attached = mnt_has_parent(old);
  2778. old_mp = old->mnt_mp;
  2779. ns = old->mnt_ns;
  2780. err = -EINVAL;
  2781. /* The mountpoint must be in our namespace. */
  2782. if (!check_mnt(p))
  2783. goto out;
  2784. /* The thing moved must be mounted... */
  2785. #ifdef CONFIG_KDP_NS
  2786. if (!is_mounted(((struct kdp_mount *)old)->mnt))
  2787. #else
  2788. if (!is_mounted(&old->mnt))
  2789. #endif
  2790. goto out;
  2791. /* ... and either ours or the root of anon namespace */
  2792. if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
  2793. goto out;
  2794. #ifdef CONFIG_KDP_NS
  2795. if (((struct kdp_mount *)old)->mnt->mnt_flags & MNT_LOCKED)
  2796. #else
  2797. if (old->mnt.mnt_flags & MNT_LOCKED)
  2798. #endif
  2799. goto out;
  2800. if (old_path->dentry != old_path->mnt->mnt_root)
  2801. goto out;
  2802. if (d_is_dir(new_path->dentry) !=
  2803. d_is_dir(old_path->dentry))
  2804. goto out;
  2805. /*
  2806. * Don't move a mount residing in a shared parent.
  2807. */
  2808. if (attached && IS_MNT_SHARED(parent))
  2809. goto out;
  2810. /*
  2811. * Don't move a mount tree containing unbindable mounts to a destination
  2812. * mount which is shared.
  2813. */
  2814. if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
  2815. goto out;
  2816. err = -ELOOP;
  2817. if (!check_for_nsfs_mounts(old))
  2818. goto out;
  2819. for (; mnt_has_parent(p); p = p->mnt_parent)
  2820. if (p == old)
  2821. goto out;
  2822. err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
  2823. attached);
  2824. if (err)
  2825. goto out;
2826. /* if the mount is moved, it should no longer expire
2827. * automatically */
  2828. list_del_init(&old->mnt_expire);
  2829. if (attached)
  2830. put_mountpoint(old_mp);
  2831. out:
  2832. unlock_mount(mp);
  2833. if (!err) {
  2834. if (attached)
  2835. mntput_no_expire(parent);
  2836. else
  2837. free_mnt_ns(ns);
  2838. }
  2839. return err;
  2840. }
  2841. static int do_move_mount_old(struct path *path, const char *old_name)
  2842. {
  2843. struct path old_path;
  2844. int err;
  2845. if (!old_name || !*old_name)
  2846. return -EINVAL;
  2847. err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
  2848. if (err)
  2849. return err;
  2850. err = do_move_mount(&old_path, path);
  2851. path_put(&old_path);
  2852. return err;
  2853. }
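/*
 * Illustrative sketch (not part of this file): do_move_mount_old() backs the
 * legacy mount(2) MS_MOVE interface, which atomically relocates a mounted
 * tree to a new mountpoint. Paths are hypothetical; error handling omitted.
 */
#if 0
#include <sys/mount.h>

static int move_mount_example(void)
{
	/* Detach the tree at /mnt/old and reattach it at /mnt/new. */
	return mount("/mnt/old", "/mnt/new", NULL, MS_MOVE, NULL);
}
#endif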
  2854. /*
  2855. * add a mount into a namespace's mount tree
  2856. */
  2857. static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
  2858. const struct path *path, int mnt_flags)
  2859. {
  2860. struct mount *parent = real_mount(path->mnt);
  2861. mnt_flags &= ~MNT_INTERNAL_FLAGS;
  2862. if (unlikely(!check_mnt(parent))) {
  2863. /* that's acceptable only for automounts done in private ns */
  2864. if (!(mnt_flags & MNT_SHRINKABLE))
  2865. return -EINVAL;
  2866. /* ... and for those we'd better have mountpoint still alive */
  2867. if (!parent->mnt_ns)
  2868. return -EINVAL;
  2869. }
  2870. /* Refuse the same filesystem on the same mount point */
  2871. #ifdef CONFIG_KDP_NS
  2872. if (path->mnt->mnt_sb == ((struct kdp_mount *)newmnt)->mnt->mnt_sb &&
  2873. #else
  2874. if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
  2875. #endif
  2876. path->mnt->mnt_root == path->dentry)
  2877. return -EBUSY;
  2878. #ifdef CONFIG_KDP_NS
  2879. if (d_is_symlink(((struct kdp_mount *)newmnt)->mnt->mnt_root))
  2880. #else
  2881. if (d_is_symlink(newmnt->mnt.mnt_root))
  2882. #endif
  2883. return -EINVAL;
  2884. #ifdef CONFIG_KDP_NS
  2885. kdp_assign_mnt_flags(((struct kdp_mount *)newmnt)->mnt, mnt_flags);
  2886. #else
  2887. newmnt->mnt.mnt_flags = mnt_flags;
  2888. #endif
  2889. return graft_tree(newmnt, parent, mp);
  2890. }
  2891. static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
  2892. /*
  2893. * Create a new mount using a superblock configuration and request it
  2894. * be added to the namespace tree.
  2895. */
  2896. static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
  2897. unsigned int mnt_flags)
  2898. {
  2899. struct vfsmount *mnt;
  2900. struct mountpoint *mp;
  2901. struct super_block *sb = fc->root->d_sb;
  2902. int error;
  2903. error = security_sb_kern_mount(sb);
  2904. if (!error && mount_too_revealing(sb, &mnt_flags))
  2905. error = -EPERM;
  2906. if (unlikely(error)) {
  2907. fc_drop_locked(fc);
  2908. return error;
  2909. }
  2910. up_write(&sb->s_umount);
  2911. mnt = vfs_create_mount(fc);
  2912. if (IS_ERR(mnt))
  2913. return PTR_ERR(mnt);
  2914. mnt_warn_timestamp_expiry(mountpoint, mnt);
  2915. mp = lock_mount(mountpoint);
  2916. if (IS_ERR(mp)) {
  2917. mntput(mnt);
  2918. return PTR_ERR(mp);
  2919. }
  2920. error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
  2921. unlock_mount(mp);
  2922. if (error < 0)
  2923. mntput(mnt);
  2924. return error;
  2925. }
  2926. /*
  2927. * create a new mount for userspace and request it to be added into the
  2928. * namespace's tree
  2929. */
  2930. static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
  2931. int mnt_flags, const char *name, void *data)
  2932. {
  2933. struct file_system_type *type;
  2934. struct fs_context *fc;
  2935. const char *subtype = NULL;
  2936. int err = 0;
  2937. if (!fstype)
  2938. return -EINVAL;
  2939. type = get_fs_type(fstype);
  2940. if (!type)
  2941. return -ENODEV;
  2942. if (type->fs_flags & FS_HAS_SUBTYPE) {
  2943. subtype = strchr(fstype, '.');
  2944. if (subtype) {
  2945. subtype++;
  2946. if (!*subtype) {
  2947. put_filesystem(type);
  2948. return -EINVAL;
  2949. }
  2950. }
  2951. }
  2952. fc = fs_context_for_mount(type, sb_flags);
  2953. put_filesystem(type);
  2954. if (IS_ERR(fc))
  2955. return PTR_ERR(fc);
  2956. if (subtype)
  2957. err = vfs_parse_fs_string(fc, "subtype",
  2958. subtype, strlen(subtype));
  2959. if (!err && name)
  2960. err = vfs_parse_fs_string(fc, "source", name, strlen(name));
  2961. if (!err)
  2962. err = parse_monolithic_mount_data(fc, data);
  2963. if (!err && !mount_capable(fc))
  2964. err = -EPERM;
  2965. if (!err)
  2966. err = vfs_get_tree(fc);
  2967. if (!err)
  2968. err = do_new_mount_fc(fc, path, mnt_flags);
  2969. put_fs_context(fc);
  2970. return err;
  2971. }
  2972. int finish_automount(struct vfsmount *m, const struct path *path)
  2973. {
  2974. struct dentry *dentry = path->dentry;
  2975. struct mountpoint *mp;
  2976. struct mount *mnt;
  2977. int err;
  2978. if (!m)
  2979. return 0;
  2980. if (IS_ERR(m))
  2981. return PTR_ERR(m);
  2982. mnt = real_mount(m);
  2983. /* The new mount record should have at least 2 refs to prevent it being
  2984. * expired before we get a chance to add it
  2985. */
  2986. BUG_ON(mnt_get_count(mnt) < 2);
  2987. if (m->mnt_sb == path->mnt->mnt_sb &&
  2988. m->mnt_root == dentry) {
  2989. err = -ELOOP;
  2990. goto discard;
  2991. }
  2992. /*
  2993. * we don't want to use lock_mount() - in this case finding something
2994. * that overmounts our mountpoint means "quietly drop what we've
2995. * got", not "try to mount it on top".
  2996. */
  2997. inode_lock(dentry->d_inode);
  2998. namespace_lock();
  2999. if (unlikely(cant_mount(dentry))) {
  3000. err = -ENOENT;
  3001. goto discard_locked;
  3002. }
  3003. rcu_read_lock();
  3004. if (unlikely(__lookup_mnt(path->mnt, dentry))) {
  3005. rcu_read_unlock();
  3006. err = 0;
  3007. goto discard_locked;
  3008. }
  3009. rcu_read_unlock();
  3010. mp = get_mountpoint(dentry);
  3011. if (IS_ERR(mp)) {
  3012. err = PTR_ERR(mp);
  3013. goto discard_locked;
  3014. }
  3015. err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
  3016. unlock_mount(mp);
  3017. if (unlikely(err))
  3018. goto discard;
  3019. mntput(m);
  3020. return 0;
  3021. discard_locked:
  3022. namespace_unlock();
  3023. inode_unlock(dentry->d_inode);
  3024. discard:
  3025. /* remove m from any expiration list it may be on */
  3026. if (!list_empty(&mnt->mnt_expire)) {
  3027. namespace_lock();
  3028. list_del_init(&mnt->mnt_expire);
  3029. namespace_unlock();
  3030. }
  3031. mntput(m);
  3032. mntput(m);
  3033. return err;
  3034. }
  3035. /**
  3036. * mnt_set_expiry - Put a mount on an expiration list
  3037. * @mnt: The mount to list.
  3038. * @expiry_list: The list to add the mount to.
  3039. */
  3040. void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
  3041. {
  3042. namespace_lock();
  3043. list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
  3044. namespace_unlock();
  3045. }
  3046. EXPORT_SYMBOL(mnt_set_expiry);
  3047. /*
  3048. * process a list of expirable mountpoints with the intent of discarding any
  3049. * mountpoints that aren't in use and haven't been touched since last we came
  3050. * here
  3051. */
  3052. void mark_mounts_for_expiry(struct list_head *mounts)
  3053. {
  3054. struct mount *mnt, *next;
  3055. LIST_HEAD(graveyard);
  3056. if (list_empty(mounts))
  3057. return;
  3058. namespace_lock();
  3059. lock_mount_hash();
  3060. /* extract from the expiration list every vfsmount that matches the
  3061. * following criteria:
  3062. * - only referenced by its parent vfsmount
  3063. * - still marked for expiry (marked on the last call here; marks are
  3064. * cleared by mntput())
  3065. */
  3066. list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
  3067. if (!xchg(&mnt->mnt_expiry_mark, 1) ||
  3068. propagate_mount_busy(mnt, 1))
  3069. continue;
  3070. list_move(&mnt->mnt_expire, &graveyard);
  3071. }
  3072. while (!list_empty(&graveyard)) {
  3073. mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
  3074. touch_mnt_namespace(mnt->mnt_ns);
  3075. umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
  3076. }
  3077. unlock_mount_hash();
  3078. namespace_unlock();
  3079. }
  3080. EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
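/*
 * Illustrative sketch (not part of this file): how an in-kernel user such as
 * an automounting filesystem might drive the expiry machinery above.
 * Submounts are queued with mnt_set_expiry() and a periodic worker calls
 * mark_mounts_for_expiry(); a mount is torn down on the second pass if it was
 * not touched in between. All names below are hypothetical.
 */
#if 0
static LIST_HEAD(example_expiry_list);
static void example_expiry_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_expiry_work, example_expiry_worker);

static void example_expiry_worker(struct work_struct *work)
{
	mark_mounts_for_expiry(&example_expiry_list);
	schedule_delayed_work(&example_expiry_work, 30 * HZ);
}

/* Called when a new automounted submount has been attached. */
static void example_queue_submount(struct vfsmount *mnt)
{
	mnt_set_expiry(mnt, &example_expiry_list);
}
#endif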
  3081. /*
  3082. * Ripoff of 'select_parent()'
  3083. *
  3084. * search the list of submounts for a given mountpoint, and move any
  3085. * shrinkable submounts to the 'graveyard' list.
  3086. */
  3087. static int select_submounts(struct mount *parent, struct list_head *graveyard)
  3088. {
  3089. struct mount *this_parent = parent;
  3090. struct list_head *next;
  3091. int found = 0;
  3092. repeat:
  3093. next = this_parent->mnt_mounts.next;
  3094. resume:
  3095. while (next != &this_parent->mnt_mounts) {
  3096. struct list_head *tmp = next;
  3097. struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
  3098. next = tmp->next;
  3099. #ifdef CONFIG_KDP_NS
  3100. if (!(((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_SHRINKABLE))
  3101. #else
  3102. if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
  3103. #endif
  3104. continue;
  3105. /*
3106. * Descend a level if the mnt_mounts list is non-empty.
  3107. */
  3108. if (!list_empty(&mnt->mnt_mounts)) {
  3109. this_parent = mnt;
  3110. goto repeat;
  3111. }
  3112. if (!propagate_mount_busy(mnt, 1)) {
  3113. list_move_tail(&mnt->mnt_expire, graveyard);
  3114. found++;
  3115. }
  3116. }
  3117. /*
  3118. * All done at this level ... ascend and resume the search
  3119. */
  3120. if (this_parent != parent) {
  3121. next = this_parent->mnt_child.next;
  3122. this_parent = this_parent->mnt_parent;
  3123. goto resume;
  3124. }
  3125. return found;
  3126. }
  3127. /*
  3128. * process a list of expirable mountpoints with the intent of discarding any
  3129. * submounts of a specific parent mountpoint
  3130. *
  3131. * mount_lock must be held for write
  3132. */
  3133. static void shrink_submounts(struct mount *mnt)
  3134. {
  3135. LIST_HEAD(graveyard);
  3136. struct mount *m;
  3137. /* extract submounts of 'mountpoint' from the expiration list */
  3138. while (select_submounts(mnt, &graveyard)) {
  3139. while (!list_empty(&graveyard)) {
  3140. m = list_first_entry(&graveyard, struct mount,
  3141. mnt_expire);
  3142. touch_mnt_namespace(m->mnt_ns);
  3143. umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
  3144. }
  3145. }
  3146. }
  3147. static void *copy_mount_options(const void __user * data)
  3148. {
  3149. char *copy;
  3150. unsigned left, offset;
  3151. if (!data)
  3152. return NULL;
  3153. copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
  3154. if (!copy)
  3155. return ERR_PTR(-ENOMEM);
  3156. left = copy_from_user(copy, data, PAGE_SIZE);
  3157. /*
  3158. * Not all architectures have an exact copy_from_user(). Resort to
  3159. * byte at a time.
  3160. */
  3161. offset = PAGE_SIZE - left;
  3162. while (left) {
  3163. char c;
  3164. if (get_user(c, (const char __user *)data + offset))
  3165. break;
  3166. copy[offset] = c;
  3167. left--;
  3168. offset++;
  3169. }
  3170. if (left == PAGE_SIZE) {
  3171. kfree(copy);
  3172. return ERR_PTR(-EFAULT);
  3173. }
  3174. return copy;
  3175. }
  3176. static char *copy_mount_string(const void __user *data)
  3177. {
  3178. return data ? strndup_user(data, PATH_MAX) : NULL;
  3179. }
  3180. /*
  3181. * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3182. * be given to the mount() call (i.e. read-only, no-dev, no-suid, etc.).
  3183. *
  3184. * data is a (void *) that can point to any structure up to
  3185. * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
  3186. * information (or be NULL).
  3187. *
  3188. * Pre-0.97 versions of mount() didn't have a flags word.
  3189. * When the flags word was introduced its top half was required
  3190. * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
  3191. * Therefore, if this magic number is present, it carries no information
  3192. * and must be discarded.
  3193. */
  3194. int path_mount(const char *dev_name, struct path *path,
  3195. const char *type_page, unsigned long flags, void *data_page)
  3196. {
  3197. unsigned int mnt_flags = 0, sb_flags;
  3198. int ret;
  3199. /* Discard magic */
  3200. if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
  3201. flags &= ~MS_MGC_MSK;
  3202. /* Basic sanity checks */
  3203. if (data_page)
  3204. ((char *)data_page)[PAGE_SIZE - 1] = 0;
  3205. if (flags & MS_NOUSER)
  3206. return -EINVAL;
  3207. ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
  3208. if (ret)
  3209. return ret;
  3210. if (!may_mount())
  3211. return -EPERM;
  3212. if (flags & SB_MANDLOCK)
  3213. warn_mandlock();
3214. /* Default to relatime unless overridden */
  3215. if (!(flags & MS_NOATIME))
  3216. mnt_flags |= MNT_RELATIME;
  3217. /* Separate the per-mountpoint flags */
  3218. if (flags & MS_NOSUID)
  3219. mnt_flags |= MNT_NOSUID;
  3220. if (flags & MS_NODEV)
  3221. mnt_flags |= MNT_NODEV;
  3222. if (flags & MS_NOEXEC)
  3223. mnt_flags |= MNT_NOEXEC;
  3224. if (flags & MS_NOATIME)
  3225. mnt_flags |= MNT_NOATIME;
  3226. if (flags & MS_NODIRATIME)
  3227. mnt_flags |= MNT_NODIRATIME;
  3228. if (flags & MS_STRICTATIME)
  3229. mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
  3230. if (flags & MS_RDONLY)
  3231. mnt_flags |= MNT_READONLY;
  3232. if (flags & MS_NOSYMFOLLOW)
  3233. mnt_flags |= MNT_NOSYMFOLLOW;
  3234. /* The default atime for remount is preservation */
  3235. if ((flags & MS_REMOUNT) &&
  3236. ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
  3237. MS_STRICTATIME)) == 0)) {
  3238. mnt_flags &= ~MNT_ATIME_MASK;
  3239. mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
  3240. }
  3241. sb_flags = flags & (SB_RDONLY |
  3242. SB_SYNCHRONOUS |
  3243. SB_MANDLOCK |
  3244. SB_DIRSYNC |
  3245. SB_SILENT |
  3246. SB_POSIXACL |
  3247. SB_LAZYTIME |
  3248. SB_I_VERSION);
  3249. if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
  3250. return do_reconfigure_mnt(path, mnt_flags);
  3251. if (flags & MS_REMOUNT)
  3252. return do_remount(path, flags, sb_flags, mnt_flags, data_page);
  3253. if (flags & MS_BIND)
  3254. return do_loopback(path, dev_name, flags & MS_REC);
  3255. if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
  3256. return do_change_type(path, flags);
  3257. if (flags & MS_MOVE)
  3258. return do_move_mount_old(path, dev_name);
  3259. return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
  3260. data_page);
  3261. }
  3262. long do_mount(const char *dev_name, const char __user *dir_name,
  3263. const char *type_page, unsigned long flags, void *data_page)
  3264. {
  3265. struct path path;
  3266. int ret;
  3267. ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
  3268. if (ret)
  3269. return ret;
  3270. ret = path_mount(dev_name, &path, type_page, flags, data_page);
  3271. path_put(&path);
  3272. return ret;
  3273. }
  3274. static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
  3275. {
  3276. return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
  3277. }
  3278. static void dec_mnt_namespaces(struct ucounts *ucounts)
  3279. {
  3280. dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
  3281. }
  3282. static void free_mnt_ns(struct mnt_namespace *ns)
  3283. {
  3284. if (!is_anon_ns(ns))
  3285. ns_free_inum(&ns->ns);
  3286. dec_mnt_namespaces(ns->ucounts);
  3287. put_user_ns(ns->user_ns);
  3288. kfree(ns);
  3289. }
  3290. /*
  3291. * Assign a sequence number so we can detect when we attempt to bind
  3292. * mount a reference to an older mount namespace into the current
  3293. * mount namespace, preventing reference counting loops. A 64bit
3294. * counter incrementing even at 1GHz would take over 580 years to wrap,
3295. * which is effectively never, so we can ignore the possibility.
  3296. */
  3297. static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
  3298. static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
  3299. {
  3300. struct mnt_namespace *new_ns;
  3301. struct ucounts *ucounts;
  3302. int ret;
  3303. ucounts = inc_mnt_namespaces(user_ns);
  3304. if (!ucounts)
  3305. return ERR_PTR(-ENOSPC);
  3306. new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
  3307. if (!new_ns) {
  3308. dec_mnt_namespaces(ucounts);
  3309. return ERR_PTR(-ENOMEM);
  3310. }
  3311. if (!anon) {
  3312. ret = ns_alloc_inum(&new_ns->ns);
  3313. if (ret) {
  3314. kfree(new_ns);
  3315. dec_mnt_namespaces(ucounts);
  3316. return ERR_PTR(ret);
  3317. }
  3318. }
  3319. new_ns->ns.ops = &mntns_operations;
  3320. if (!anon)
  3321. new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
  3322. refcount_set(&new_ns->ns.count, 1);
  3323. INIT_LIST_HEAD(&new_ns->list);
  3324. init_waitqueue_head(&new_ns->poll);
  3325. spin_lock_init(&new_ns->ns_lock);
  3326. new_ns->user_ns = get_user_ns(user_ns);
  3327. new_ns->ucounts = ucounts;
  3328. return new_ns;
  3329. }
  3330. __latent_entropy
  3331. struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
  3332. struct user_namespace *user_ns, struct fs_struct *new_fs)
  3333. {
  3334. struct mnt_namespace *new_ns;
  3335. struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
  3336. struct mount *p, *q;
  3337. struct mount *old;
  3338. struct mount *new;
  3339. int copy_flags;
  3340. BUG_ON(!ns);
  3341. if (likely(!(flags & CLONE_NEWNS))) {
  3342. get_mnt_ns(ns);
  3343. return ns;
  3344. }
  3345. old = ns->root;
  3346. new_ns = alloc_mnt_ns(user_ns, false);
  3347. if (IS_ERR(new_ns))
  3348. return new_ns;
  3349. namespace_lock();
  3350. /* First pass: copy the tree topology */
  3351. copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
  3352. if (user_ns != ns->user_ns)
  3353. copy_flags |= CL_SHARED_TO_SLAVE;
  3354. #ifdef CONFIG_KDP_NS
  3355. new = copy_tree(old, ((struct kdp_mount *)old)->mnt->mnt_root, copy_flags);
  3356. #else
  3357. new = copy_tree(old, old->mnt.mnt_root, copy_flags);
  3358. #endif
  3359. if (IS_ERR(new)) {
  3360. namespace_unlock();
  3361. free_mnt_ns(new_ns);
  3362. return ERR_CAST(new);
  3363. }
  3364. if (user_ns != ns->user_ns) {
  3365. lock_mount_hash();
  3366. lock_mnt_tree(new);
  3367. unlock_mount_hash();
  3368. }
  3369. new_ns->root = new;
  3370. list_add_tail(&new_ns->list, &new->mnt_list);
  3371. /*
  3372. * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
  3373. * as belonging to new namespace. We have already acquired a private
  3374. * fs_struct, so tsk->fs->lock is not needed.
  3375. */
  3376. p = old;
  3377. q = new;
  3378. while (p) {
  3379. q->mnt_ns = new_ns;
  3380. new_ns->mounts++;
  3381. if (new_fs) {
  3382. #ifdef CONFIG_KDP_NS
  3383. if (((struct kdp_mount *)p)->mnt == new_fs->root.mnt) {
  3384. new_fs->root.mnt = mntget(((struct kdp_mount *)q)->mnt);
  3385. rootmnt = ((struct kdp_mount *)p)->mnt;
  3386. #else
  3387. if (&p->mnt == new_fs->root.mnt) {
  3388. new_fs->root.mnt = mntget(&q->mnt);
  3389. rootmnt = &p->mnt;
  3390. #endif
  3391. }
  3392. #ifdef CONFIG_KDP_NS
  3393. if (((struct kdp_mount *)p)->mnt == new_fs->pwd.mnt) {
  3394. new_fs->pwd.mnt = mntget(((struct kdp_mount *)q)->mnt);
  3395. pwdmnt = ((struct kdp_mount *)p)->mnt;
  3396. #else
  3397. if (&p->mnt == new_fs->pwd.mnt) {
  3398. new_fs->pwd.mnt = mntget(&q->mnt);
  3399. pwdmnt = &p->mnt;
  3400. #endif
  3401. }
  3402. }
  3403. p = next_mnt(p, old);
  3404. q = next_mnt(q, new);
  3405. if (!q)
  3406. break;
  3407. #ifdef CONFIG_KDP_NS
  3408. while (((struct kdp_mount *)p)->mnt->mnt_root != ((struct kdp_mount *)q)->mnt->mnt_root)
  3409. #else
  3410. while (p->mnt.mnt_root != q->mnt.mnt_root)
  3411. #endif
  3412. p = next_mnt(p, old);
  3413. }
  3414. namespace_unlock();
  3415. if (rootmnt)
  3416. mntput(rootmnt);
  3417. if (pwdmnt)
  3418. mntput(pwdmnt);
  3419. return new_ns;
  3420. }
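/*
 * Illustrative sketch (not part of this file): copy_mnt_ns() runs when a task
 * asks for a private copy of its mount namespace, e.g. via unshare(2). A
 * typical userspace sequence also makes propagation private so later mounts
 * do not leak back into the parent namespace. Error handling is omitted.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>

static int private_mntns_example(void)
{
	if (unshare(CLONE_NEWNS) < 0)
		return -1;
	/* Stop propagation events for the whole tree in the new namespace. */
	return mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
}
#endif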
  3421. struct dentry *mount_subtree(struct vfsmount *m, const char *name)
  3422. {
  3423. struct mount *mnt = real_mount(m);
  3424. struct mnt_namespace *ns;
  3425. struct super_block *s;
  3426. struct path path;
  3427. int err;
  3428. ns = alloc_mnt_ns(&init_user_ns, true);
  3429. if (IS_ERR(ns)) {
  3430. mntput(m);
  3431. return ERR_CAST(ns);
  3432. }
  3433. mnt->mnt_ns = ns;
  3434. ns->root = mnt;
  3435. ns->mounts++;
  3436. list_add(&mnt->mnt_list, &ns->list);
  3437. err = vfs_path_lookup(m->mnt_root, m,
  3438. name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
  3439. put_mnt_ns(ns);
  3440. if (err)
  3441. return ERR_PTR(err);
  3442. /* trade a vfsmount reference for active sb one */
  3443. s = path.mnt->mnt_sb;
  3444. atomic_inc(&s->s_active);
  3445. mntput(path.mnt);
  3446. /* lock the sucker */
  3447. down_write(&s->s_umount);
  3448. /* ... and return the root of (sub)tree on it */
  3449. return path.dentry;
  3450. }
  3451. EXPORT_SYMBOL(mount_subtree);
  3452. SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
  3453. char __user *, type, unsigned long, flags, void __user *, data)
  3454. {
  3455. int ret;
  3456. char *kernel_type;
  3457. char *kernel_dev;
  3458. void *options;
  3459. kernel_type = copy_mount_string(type);
  3460. ret = PTR_ERR(kernel_type);
  3461. if (IS_ERR(kernel_type))
  3462. goto out_type;
  3463. kernel_dev = copy_mount_string(dev_name);
  3464. ret = PTR_ERR(kernel_dev);
  3465. if (IS_ERR(kernel_dev))
  3466. goto out_dev;
  3467. options = copy_mount_options(data);
  3468. ret = PTR_ERR(options);
  3469. if (IS_ERR(options))
  3470. goto out_data;
  3471. ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
  3472. kfree(options);
  3473. out_data:
  3474. kfree(kernel_dev);
  3475. out_dev:
  3476. kfree(kernel_type);
  3477. out_type:
  3478. return ret;
  3479. }
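/*
 * Illustrative sketch (not part of this file): the classic mount(2) entry
 * point above copies the type, device and option strings from userspace and
 * hands them to do_mount(). A typical call; the device, path and option
 * string are hypothetical.
 */
#if 0
#include <sys/mount.h>

static int classic_mount_example(void)
{
	return mount("/dev/sda1", "/mnt/data", "ext4",
		     MS_NOSUID | MS_NODEV, "errors=remount-ro");
}
#endif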
  3480. #define FSMOUNT_VALID_FLAGS \
  3481. (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
  3482. MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
  3483. MOUNT_ATTR_NOSYMFOLLOW)
  3484. #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
  3485. #define MOUNT_SETATTR_PROPAGATION_FLAGS \
  3486. (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
  3487. static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
  3488. {
  3489. unsigned int mnt_flags = 0;
  3490. if (attr_flags & MOUNT_ATTR_RDONLY)
  3491. mnt_flags |= MNT_READONLY;
  3492. if (attr_flags & MOUNT_ATTR_NOSUID)
  3493. mnt_flags |= MNT_NOSUID;
  3494. if (attr_flags & MOUNT_ATTR_NODEV)
  3495. mnt_flags |= MNT_NODEV;
  3496. if (attr_flags & MOUNT_ATTR_NOEXEC)
  3497. mnt_flags |= MNT_NOEXEC;
  3498. if (attr_flags & MOUNT_ATTR_NODIRATIME)
  3499. mnt_flags |= MNT_NODIRATIME;
  3500. if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
  3501. mnt_flags |= MNT_NOSYMFOLLOW;
  3502. return mnt_flags;
  3503. }
  3504. /*
  3505. * Create a kernel mount representation for a new, prepared superblock
  3506. * (specified by fs_fd) and attach to an open_tree-like file descriptor.
  3507. */
  3508. SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
  3509. unsigned int, attr_flags)
  3510. {
  3511. struct mnt_namespace *ns;
  3512. struct fs_context *fc;
  3513. struct file *file;
  3514. struct path newmount;
  3515. struct mount *mnt;
  3516. struct fd f;
  3517. unsigned int mnt_flags = 0;
  3518. long ret;
  3519. if (!may_mount())
  3520. return -EPERM;
  3521. if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
  3522. return -EINVAL;
  3523. if (attr_flags & ~FSMOUNT_VALID_FLAGS)
  3524. return -EINVAL;
  3525. mnt_flags = attr_flags_to_mnt_flags(attr_flags);
  3526. switch (attr_flags & MOUNT_ATTR__ATIME) {
  3527. case MOUNT_ATTR_STRICTATIME:
  3528. break;
  3529. case MOUNT_ATTR_NOATIME:
  3530. mnt_flags |= MNT_NOATIME;
  3531. break;
  3532. case MOUNT_ATTR_RELATIME:
  3533. mnt_flags |= MNT_RELATIME;
  3534. break;
  3535. default:
  3536. return -EINVAL;
  3537. }
  3538. f = fdget(fs_fd);
  3539. if (!f.file)
  3540. return -EBADF;
  3541. ret = -EINVAL;
  3542. if (f.file->f_op != &fscontext_fops)
  3543. goto err_fsfd;
  3544. fc = f.file->private_data;
  3545. ret = mutex_lock_interruptible(&fc->uapi_mutex);
  3546. if (ret < 0)
  3547. goto err_fsfd;
  3548. /* There must be a valid superblock or we can't mount it */
  3549. ret = -EINVAL;
  3550. if (!fc->root)
  3551. goto err_unlock;
  3552. ret = -EPERM;
  3553. if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
  3554. pr_warn("VFS: Mount too revealing\n");
  3555. goto err_unlock;
  3556. }
  3557. ret = -EBUSY;
  3558. if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
  3559. goto err_unlock;
  3560. if (fc->sb_flags & SB_MANDLOCK)
  3561. warn_mandlock();
  3562. newmount.mnt = vfs_create_mount(fc);
  3563. if (IS_ERR(newmount.mnt)) {
  3564. ret = PTR_ERR(newmount.mnt);
  3565. goto err_unlock;
  3566. }
  3567. newmount.dentry = dget(fc->root);
  3568. newmount.mnt->mnt_flags = mnt_flags;
  3569. /* We've done the mount bit - now move the file context into more or
  3570. * less the same state as if we'd done an fspick(). We don't want to
  3571. * do any memory allocation or anything like that at this point as we
  3572. * don't want to have to handle any errors incurred.
  3573. */
  3574. vfs_clean_context(fc);
  3575. ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
  3576. if (IS_ERR(ns)) {
  3577. ret = PTR_ERR(ns);
  3578. goto err_path;
  3579. }
  3580. mnt = real_mount(newmount.mnt);
  3581. mnt->mnt_ns = ns;
  3582. ns->root = mnt;
  3583. ns->mounts = 1;
  3584. list_add(&mnt->mnt_list, &ns->list);
  3585. mntget(newmount.mnt);
  3586. /* Attach to an apparent O_PATH fd with a note that we need to unmount
  3587. * it, not just simply put it.
  3588. */
  3589. file = dentry_open(&newmount, O_PATH, fc->cred);
  3590. if (IS_ERR(file)) {
  3591. dissolve_on_fput(newmount.mnt);
  3592. ret = PTR_ERR(file);
  3593. goto err_path;
  3594. }
  3595. file->f_mode |= FMODE_NEED_UNMOUNT;
  3596. ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
  3597. if (ret >= 0)
  3598. fd_install(ret, file);
  3599. else
  3600. fput(file);
  3601. err_path:
  3602. path_put(&newmount);
  3603. err_unlock:
  3604. mutex_unlock(&fc->uapi_mutex);
  3605. err_fsfd:
  3606. fdput(f);
  3607. return ret;
  3608. }
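/*
 * Illustrative sketch (not part of this file): the new mount API splits
 * mount(2) into stages. fsopen(2) yields the fs_context fd this syscall
 * expects, fsconfig(2) configures it, fsmount(2) turns it into an O_PATH
 * mount fd, and move_mount(2) attaches it. Requires Linux >= 5.2; older
 * glibc lacks wrappers, hence raw syscall(2). Error handling is omitted and
 * the source device and target path are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int new_api_mount_example(void)
{
	int fsfd, mfd;

	fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
		"/dev/sda1", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
	return syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt/data",
		       MOVE_MOUNT_F_EMPTY_PATH);
}
#endif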
  3609. /*
  3610. * Move a mount from one place to another. In combination with
  3611. * fsopen()/fsmount() this is used to install a new mount and in combination
  3612. * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
  3613. * a mount subtree.
  3614. *
  3615. * Note the flags value is a combination of MOVE_MOUNT_* flags.
  3616. */
  3617. SYSCALL_DEFINE5(move_mount,
  3618. int, from_dfd, const char __user *, from_pathname,
  3619. int, to_dfd, const char __user *, to_pathname,
  3620. unsigned int, flags)
  3621. {
  3622. struct path from_path, to_path;
  3623. unsigned int lflags;
  3624. int ret = 0;
  3625. if (!may_mount())
  3626. return -EPERM;
  3627. if (flags & ~MOVE_MOUNT__MASK)
  3628. return -EINVAL;
  3629. /* If someone gives a pathname, they aren't permitted to move
  3630. * from an fd that requires unmount as we can't get at the flag
  3631. * to clear it afterwards.
  3632. */
  3633. lflags = 0;
  3634. if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
  3635. if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
  3636. if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
  3637. ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
  3638. if (ret < 0)
  3639. return ret;
  3640. lflags = 0;
  3641. if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
  3642. if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
  3643. if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
  3644. ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
  3645. if (ret < 0)
  3646. goto out_from;
  3647. ret = security_move_mount(&from_path, &to_path);
  3648. if (ret < 0)
  3649. goto out_to;
  3650. if (flags & MOVE_MOUNT_SET_GROUP)
  3651. ret = do_set_group(&from_path, &to_path);
  3652. else
  3653. ret = do_move_mount(&from_path, &to_path);
  3654. out_to:
  3655. path_put(&to_path);
  3656. out_from:
  3657. path_put(&from_path);
  3658. return ret;
  3659. }
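/*
 * Illustrative sketch (not part of this file): as the comment above notes,
 * combining open_tree(2) with OPEN_TREE_CLONE and move_mount(2) copies a
 * mount subtree to a new location, roughly a detached recursive bind mount.
 * Requires Linux >= 5.2; on older glibc AT_RECURSIVE comes from
 * <linux/fcntl.h>. Paths are hypothetical; error handling omitted.
 */
#if 0
#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int copy_subtree_example(void)
{
	int tfd = syscall(SYS_open_tree, AT_FDCWD, "/mnt/src",
			  OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);

	return syscall(SYS_move_mount, tfd, "", AT_FDCWD, "/mnt/copy",
		       MOVE_MOUNT_F_EMPTY_PATH);
}
#endif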
  3660. /*
  3661. * Return true if path is reachable from root
  3662. *
  3663. * namespace_sem or mount_lock is held
  3664. */
  3665. bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
  3666. const struct path *root)
  3667. {
  3668. #ifdef CONFIG_KDP_NS
  3669. while (((struct kdp_mount *)mnt)->mnt != root->mnt && mnt_has_parent(mnt)) {
  3670. #else
  3671. while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
  3672. #endif
  3673. dentry = mnt->mnt_mountpoint;
  3674. mnt = mnt->mnt_parent;
  3675. }
  3676. #ifdef CONFIG_KDP_NS
  3677. return ((struct kdp_mount *)mnt)->mnt == root->mnt && is_subdir(dentry, root->dentry);
  3678. #else
  3679. return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
  3680. #endif
  3681. }
  3682. bool path_is_under(const struct path *path1, const struct path *path2)
  3683. {
  3684. bool res;
  3685. read_seqlock_excl(&mount_lock);
  3686. res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
  3687. read_sequnlock_excl(&mount_lock);
  3688. return res;
  3689. }
  3690. EXPORT_SYMBOL(path_is_under);
  3691. /*
  3692. * pivot_root Semantics:
  3693. * Moves the root file system of the current process to the directory put_old,
3694. * makes new_root the new root file system of the current process, and sets
  3695. * root/cwd of all processes which had them on the current root to new_root.
  3696. *
  3697. * Restrictions:
  3698. * The new_root and put_old must be directories, and must not be on the
  3699. * same file system as the current process root. The put_old must be
  3700. * underneath new_root, i.e. adding a non-zero number of /.. to the string
  3701. * pointed to by put_old must yield the same directory as new_root. No other
  3702. * file system may be mounted on put_old. After all, new_root is a mountpoint.
  3703. *
  3704. * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
  3705. * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
  3706. * in this situation.
  3707. *
  3708. * Notes:
  3709. * - we don't move root/cwd if they are not at the root (reason: if something
  3710. * cared enough to change them, it's probably wrong to force them elsewhere)
  3711. * - it's okay to pick a root that isn't the root of a file system, e.g.
  3712. * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
  3713. * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
  3714. * first.
  3715. */
  3716. SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
  3717. const char __user *, put_old)
  3718. {
  3719. struct path new, old, root;
  3720. struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
  3721. struct mountpoint *old_mp, *root_mp;
  3722. int error;
  3723. if (!may_mount())
  3724. return -EPERM;
  3725. error = user_path_at(AT_FDCWD, new_root,
  3726. LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
  3727. if (error)
  3728. goto out0;
  3729. error = user_path_at(AT_FDCWD, put_old,
  3730. LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
  3731. if (error)
  3732. goto out1;
  3733. error = security_sb_pivotroot(&old, &new);
  3734. if (error)
  3735. goto out2;
  3736. get_fs_root(current->fs, &root);
  3737. old_mp = lock_mount(&old);
  3738. error = PTR_ERR(old_mp);
  3739. if (IS_ERR(old_mp))
  3740. goto out3;
  3741. error = -EINVAL;
  3742. new_mnt = real_mount(new.mnt);
  3743. root_mnt = real_mount(root.mnt);
  3744. old_mnt = real_mount(old.mnt);
  3745. ex_parent = new_mnt->mnt_parent;
  3746. root_parent = root_mnt->mnt_parent;
  3747. if (IS_MNT_SHARED(old_mnt) ||
  3748. IS_MNT_SHARED(ex_parent) ||
  3749. IS_MNT_SHARED(root_parent))
  3750. goto out4;
  3751. if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
  3752. goto out4;
  3753. #ifdef CONFIG_KDP_NS
  3754. if (((struct kdp_mount *)new_mnt)->mnt->mnt_flags & MNT_LOCKED)
  3755. #else
  3756. if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
  3757. #endif
  3758. goto out4;
  3759. error = -ENOENT;
  3760. if (d_unlinked(new.dentry))
  3761. goto out4;
  3762. error = -EBUSY;
  3763. if (new_mnt == root_mnt || old_mnt == root_mnt)
  3764. goto out4; /* loop, on the same file system */
  3765. error = -EINVAL;
  3766. if (root.mnt->mnt_root != root.dentry)
  3767. goto out4; /* not a mountpoint */
  3768. if (!mnt_has_parent(root_mnt))
  3769. goto out4; /* not attached */
  3770. if (new.mnt->mnt_root != new.dentry)
  3771. goto out4; /* not a mountpoint */
  3772. if (!mnt_has_parent(new_mnt))
  3773. goto out4; /* not attached */
  3774. /* make sure we can reach put_old from new_root */
  3775. if (!is_path_reachable(old_mnt, old.dentry, &new))
  3776. goto out4;
  3777. /* make certain new is below the root */
  3778. if (!is_path_reachable(new_mnt, new.dentry, &root))
  3779. goto out4;
  3780. lock_mount_hash();
  3781. umount_mnt(new_mnt);
  3782. root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
  3783. #ifdef CONFIG_KDP_NS
  3784. if (((struct kdp_mount *)root_mnt)->mnt->mnt_flags & MNT_LOCKED) {
  3785. kdp_set_mnt_flags(((struct kdp_mount *)new_mnt)->mnt, MNT_LOCKED);
  3786. kdp_clear_mnt_flags(((struct kdp_mount *)root_mnt)->mnt, MNT_LOCKED);
  3787. }
  3788. #else
  3789. if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
  3790. new_mnt->mnt.mnt_flags |= MNT_LOCKED;
  3791. root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
  3792. }
  3793. #endif
  3794. /* mount old root on put_old */
  3795. attach_mnt(root_mnt, old_mnt, old_mp);
  3796. /* mount new_root on / */
  3797. attach_mnt(new_mnt, root_parent, root_mp);
  3798. mnt_add_count(root_parent, -1);
  3799. touch_mnt_namespace(current->nsproxy->mnt_ns);
  3800. /* A moved mount should not expire automatically */
  3801. list_del_init(&new_mnt->mnt_expire);
  3802. put_mountpoint(root_mp);
  3803. unlock_mount_hash();
  3804. chroot_fs_refs(&root, &new);
  3805. error = 0;
  3806. out4:
  3807. unlock_mount(old_mp);
  3808. if (!error)
  3809. mntput_no_expire(ex_parent);
  3810. out3:
  3811. path_put(&root);
  3812. out2:
  3813. path_put(&old);
  3814. out1:
  3815. path_put(&new);
  3816. out0:
  3817. return error;
  3818. }
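/*
 * Illustrative sketch (not part of this file): the canonical switch-root
 * sequence for the syscall above, as used by initramfs-style setups. Since
 * new_root must be a mountpoint, it is first bind-mounted onto itself. The
 * paths are hypothetical; error handling is omitted.
 */
#if 0
#include <sys/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pivot_root_example(void)
{
	mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
	syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot");
	chdir("/");
	/* Detach the old root once nothing references it any more. */
	return umount2("/oldroot", MNT_DETACH);
}
#endif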
  3819. static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
  3820. {
  3821. #ifdef CONFIG_KDP_NS
  3822. unsigned int flags = ((struct kdp_mount *)mnt)->mnt->mnt_flags;
  3823. #else
  3824. unsigned int flags = mnt->mnt.mnt_flags;
  3825. #endif
  3826. /* flags to clear */
  3827. flags &= ~kattr->attr_clr;
  3828. /* flags to raise */
  3829. flags |= kattr->attr_set;
  3830. return flags;
  3831. }
  3832. static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
  3833. {
  3834. #ifdef CONFIG_KDP_NS
  3835. struct vfsmount *m = ((struct kdp_mount *)mnt)->mnt;
  3836. #else
  3837. struct vfsmount *m = &mnt->mnt;
  3838. #endif
  3839. struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
  3840. if (!kattr->mnt_userns)
  3841. return 0;
  3842. /*
  3843. * Creating an idmapped mount with the filesystem wide idmapping
  3844. * doesn't make sense so block that. We don't allow mushy semantics.
  3845. */
  3846. if (kattr->mnt_userns == fs_userns)
  3847. return -EINVAL;
  3848. /*
  3849. * Once a mount has been idmapped we don't allow it to change its
  3850. * mapping. It makes things simpler and callers can just create
  3851. * another bind-mount they can idmap if they want to.
  3852. */
  3853. if (is_idmapped_mnt(m))
  3854. return -EPERM;
  3855. /* The underlying filesystem doesn't support idmapped mounts yet. */
  3856. if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
  3857. return -EINVAL;
  3858. /* We're not controlling the superblock. */
  3859. if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
  3860. return -EPERM;
  3861. /* Mount has already been visible in the filesystem hierarchy. */
  3862. if (!is_anon_ns(mnt->mnt_ns))
  3863. return -EINVAL;
  3864. return 0;
  3865. }
  3866. /**
  3867. * mnt_allow_writers() - check whether the attribute change allows writers
  3868. * @kattr: the new mount attributes
  3869. * @mnt: the mount to which @kattr will be applied
  3870. *
3871. * Check whether the new mount attributes in @kattr allow concurrent writers.
  3872. *
3873. * Return: true if concurrent writers are still allowed, false if they need to be held off
  3874. */
  3875. static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
  3876. const struct mount *mnt)
  3877. {
  3878. return (!(kattr->attr_set & MNT_READONLY) ||
  3879. #ifdef CONFIG_KDP_NS
  3880. (((struct kdp_mount *)mnt)->mnt->mnt_flags & MNT_READONLY)) &&
  3881. #else
  3882. (mnt->mnt.mnt_flags & MNT_READONLY)) &&
  3883. #endif
  3884. !kattr->mnt_userns;
  3885. }
  3886. static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
  3887. {
  3888. struct mount *m;
  3889. int err;
  3890. for (m = mnt; m; m = next_mnt(m, mnt)) {
  3891. if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
  3892. err = -EPERM;
  3893. break;
  3894. }
  3895. err = can_idmap_mount(kattr, m);
  3896. if (err)
  3897. break;
  3898. if (!mnt_allow_writers(kattr, m)) {
  3899. err = mnt_hold_writers(m);
  3900. if (err)
  3901. break;
  3902. }
  3903. if (!kattr->recurse)
  3904. return 0;
  3905. }
  3906. if (err) {
  3907. struct mount *p;
  3908. /*
3909. * If we had to call mnt_hold_writers(), MNT_WRITE_HOLD will be
3910. * set on those mounts. The loop unsets MNT_WRITE_HOLD for all
  3911. * mounts and needs to take care to include the first mount.
  3912. */
  3913. for (p = mnt; p; p = next_mnt(p, mnt)) {
  3914. /* If we had to hold writers unblock them. */
  3915. #ifdef CONFIG_KDP_NS
  3916. if (((struct kdp_mount *)p)->mnt->mnt_flags & MNT_WRITE_HOLD)
  3917. #else
  3918. if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
  3919. #endif
  3920. mnt_unhold_writers(p);
  3921. /*
  3922. * We're done once the first mount we changed got
  3923. * MNT_WRITE_HOLD unset.
  3924. */
  3925. if (p == m)
  3926. break;
  3927. }
  3928. }
  3929. return err;
  3930. }
  3931. static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
  3932. {
  3933. struct user_namespace *mnt_userns, *old_mnt_userns;
  3934. if (!kattr->mnt_userns)
  3935. return;
  3936. /*
  3937. * We're the only ones able to change the mount's idmapping. So
  3938. * mnt->mnt.mnt_userns is stable and we can retrieve it directly.
  3939. */
  3940. #ifdef CONFIG_KDP_NS
  3941. old_mnt_userns = ((struct kdp_mount *)mnt)->mnt->mnt_userns;
  3942. #else
  3943. old_mnt_userns = mnt->mnt.mnt_userns;
  3944. #endif
  3945. mnt_userns = get_user_ns(kattr->mnt_userns);
  3946. /* Pairs with smp_load_acquire() in mnt_user_ns(). */
  3947. #ifdef CONFIG_KDP_NS
  3948. compiletime_assert_atomic_type(((struct kdp_mount *)mnt)->mnt);
  3949. __smp_mb();
  3950. kdp_set_mnt_userns(((struct kdp_mount *)mnt)->mnt, mnt_userns);
  3951. #else
  3952. smp_store_release(&mnt->mnt.mnt_userns, mnt_userns);
  3953. #endif
  3954. /*
  3955. * If this is an idmapped filesystem drop the reference we've taken
  3956. * in vfs_create_mount() before.
  3957. */
  3958. if (!initial_idmapping(old_mnt_userns))
  3959. put_user_ns(old_mnt_userns);
  3960. }
  3961. static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
  3962. {
  3963. struct mount *m;
  3964. for (m = mnt; m; m = next_mnt(m, mnt)) {
  3965. unsigned int flags;
  3966. do_idmap_mount(kattr, m);
  3967. flags = recalc_flags(kattr, m);
  3968. #ifdef CONFIG_KDP_NS
  3969. kdp_assign_mnt_flags(((struct kdp_mount *)m)->mnt, flags);
  3970. #else
  3971. WRITE_ONCE(m->mnt.mnt_flags, flags);
  3972. #endif
  3973. /* If we had to hold writers unblock them. */
  3974. #ifdef CONFIG_KDP_NS
  3975. if (((struct kdp_mount*)m)->mnt->mnt_flags & MNT_WRITE_HOLD)
  3976. #else
  3977. if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
  3978. #endif
  3979. mnt_unhold_writers(m);
  3980. if (kattr->propagation)
  3981. change_mnt_propagation(m, kattr->propagation);
  3982. if (!kattr->recurse)
  3983. break;
  3984. }
  3985. touch_mnt_namespace(mnt->mnt_ns);
  3986. }
  3987. static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
  3988. {
  3989. struct mount *mnt = real_mount(path->mnt);
  3990. int err = 0;
  3991. #ifdef CONFIG_KDP_NS
  3992. if (path->dentry != ((struct kdp_mount *)mnt)->mnt->mnt_root)
  3993. #else
  3994. if (path->dentry != mnt->mnt.mnt_root)
  3995. #endif
  3996. return -EINVAL;
  3997. if (kattr->propagation) {
  3998. /*
  3999. * Only take namespace_lock() if we're actually changing
  4000. * propagation.
  4001. */
  4002. namespace_lock();
  4003. if (kattr->propagation == MS_SHARED) {
  4004. err = invent_group_ids(mnt, kattr->recurse);
  4005. if (err) {
  4006. namespace_unlock();
  4007. return err;
  4008. }
  4009. }
  4010. }
  4011. err = -EINVAL;
  4012. lock_mount_hash();
  4013. /* Ensure that this isn't anything purely vfs internal. */
  4014. if (!is_mounted(&mnt->mnt))
  4015. goto out;
  4016. /*
  4017. * If this is an attached mount make sure it's located in the callers
  4018. * mount namespace. If it's not don't let the caller interact with it.
  4019. * If this is a detached mount make sure it has an anonymous mount
  4020. * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE.
  4021. */
  4022. if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
  4023. goto out;
  4024. /*
  4025. * First, we get the mount tree in a shape where we can change mount
  4026. * properties without failure. If we succeeded to do so we commit all
  4027. * changes and if we failed we clean up.
  4028. */
  4029. err = mount_setattr_prepare(kattr, mnt);
  4030. if (!err)
  4031. mount_setattr_commit(kattr, mnt);
  4032. out:
  4033. unlock_mount_hash();
  4034. if (kattr->propagation) {
  4035. if (err)
  4036. cleanup_group_ids(mnt, NULL);
  4037. namespace_unlock();
  4038. }
  4039. return err;
  4040. }
  4041. static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
  4042. struct mount_kattr *kattr, unsigned int flags)
  4043. {
  4044. int err = 0;
  4045. struct ns_common *ns;
  4046. struct user_namespace *mnt_userns;
  4047. struct file *file;
  4048. if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
  4049. return 0;
  4050. /*
  4051. * We currently do not support clearing an idmapped mount. If this ever
  4052. * is a use-case we can revisit this but for now let's keep it simple
  4053. * and not allow it.
  4054. */
  4055. if (attr->attr_clr & MOUNT_ATTR_IDMAP)
  4056. return -EINVAL;
  4057. if (attr->userns_fd > INT_MAX)
  4058. return -EINVAL;
  4059. file = fget(attr->userns_fd);
  4060. if (!file)
  4061. return -EBADF;
  4062. if (!proc_ns_file(file)) {
  4063. err = -EINVAL;
  4064. goto out_fput;
  4065. }
  4066. ns = get_proc_ns(file_inode(file));
  4067. if (ns->ops->type != CLONE_NEWUSER) {
  4068. err = -EINVAL;
  4069. goto out_fput;
  4070. }
  4071. /*
  4072. * The initial idmapping cannot be used to create an idmapped
  4073. * mount. We use the initial idmapping as an indicator of a mount
  4074. * that is not idmapped. It can simply be passed into helpers that
  4075. * are aware of idmapped mounts as a convenient shortcut. A user
  4076. * can just create a dedicated identity mapping to achieve the same
  4077. * result.
  4078. */
  4079. mnt_userns = container_of(ns, struct user_namespace, ns);
  4080. if (initial_idmapping(mnt_userns)) {
  4081. err = -EPERM;
  4082. goto out_fput;
  4083. }
  4084. /* We're not controlling the target namespace. */
  4085. if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
  4086. err = -EPERM;
  4087. goto out_fput;
  4088. }
  4089. kattr->mnt_userns = get_user_ns(mnt_userns);
  4090. out_fput:
  4091. fput(file);
  4092. return err;
  4093. }
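/*
 * Illustrative sketch (not part of this file): creating an idmapped mount
 * reaches the helper above through mount_setattr(2). Because
 * can_idmap_mount() requires an anonymous mount namespace, the idmapping is
 * applied to a detached tree from open_tree(2) and only then attached. The
 * paths and the origin of userns_fd are hypothetical; requires Linux >= 5.12
 * (open_tree since 5.2). Error handling is mostly omitted.
 */
#if 0
#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int idmapped_mount_example(int userns_fd)
{
	struct mount_attr attr = {
		.attr_set  = MOUNT_ATTR_IDMAP,
		.userns_fd = userns_fd,	/* e.g. open("/proc/<pid>/ns/user") */
	};
	int tfd = syscall(SYS_open_tree, AT_FDCWD, "/mnt/src",
			  OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);

	if (syscall(SYS_mount_setattr, tfd, "", AT_EMPTY_PATH, &attr,
		    sizeof(attr)) < 0)
		return -1;
	return syscall(SYS_move_mount, tfd, "", AT_FDCWD, "/mnt/idmapped",
		       MOVE_MOUNT_F_EMPTY_PATH);
}
#endif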
  4094. static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
  4095. struct mount_kattr *kattr, unsigned int flags)
  4096. {
  4097. unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
  4098. if (flags & AT_NO_AUTOMOUNT)
  4099. lookup_flags &= ~LOOKUP_AUTOMOUNT;
  4100. if (flags & AT_SYMLINK_NOFOLLOW)
  4101. lookup_flags &= ~LOOKUP_FOLLOW;
  4102. if (flags & AT_EMPTY_PATH)
  4103. lookup_flags |= LOOKUP_EMPTY;
  4104. *kattr = (struct mount_kattr) {
  4105. .lookup_flags = lookup_flags,
  4106. .recurse = !!(flags & AT_RECURSIVE),
  4107. };
  4108. if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
  4109. return -EINVAL;
  4110. if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
  4111. return -EINVAL;
  4112. kattr->propagation = attr->propagation;
  4113. if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
  4114. return -EINVAL;
  4115. kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
  4116. kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
  4117. /*
  4118. * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
  4119. * users wanting to transition to a different atime setting cannot
  4120. * simply specify the atime setting in @attr_set, but must also
  4121. * specify MOUNT_ATTR__ATIME in the @attr_clr field.
  4122. * So ensure that MOUNT_ATTR__ATIME can't be partially set in
  4123. * @attr_clr and that @attr_set can't have any atime bits set if
  4124. * MOUNT_ATTR__ATIME isn't set in @attr_clr.
  4125. */
  4126. if (attr->attr_clr & MOUNT_ATTR__ATIME) {
  4127. if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
  4128. return -EINVAL;
  4129. /*
  4130. * Clear all previous time settings as they are mutually
  4131. * exclusive.
  4132. */
  4133. kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
  4134. switch (attr->attr_set & MOUNT_ATTR__ATIME) {
  4135. case MOUNT_ATTR_RELATIME:
  4136. kattr->attr_set |= MNT_RELATIME;
  4137. break;
  4138. case MOUNT_ATTR_NOATIME:
  4139. kattr->attr_set |= MNT_NOATIME;
  4140. break;
  4141. case MOUNT_ATTR_STRICTATIME:
  4142. break;
  4143. default:
  4144. return -EINVAL;
  4145. }
  4146. } else {
  4147. if (attr->attr_set & MOUNT_ATTR__ATIME)
  4148. return -EINVAL;
  4149. }
  4150. return build_mount_idmapped(attr, usize, kattr, flags);
  4151. }
  4152. static void finish_mount_kattr(struct mount_kattr *kattr)
  4153. {
  4154. put_user_ns(kattr->mnt_userns);
  4155. kattr->mnt_userns = NULL;
  4156. }
  4157. SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
  4158. unsigned int, flags, struct mount_attr __user *, uattr,
  4159. size_t, usize)
  4160. {
  4161. int err;
  4162. struct path target;
  4163. struct mount_attr attr;
  4164. struct mount_kattr kattr;
  4165. BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
  4166. if (flags & ~(AT_EMPTY_PATH |
  4167. AT_RECURSIVE |
  4168. AT_SYMLINK_NOFOLLOW |
  4169. AT_NO_AUTOMOUNT))
  4170. return -EINVAL;
  4171. if (unlikely(usize > PAGE_SIZE))
  4172. return -E2BIG;
  4173. if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
  4174. return -EINVAL;
  4175. if (!may_mount())
  4176. return -EPERM;
  4177. err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
  4178. if (err)
  4179. return err;
  4180. /* Don't bother walking through the mounts if this is a nop. */
  4181. if (attr.attr_set == 0 &&
  4182. attr.attr_clr == 0 &&
  4183. attr.propagation == 0)
  4184. return 0;
  4185. err = build_mount_kattr(&attr, usize, &kattr, flags);
  4186. if (err)
  4187. return err;
  4188. err = user_path_at(dfd, path, kattr.lookup_flags, &target);
  4189. if (!err) {
  4190. err = do_mount_setattr(&target, &kattr);
  4191. path_put(&target);
  4192. }
  4193. finish_mount_kattr(&kattr);
  4194. return err;
  4195. }
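/*
 * Illustrative sketch (not part of this file): a minimal mount_setattr(2)
 * call making an entire subtree read-only in one go, something the old
 * mount(2) interface could not do atomically. Requires Linux >= 5.12; on
 * older glibc AT_RECURSIVE comes from <linux/fcntl.h>. The path is
 * hypothetical.
 */
#if 0
#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int subtree_rdonly_example(void)
{
	struct mount_attr attr = {
		.attr_set = MOUNT_ATTR_RDONLY,
	};

	return syscall(SYS_mount_setattr, AT_FDCWD, "/mnt/tree",
		       AT_RECURSIVE, &attr, sizeof(attr));
}
#endif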
  4196. static void __init init_mount_tree(void)
  4197. {
  4198. struct vfsmount *mnt;
  4199. struct mount *m;
  4200. struct mnt_namespace *ns;
  4201. struct path root;
  4202. mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
  4203. if (IS_ERR(mnt))
  4204. panic("Can't create rootfs");
  4205. ns = alloc_mnt_ns(&init_user_ns, false);
  4206. if (IS_ERR(ns))
  4207. panic("Can't allocate initial namespace");
  4208. m = real_mount(mnt);
  4209. m->mnt_ns = ns;
  4210. ns->root = m;
  4211. ns->mounts = 1;
  4212. list_add(&m->mnt_list, &ns->list);
  4213. init_task.nsproxy->mnt_ns = ns;
  4214. get_mnt_ns(ns);
  4215. root.mnt = mnt;
  4216. root.dentry = mnt->mnt_root;
  4217. #ifdef CONFIG_KDP_NS
  4218. kdp_set_mnt_flags(mnt, MNT_LOCKED);
  4219. #else
  4220. mnt->mnt_flags |= MNT_LOCKED;
  4221. #endif
  4222. set_fs_pwd(current->fs, &root);
  4223. set_fs_root(current->fs, &root);
  4224. }
  4225. void __init mnt_init(void)
  4226. {
  4227. int err;
  4228. #ifdef CONFIG_KDP_NS
  4229. mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct kdp_mount),
  4230. 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
  4231. kdp_mnt_init();
  4232. #else
  4233. mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
  4234. 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
  4235. #endif
  4236. mount_hashtable = alloc_large_system_hash("Mount-cache",
  4237. sizeof(struct hlist_head),
  4238. mhash_entries, 19,
  4239. HASH_ZERO,
  4240. &m_hash_shift, &m_hash_mask, 0, 0);
  4241. mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
  4242. sizeof(struct hlist_head),
  4243. mphash_entries, 19,
  4244. HASH_ZERO,
  4245. &mp_hash_shift, &mp_hash_mask, 0, 0);
  4246. if (!mount_hashtable || !mountpoint_hashtable)
  4247. panic("Failed to allocate mount hash table\n");
  4248. kernfs_init();
  4249. err = sysfs_init();
  4250. if (err)
  4251. printk(KERN_WARNING "%s: sysfs_init error: %d\n",
  4252. __func__, err);
  4253. fs_kobj = kobject_create_and_add("fs", NULL);
  4254. if (!fs_kobj)
  4255. printk(KERN_WARNING "%s: kobj create error\n", __func__);
  4256. shmem_init();
  4257. init_rootfs();
  4258. init_mount_tree();
  4259. }
  4260. void put_mnt_ns(struct mnt_namespace *ns)
  4261. {
  4262. if (!refcount_dec_and_test(&ns->ns.count))
  4263. return;
  4264. #ifdef CONFIG_KDP_NS
  4265. drop_collected_mounts(((struct kdp_mount *)ns->root)->mnt);
  4266. #else
  4267. drop_collected_mounts(&ns->root->mnt);
  4268. #endif
  4269. free_mnt_ns(ns);
  4270. }
  4271. struct vfsmount *kern_mount(struct file_system_type *type)
  4272. {
  4273. struct vfsmount *mnt;
  4274. mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
  4275. if (!IS_ERR(mnt)) {
  4276. /*
4277. * it is a long-term mount; don't release mnt until
4278. * we unmount it, just before the filesystem is unregistered
  4279. */
  4280. real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
  4281. }
  4282. return mnt;
  4283. }
  4284. EXPORT_SYMBOL_GPL(kern_mount);
  4285. void kern_unmount(struct vfsmount *mnt)
  4286. {
  4287. /* release long term mount so mount point can be released */
  4288. if (!IS_ERR_OR_NULL(mnt)) {
  4289. real_mount(mnt)->mnt_ns = NULL;
  4290. synchronize_rcu(); /* yecchhh... */
  4291. mntput(mnt);
  4292. }
  4293. }
  4294. EXPORT_SYMBOL(kern_unmount);
  4295. void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
  4296. {
  4297. unsigned int i;
  4298. for (i = 0; i < num; i++)
  4299. if (mnt[i])
  4300. real_mount(mnt[i])->mnt_ns = NULL;
  4301. synchronize_rcu_expedited();
  4302. for (i = 0; i < num; i++)
  4303. mntput(mnt[i]);
  4304. }
  4305. EXPORT_SYMBOL(kern_unmount_array);
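/*
 * Illustrative sketch (not part of this file): a subsystem keeping a
 * long-term internal mount pairs kern_mount() with kern_unmount(), much as
 * pipefs and sockfs hold their backing superblocks. The filesystem type and
 * function names below are hypothetical.
 */
#if 0
static struct vfsmount *example_mnt;

static int __init example_init(void)
{
	example_mnt = kern_mount(&example_fs_type);
	return PTR_ERR_OR_ZERO(example_mnt);
}

static void __exit example_exit(void)
{
	kern_unmount(example_mnt);
}
#endif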
  4306. bool our_mnt(struct vfsmount *mnt)
  4307. {
  4308. return check_mnt(real_mount(mnt));
  4309. }
  4310. bool current_chrooted(void)
  4311. {
  4312. /* Does the current process have a non-standard root */
  4313. struct path ns_root;
  4314. struct path fs_root;
  4315. bool chrooted;
  4316. /* Find the namespace root */
  4317. #ifdef CONFIG_KDP_NS
  4318. ns_root.mnt = ((struct kdp_mount *)current->nsproxy->mnt_ns->root)->mnt;
  4319. #else
  4320. ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
  4321. #endif
  4322. ns_root.dentry = ns_root.mnt->mnt_root;
  4323. path_get(&ns_root);
  4324. while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
  4325. ;
  4326. get_fs_root(current->fs, &fs_root);
  4327. chrooted = !path_equal(&fs_root, &ns_root);
  4328. path_put(&fs_root);
  4329. path_put(&ns_root);
  4330. return chrooted;
  4331. }
  4332. static bool mnt_already_visible(struct mnt_namespace *ns,
  4333. const struct super_block *sb,
  4334. int *new_mnt_flags)
  4335. {
  4336. int new_flags = *new_mnt_flags;
  4337. struct mount *mnt;
  4338. bool visible = false;
  4339. down_read(&namespace_sem);
  4340. lock_ns_list(ns);
  4341. list_for_each_entry(mnt, &ns->list, mnt_list) {
  4342. struct mount *child;
  4343. int mnt_flags;
  4344. if (mnt_is_cursor(mnt))
  4345. continue;
  4346. #ifdef CONFIG_KDP_NS
  4347. if (((struct kdp_mount *)mnt)->mnt->mnt_sb->s_type != sb->s_type)
  4348. continue;
4349. /* This mount is not fully visible if its root directory
  4350. * is not the root directory of the filesystem.
  4351. */
  4352. if (((struct kdp_mount *)mnt)->mnt->mnt_root != ((struct kdp_mount *)mnt)->mnt->mnt_sb->s_root)
  4353. continue;
  4354. /* A local view of the mount flags */
  4355. mnt_flags = ((struct kdp_mount *)mnt)->mnt->mnt_flags;
  4356. /* Don't miss readonly hidden in the superblock flags */
  4357. if (sb_rdonly(((struct kdp_mount *)mnt)->mnt->mnt_sb))
  4358. mnt_flags |= MNT_LOCK_READONLY;
  4359. #else
  4360. if (mnt->mnt.mnt_sb->s_type != sb->s_type)
  4361. continue;
  4362. /* This mount is not fully visible if it's root directory
  4363. * is not the root directory of the filesystem.
  4364. */
  4365. if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
  4366. continue;
  4367. /* A local view of the mount flags */
  4368. mnt_flags = mnt->mnt.mnt_flags;
  4369. /* Don't miss readonly hidden in the superblock flags */
  4370. if (sb_rdonly(mnt->mnt.mnt_sb))
  4371. mnt_flags |= MNT_LOCK_READONLY;
  4372. #endif
  4373. /* Verify the mount flags are equal to or more permissive
  4374. * than the proposed new mount.
  4375. */
  4376. if ((mnt_flags & MNT_LOCK_READONLY) &&
  4377. !(new_flags & MNT_READONLY))
  4378. continue;
  4379. if ((mnt_flags & MNT_LOCK_ATIME) &&
  4380. ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
  4381. continue;
  4382. /* This mount is not fully visible if there are any
  4383. * locked child mounts that cover anything except for
  4384. * empty directories.
  4385. */
  4386. list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
  4387. struct inode *inode = child->mnt_mountpoint->d_inode;
  4388. /* Only worry about locked mounts */
  4389. #ifdef CONFIG_KDP_NS
  4390. if (!(((struct kdp_mount *)child)->mnt->mnt_flags & MNT_LOCKED))
  4391. continue;
  4392. #else
  4393. if (!(child->mnt.mnt_flags & MNT_LOCKED))
  4394. continue;
  4395. #endif
  4396. /* Is the directory permanetly empty? */
  4397. if (!is_empty_dir_inode(inode))
  4398. goto next;
  4399. }
  4400. /* Preserve the locked attributes */
  4401. *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
  4402. MNT_LOCK_ATIME);
  4403. visible = true;
  4404. goto found;
  4405. next: ;
  4406. }
  4407. found:
  4408. unlock_ns_list(ns);
  4409. up_read(&namespace_sem);
  4410. return visible;
  4411. }
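
/*
 * mount_too_revealing - decide whether mounting @sb would reveal more
 * of the filesystem than the caller's mount namespace can already see.
 *
 * Only filesystems flagged SB_I_USERNS_VISIBLE are subject to the
 * check, and those are required to also carry SB_I_NOEXEC and
 * SB_I_NODEV; callers in the init user namespace are always allowed.
 */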
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}
bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid. This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}
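
/*
 * The helpers below back the "mnt" entry in /proc/<pid>/ns: they let a
 * namespace file descriptor pin, release, and (via setns) enter a
 * mount namespace.
 */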
static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}
static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}
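
/*
 * mntns_install - switch the caller to another mount namespace for
 * setns(2).
 *
 * The caller must be privileged over the target namespace and its own
 * credentials (CAP_SYS_ADMIN plus CAP_SYS_CHROOT), must not share its
 * fs_struct with another task, and cannot enter an anonymous mount
 * namespace. On success, root and pwd are reset to the new
 * namespace's root.
 */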
static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
#ifdef CONFIG_KDP_NS
	err = vfs_path_lookup(((struct kdp_mount *)mnt_ns->root)->mnt->mnt_root,
			      ((struct kdp_mount *)mnt_ns->root)->mnt,
			      "/", LOOKUP_DOWN, &root);
#else
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
			      "/", LOOKUP_DOWN, &root);
#endif
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};
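
/*
 * Expose /proc/sys/fs/mount-max, the per-namespace cap on the number
 * of mounts. proc_dointvec_minmax with extra1 == SYSCTL_ONE keeps the
 * limit at or above one.
 */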
#ifdef CONFIG_SYSCTL
static struct ctl_table fs_namespace_sysctls[] = {
	{
		.procname	= "mount-max",
		.data		= &sysctl_mount_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
	{ }
};

static int __init init_fs_namespace_sysctls(void)
{
	register_sysctl_init("fs", fs_namespace_sysctls);
	return 0;
}
fs_initcall(init_fs_namespace_sysctls);
#endif /* CONFIG_SYSCTL */