inode.c

  1. /*
  2. * hugetlbpage-backed filesystem. Based on ramfs.
  3. *
  4. * Nadia Yvette Chambers, 2002
  5. *
  6. * Copyright (C) 2002 Linus Torvalds.
  7. * License: GPL
  8. */
  9. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10. #include <linux/thread_info.h>
  11. #include <asm/current.h>
  12. #include <linux/falloc.h>
  13. #include <linux/fs.h>
  14. #include <linux/mount.h>
  15. #include <linux/file.h>
  16. #include <linux/kernel.h>
  17. #include <linux/writeback.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/highmem.h>
  20. #include <linux/init.h>
  21. #include <linux/string.h>
  22. #include <linux/capability.h>
  23. #include <linux/ctype.h>
  24. #include <linux/backing-dev.h>
  25. #include <linux/hugetlb.h>
  26. #include <linux/pagevec.h>
  27. #include <linux/fs_parser.h>
  28. #include <linux/mman.h>
  29. #include <linux/slab.h>
  30. #include <linux/dnotify.h>
  31. #include <linux/statfs.h>
  32. #include <linux/security.h>
  33. #include <linux/magic.h>
  34. #include <linux/migrate.h>
  35. #include <linux/uio.h>
  36. #include <linux/uaccess.h>
  37. #include <linux/sched/mm.h>
  38. static const struct address_space_operations hugetlbfs_aops;
  39. const struct file_operations hugetlbfs_file_operations;
  40. static const struct inode_operations hugetlbfs_dir_inode_operations;
  41. static const struct inode_operations hugetlbfs_inode_operations;
  42. enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
  43. struct hugetlbfs_fs_context {
  44. struct hstate *hstate;
  45. unsigned long long max_size_opt;
  46. unsigned long long min_size_opt;
  47. long max_hpages;
  48. long nr_inodes;
  49. long min_hpages;
  50. enum hugetlbfs_size_type max_val_type;
  51. enum hugetlbfs_size_type min_val_type;
  52. kuid_t uid;
  53. kgid_t gid;
  54. umode_t mode;
  55. };
  56. int sysctl_hugetlb_shm_group;
  57. enum hugetlb_param {
  58. Opt_gid,
  59. Opt_min_size,
  60. Opt_mode,
  61. Opt_nr_inodes,
  62. Opt_pagesize,
  63. Opt_size,
  64. Opt_uid,
  65. };
  66. static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
  67. fsparam_u32 ("gid", Opt_gid),
  68. fsparam_string("min_size", Opt_min_size),
  69. fsparam_u32oct("mode", Opt_mode),
  70. fsparam_string("nr_inodes", Opt_nr_inodes),
  71. fsparam_string("pagesize", Opt_pagesize),
  72. fsparam_string("size", Opt_size),
  73. fsparam_u32 ("uid", Opt_uid),
  74. {}
  75. };
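/*
 * Illustrative sketch (added; not part of the original file): one way the
 * options in the hugetlb_fs_parameters table above could be supplied from
 * userspace via mount(2). The mount point /mnt/huge and the option values
 * are assumptions chosen for the example.
 */
#if 0	/* userspace example, not kernel code */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "pagesize", "size", "min_size" and "mode" correspond to
	 * Opt_pagesize, Opt_size, Opt_min_size and Opt_mode above. */
	if (mount("none", "/mnt/huge", "hugetlbfs", 0,
		  "pagesize=2M,size=256M,min_size=64M,mode=0770")) {
		perror("mount");
		return 1;
	}
	return 0;
}
#endif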
  76. #ifdef CONFIG_NUMA
  77. static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
  78. struct inode *inode, pgoff_t index)
  79. {
  80. vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
  81. index);
  82. }
  83. static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
  84. {
  85. mpol_cond_put(vma->vm_policy);
  86. }
  87. #else
  88. static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
  89. struct inode *inode, pgoff_t index)
  90. {
  91. }
  92. static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
  93. {
  94. }
  95. #endif
  96. /*
  97. * Mask used when checking the page offset value passed in via system
  98. * calls. This value will be converted to a loff_t which is signed.
  99. * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
  100. * value. The extra bit (- 1 in the shift value) is to take the sign
  101. * bit into account.
  102. */
  103. #define PGOFF_LOFFT_MAX \
  104. (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
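/*
 * Worked example (added for clarity, assuming PAGE_SHIFT == 12 and
 * BITS_PER_LONG == 64): the mask selects the top 13 bits of vm_pgoff.
 * If any of those bits were set, vm_pgoff << PAGE_SHIFT would either set
 * the sign bit of the 64-bit loff_t or overflow it entirely, which is why
 * hugetlbfs_file_mmap() below rejects such offsets with -EINVAL when
 * sizeof(unsigned long) == sizeof(loff_t).
 */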
  105. static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
  106. {
  107. struct inode *inode = file_inode(file);
  108. struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
  109. loff_t len, vma_len;
  110. int ret;
  111. struct hstate *h = hstate_file(file);
  112. /*
  113. * vma address alignment (but not the pgoff alignment) has
  114. * already been checked by prepare_hugepage_range. If you add
  115. * any error returns here, do so after setting VM_HUGETLB, so
  116. * that the is_vm_hugetlb_page() tests below unmap_region() go the
  117. * right way when do_mmap() unwinds (may be important on powerpc
  118. * and ia64).
  119. */
  120. vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
  121. vma->vm_ops = &hugetlb_vm_ops;
  122. ret = seal_check_future_write(info->seals, vma);
  123. if (ret)
  124. return ret;
  125. /*
  126. * page based offset in vm_pgoff could be sufficiently large to
  127. * overflow a loff_t when converted to byte offset. This can
  128. * only happen on architectures where sizeof(loff_t) ==
  129. * sizeof(unsigned long). So, only check in those instances.
  130. */
  131. if (sizeof(unsigned long) == sizeof(loff_t)) {
  132. if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
  133. return -EINVAL;
  134. }
  135. /* must be huge page aligned */
  136. if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
  137. return -EINVAL;
  138. vma_len = (loff_t)(vma->vm_end - vma->vm_start);
  139. len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
  140. /* check for overflow */
  141. if (len < vma_len)
  142. return -EINVAL;
  143. inode_lock(inode);
  144. file_accessed(file);
  145. ret = -ENOMEM;
  146. if (!hugetlb_reserve_pages(inode,
  147. vma->vm_pgoff >> huge_page_order(h),
  148. len >> huge_page_shift(h), vma,
  149. vma->vm_flags))
  150. goto out;
  151. ret = 0;
  152. if (vma->vm_flags & VM_WRITE && inode->i_size < len)
  153. i_size_write(inode, len);
  154. out:
  155. inode_unlock(inode);
  156. return ret;
  157. }
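/*
 * Illustrative sketch (added; not part of the original file): a userspace
 * mapping that exercises hugetlbfs_file_mmap() above. The mount point
 * /dev/hugepages, the file name and the 2 MB huge page size are assumptions
 * for the example.
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed 2 MB huge pages */

int main(void)
{
	/* Any file created under a hugetlbfs mount is backed by huge pages. */
	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Length and offset must be multiples of the huge page size;
	 * hugetlbfs_file_mmap() reserves the pages and, for writable
	 * mappings, grows i_size. */
	void *p = mmap(NULL, 4 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	((char *)p)[0] = 1;	/* fault in the first huge page */
	munmap(p, 4 * HPAGE_SIZE);
	close(fd);
	return 0;
}
#endif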
  158. /*
  159. * Called under mmap_write_lock(mm).
  160. */
  161. static unsigned long
  162. hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
  163. unsigned long len, unsigned long pgoff, unsigned long flags)
  164. {
  165. struct hstate *h = hstate_file(file);
  166. struct vm_unmapped_area_info info;
  167. info.flags = 0;
  168. info.length = len;
  169. info.low_limit = current->mm->mmap_base;
  170. info.high_limit = arch_get_mmap_end(addr, len, flags);
  171. info.align_mask = PAGE_MASK & ~huge_page_mask(h);
  172. info.align_offset = 0;
  173. return vm_unmapped_area(&info);
  174. }
  175. static unsigned long
  176. hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
  177. unsigned long len, unsigned long pgoff, unsigned long flags)
  178. {
  179. struct hstate *h = hstate_file(file);
  180. struct vm_unmapped_area_info info;
  181. info.flags = VM_UNMAPPED_AREA_TOPDOWN;
  182. info.length = len;
  183. info.low_limit = max(PAGE_SIZE, mmap_min_addr);
  184. info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
  185. info.align_mask = PAGE_MASK & ~huge_page_mask(h);
  186. info.align_offset = 0;
  187. addr = vm_unmapped_area(&info);
  188. /*
  189. * A failed mmap() very likely causes application failure,
  190. * so fall back to the bottom-up function here. This scenario
  191. * can happen with large stack limits and large mmap()
  192. * allocations.
  193. */
  194. if (unlikely(offset_in_page(addr))) {
  195. VM_BUG_ON(addr != -ENOMEM);
  196. info.flags = 0;
  197. info.low_limit = current->mm->mmap_base;
  198. info.high_limit = arch_get_mmap_end(addr, len, flags);
  199. addr = vm_unmapped_area(&info);
  200. }
  201. return addr;
  202. }
  203. unsigned long
  204. generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  205. unsigned long len, unsigned long pgoff,
  206. unsigned long flags)
  207. {
  208. struct mm_struct *mm = current->mm;
  209. struct vm_area_struct *vma;
  210. struct hstate *h = hstate_file(file);
  211. const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
  212. if (len & ~huge_page_mask(h))
  213. return -EINVAL;
  214. if (len > TASK_SIZE)
  215. return -ENOMEM;
  216. if (flags & MAP_FIXED) {
  217. if (prepare_hugepage_range(file, addr, len))
  218. return -EINVAL;
  219. return addr;
  220. }
  221. if (addr) {
  222. addr = ALIGN(addr, huge_page_size(h));
  223. vma = find_vma(mm, addr);
  224. if (mmap_end - len >= addr &&
  225. (!vma || addr + len <= vm_start_gap(vma)))
  226. return addr;
  227. }
  228. /*
  229. * Use mm->get_unmapped_area value as a hint to use topdown routine.
  230. * If architectures have special needs, they should define their own
  231. * version of hugetlb_get_unmapped_area.
  232. */
  233. if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
  234. return hugetlb_get_unmapped_area_topdown(file, addr, len,
  235. pgoff, flags);
  236. return hugetlb_get_unmapped_area_bottomup(file, addr, len,
  237. pgoff, flags);
  238. }
  239. #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
  240. static unsigned long
  241. hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  242. unsigned long len, unsigned long pgoff,
  243. unsigned long flags)
  244. {
  245. return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
  246. }
  247. #endif
  248. /*
  249. * Support for read() - Find the page attached to f_mapping and copy out the
  250. * data. This provides functionality similar to filemap_read().
  251. */
  252. static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
  253. {
  254. struct file *file = iocb->ki_filp;
  255. struct hstate *h = hstate_file(file);
  256. struct address_space *mapping = file->f_mapping;
  257. struct inode *inode = mapping->host;
  258. unsigned long index = iocb->ki_pos >> huge_page_shift(h);
  259. unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
  260. unsigned long end_index;
  261. loff_t isize;
  262. ssize_t retval = 0;
  263. while (iov_iter_count(to)) {
  264. struct page *page;
  265. size_t nr, copied;
  266. /* nr is the maximum number of bytes to copy from this page */
  267. nr = huge_page_size(h);
  268. isize = i_size_read(inode);
  269. if (!isize)
  270. break;
  271. end_index = (isize - 1) >> huge_page_shift(h);
  272. if (index > end_index)
  273. break;
  274. if (index == end_index) {
  275. nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
  276. if (nr <= offset)
  277. break;
  278. }
  279. nr = nr - offset;
  280. /* Find the page */
  281. page = find_lock_page(mapping, index);
  282. if (unlikely(page == NULL)) {
  283. /*
  284. * We have a HOLE, zero out the user-buffer for the
  285. * length of the hole or request.
  286. */
  287. copied = iov_iter_zero(nr, to);
  288. } else {
  289. unlock_page(page);
  290. if (PageHWPoison(page)) {
  291. put_page(page);
  292. retval = -EIO;
  293. break;
  294. }
  295. /*
  296. * We have the page, copy it to user space buffer.
  297. */
  298. copied = copy_page_to_iter(page, offset, nr, to);
  299. put_page(page);
  300. }
  301. offset += copied;
  302. retval += copied;
  303. if (copied != nr && iov_iter_count(to)) {
  304. if (!retval)
  305. retval = -EFAULT;
  306. break;
  307. }
  308. index += offset >> huge_page_shift(h);
  309. offset &= ~huge_page_mask(h);
  310. }
  311. iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
  312. return retval;
  313. }
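/*
 * Illustrative sketch (added; not part of the original file): plain read()
 * on a hugetlbfs file goes through hugetlbfs_read_iter() above. The path
 * is an assumption for the example.
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/dev/hugepages/example", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Bytes in holes (pages never faulted in) read back as zeroes,
	 * matching the iov_iter_zero() path above. */
	ssize_t n = pread(fd, buf, sizeof(buf), 0);
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}
#endif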
  314. static int hugetlbfs_write_begin(struct file *file,
  315. struct address_space *mapping,
  316. loff_t pos, unsigned len,
  317. struct page **pagep, void **fsdata)
  318. {
  319. return -EINVAL;
  320. }
  321. static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
  322. loff_t pos, unsigned len, unsigned copied,
  323. struct page *page, void *fsdata)
  324. {
  325. BUG();
  326. return -EINVAL;
  327. }
  328. static void hugetlb_delete_from_page_cache(struct page *page)
  329. {
  330. ClearPageDirty(page);
  331. ClearPageUptodate(page);
  332. delete_from_page_cache(page);
  333. }
  334. /*
  335. * Called with i_mmap_rwsem held for inode based vma maps. This makes
  336. * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault
  337. * mutex for the page in the mapping. So, we cannot race with the page
  338. * being faulted into the vma.
  339. */
  340. static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
  341. unsigned long addr, struct page *page)
  342. {
  343. pte_t *ptep, pte;
  344. ptep = huge_pte_offset(vma->vm_mm, addr,
  345. huge_page_size(hstate_vma(vma)));
  346. if (!ptep)
  347. return false;
  348. pte = huge_ptep_get(ptep);
  349. if (huge_pte_none(pte) || !pte_present(pte))
  350. return false;
  351. if (pte_page(pte) == page)
  352. return true;
  353. return false;
  354. }
  355. /*
  356. * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
  357. * No, because the interval tree returns us only those vmas
  358. * which overlap the truncated area starting at pgoff,
  359. * and no vma on a 32-bit arch can span beyond 4GB.
  360. */
  361. static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
  362. {
  363. if (vma->vm_pgoff < start)
  364. return (start - vma->vm_pgoff) << PAGE_SHIFT;
  365. else
  366. return 0;
  367. }
  368. static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
  369. {
  370. unsigned long t_end;
  371. if (!end)
  372. return vma->vm_end;
  373. t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
  374. if (t_end > vma->vm_end)
  375. t_end = vma->vm_end;
  376. return t_end;
  377. }
  378. /*
  379. * Called with hugetlb fault mutex held. Therefore, no more mappings to
  380. * this folio can be created while executing the routine.
  381. */
  382. static void hugetlb_unmap_file_folio(struct hstate *h,
  383. struct address_space *mapping,
  384. struct folio *folio, pgoff_t index)
  385. {
  386. struct rb_root_cached *root = &mapping->i_mmap;
  387. struct hugetlb_vma_lock *vma_lock;
  388. struct page *page = &folio->page;
  389. struct vm_area_struct *vma;
  390. unsigned long v_start;
  391. unsigned long v_end;
  392. pgoff_t start, end;
  393. start = index * pages_per_huge_page(h);
  394. end = (index + 1) * pages_per_huge_page(h);
  395. i_mmap_lock_write(mapping);
  396. retry:
  397. vma_lock = NULL;
  398. vma_interval_tree_foreach(vma, root, start, end - 1) {
  399. v_start = vma_offset_start(vma, start);
  400. v_end = vma_offset_end(vma, end);
  401. if (!hugetlb_vma_maps_page(vma, vma->vm_start + v_start, page))
  402. continue;
  403. if (!hugetlb_vma_trylock_write(vma)) {
  404. vma_lock = vma->vm_private_data;
  405. /*
  406. * If we cannot get the vma lock, we need to drop
  407. * i_mmap_rwsem and take the locks in order. First,
  408. * take a ref on the vma_lock structure so that
  409. * we can be guaranteed it will not go away when
  410. * dropping i_mmap_rwsem.
  411. */
  412. kref_get(&vma_lock->refs);
  413. break;
  414. }
  415. unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
  416. NULL, ZAP_FLAG_DROP_MARKER);
  417. hugetlb_vma_unlock_write(vma);
  418. }
  419. i_mmap_unlock_write(mapping);
  420. if (vma_lock) {
  421. /*
  422. * Wait on vma_lock. We know it is still valid as we have
  423. * a reference. We must 'open code' vma locking as we do
  424. * not know if vma_lock is still attached to vma.
  425. */
  426. down_write(&vma_lock->rw_sema);
  427. i_mmap_lock_write(mapping);
  428. vma = vma_lock->vma;
  429. if (!vma) {
  430. /*
  431. * If lock is no longer attached to vma, then just
  432. * unlock, drop our reference and retry looking for
  433. * other vmas.
  434. */
  435. up_write(&vma_lock->rw_sema);
  436. kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
  437. goto retry;
  438. }
  439. /*
  440. * vma_lock is still attached to vma. Check to see if vma
  441. * still maps page and if so, unmap.
  442. */
  443. v_start = vma_offset_start(vma, start);
  444. v_end = vma_offset_end(vma, end);
  445. if (hugetlb_vma_maps_page(vma, vma->vm_start + v_start, page))
  446. unmap_hugepage_range(vma, vma->vm_start + v_start,
  447. v_end, NULL,
  448. ZAP_FLAG_DROP_MARKER);
  449. kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
  450. hugetlb_vma_unlock_write(vma);
  451. goto retry;
  452. }
  453. }
  454. static void
  455. hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
  456. zap_flags_t zap_flags)
  457. {
  458. struct vm_area_struct *vma;
  459. /*
  460. * end == 0 indicates that the entire range after start should be
  461. * unmapped. Note, end is exclusive, whereas the interval tree takes
  462. * an inclusive "last".
  463. */
  464. vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
  465. unsigned long v_start;
  466. unsigned long v_end;
  467. if (!hugetlb_vma_trylock_write(vma))
  468. continue;
  469. v_start = vma_offset_start(vma, start);
  470. v_end = vma_offset_end(vma, end);
  471. unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
  472. NULL, zap_flags);
  473. /*
  474. * Note that vma lock only exists for shared/non-private
  475. * vmas. Therefore, lock is not held when calling
  476. * unmap_hugepage_range for private vmas.
  477. */
  478. hugetlb_vma_unlock_write(vma);
  479. }
  480. }
  481. /*
  482. * Called with hugetlb fault mutex held.
  483. * Returns true if page was actually removed, false otherwise.
  484. */
  485. static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
  486. struct address_space *mapping,
  487. struct folio *folio, pgoff_t index,
  488. bool truncate_op)
  489. {
  490. bool ret = false;
  491. /*
  492. * If folio is mapped, it was faulted in after being
  493. * unmapped in caller. Unmap (again) while holding
  494. * the fault mutex. The mutex will prevent faults
  495. * until we finish removing the folio.
  496. */
  497. if (unlikely(folio_mapped(folio)))
  498. hugetlb_unmap_file_folio(h, mapping, folio, index);
  499. folio_lock(folio);
  500. /*
  501. * We must remove the folio from page cache before removing
  502. * the region/reserve map (hugetlb_unreserve_pages). In
  503. * rare out-of-memory conditions, removal of the region/reserve
  504. * map could fail. Correspondingly, the subpool and global
  505. * reserve usage counts may need to be adjusted.
  506. */
  507. VM_BUG_ON(HPageRestoreReserve(&folio->page));
  508. hugetlb_delete_from_page_cache(&folio->page);
  509. ret = true;
  510. if (!truncate_op) {
  511. if (unlikely(hugetlb_unreserve_pages(inode, index,
  512. index + 1, 1)))
  513. hugetlb_fix_reserve_counts(inode);
  514. }
  515. folio_unlock(folio);
  516. return ret;
  517. }
  518. /*
  519. * remove_inode_hugepages handles two distinct cases: truncation and hole
  520. * punch. There are subtle differences in operation for each case.
  521. *
  522. * truncation is indicated by end of range being LLONG_MAX
  523. * In this case, we first scan the range and release found pages.
  524. * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
  525. * maps and global counts. Page faults can race with truncation.
  526. * During faults, hugetlb_no_page() checks i_size before page allocation,
  527. * and again after obtaining page table lock. It will 'back out'
  528. * allocations in the truncated range.
  529. * hole punch is indicated if end is not LLONG_MAX
  530. * In the hole punch case we scan the range and release found pages.
  531. * Only when releasing a page is the associated region/reserve map
  532. * deleted. The region/reserve maps for ranges without associated
  533. * pages are not modified. Page faults can race with hole punch.
  534. * This is indicated if we find a mapped page.
  535. * Note: If the passed end of range value is beyond the end of file, but
  536. * not LLONG_MAX this routine still performs a hole punch operation.
  537. */
  538. static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
  539. loff_t lend)
  540. {
  541. struct hstate *h = hstate_inode(inode);
  542. struct address_space *mapping = &inode->i_data;
  543. const pgoff_t start = lstart >> huge_page_shift(h);
  544. const pgoff_t end = lend >> huge_page_shift(h);
  545. struct folio_batch fbatch;
  546. pgoff_t next, index;
  547. int i, freed = 0;
  548. bool truncate_op = (lend == LLONG_MAX);
  549. folio_batch_init(&fbatch);
  550. next = start;
  551. while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
  552. for (i = 0; i < folio_batch_count(&fbatch); ++i) {
  553. struct folio *folio = fbatch.folios[i];
  554. u32 hash = 0;
  555. index = folio->index;
  556. hash = hugetlb_fault_mutex_hash(mapping, index);
  557. mutex_lock(&hugetlb_fault_mutex_table[hash]);
  558. /*
  559. * Remove folio that was part of folio_batch.
  560. */
  561. if (remove_inode_single_folio(h, inode, mapping, folio,
  562. index, truncate_op))
  563. freed++;
  564. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  565. }
  566. folio_batch_release(&fbatch);
  567. cond_resched();
  568. }
  569. if (truncate_op)
  570. (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
  571. }
  572. static void hugetlbfs_evict_inode(struct inode *inode)
  573. {
  574. struct resv_map *resv_map;
  575. remove_inode_hugepages(inode, 0, LLONG_MAX);
  576. /*
  577. * Get the resv_map from the address space embedded in the inode.
  578. * This is the address space which points to any resv_map allocated
  579. * at inode creation time. If this is a device special inode,
  580. * i_mapping may not point to the original address space.
  581. */
  582. resv_map = (struct resv_map *)(&inode->i_data)->private_data;
  583. /* Only regular and link inodes have associated reserve maps */
  584. if (resv_map)
  585. resv_map_release(&resv_map->refs);
  586. clear_inode(inode);
  587. }
  588. static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
  589. {
  590. pgoff_t pgoff;
  591. struct address_space *mapping = inode->i_mapping;
  592. struct hstate *h = hstate_inode(inode);
  593. BUG_ON(offset & ~huge_page_mask(h));
  594. pgoff = offset >> PAGE_SHIFT;
  595. i_size_write(inode, offset);
  596. i_mmap_lock_write(mapping);
  597. if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
  598. hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
  599. ZAP_FLAG_DROP_MARKER);
  600. i_mmap_unlock_write(mapping);
  601. remove_inode_hugepages(inode, offset, LLONG_MAX);
  602. }
  603. static void hugetlbfs_zero_partial_page(struct hstate *h,
  604. struct address_space *mapping,
  605. loff_t start,
  606. loff_t end)
  607. {
  608. pgoff_t idx = start >> huge_page_shift(h);
  609. struct folio *folio;
  610. folio = filemap_lock_folio(mapping, idx);
  611. if (!folio)
  612. return;
  613. start = start & ~huge_page_mask(h);
  614. end = end & ~huge_page_mask(h);
  615. if (!end)
  616. end = huge_page_size(h);
  617. folio_zero_segment(folio, (size_t)start, (size_t)end);
  618. folio_unlock(folio);
  619. folio_put(folio);
  620. }
  621. static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
  622. {
  623. struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
  624. struct address_space *mapping = inode->i_mapping;
  625. struct hstate *h = hstate_inode(inode);
  626. loff_t hpage_size = huge_page_size(h);
  627. loff_t hole_start, hole_end;
  628. /*
  629. * hole_start and hole_end indicate the full pages within the hole.
  630. */
  631. hole_start = round_up(offset, hpage_size);
  632. hole_end = round_down(offset + len, hpage_size);
  633. inode_lock(inode);
  634. /* protected by i_rwsem */
  635. if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
  636. inode_unlock(inode);
  637. return -EPERM;
  638. }
  639. i_mmap_lock_write(mapping);
  640. /* If range starts before first full page, zero partial page. */
  641. if (offset < hole_start)
  642. hugetlbfs_zero_partial_page(h, mapping,
  643. offset, min(offset + len, hole_start));
  644. /* Unmap users of full pages in the hole. */
  645. if (hole_end > hole_start) {
  646. if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
  647. hugetlb_vmdelete_list(&mapping->i_mmap,
  648. hole_start >> PAGE_SHIFT,
  649. hole_end >> PAGE_SHIFT, 0);
  650. }
  651. /* If range extends beyond last full page, zero partial page. */
  652. if ((offset + len) > hole_end && (offset + len) > hole_start)
  653. hugetlbfs_zero_partial_page(h, mapping,
  654. hole_end, offset + len);
  655. i_mmap_unlock_write(mapping);
  656. /* Remove full pages from the file. */
  657. if (hole_end > hole_start)
  658. remove_inode_hugepages(inode, hole_start, hole_end);
  659. inode_unlock(inode);
  660. return 0;
  661. }
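/*
 * Illustrative sketch (added; not part of the original file): punching a
 * hole in a hugetlbfs file from userspace, which ends up in
 * hugetlbfs_punch_hole() above. The path and the 4 MB / 8 MB offsets are
 * assumptions (2 MB huge pages assumed).
 */
#if 0	/* userspace example, not kernel code */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/dev/hugepages/example", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The VFS requires FALLOC_FL_PUNCH_HOLE to be combined with
	 * FALLOC_FL_KEEP_SIZE. Whole huge pages inside [4M, 12M) are
	 * unmapped and freed; a partial huge page at either edge is
	 * zeroed in place. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4UL << 20, 8UL << 20) != 0)
		perror("fallocate");
	return 0;
}
#endif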
  662. static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
  663. loff_t len)
  664. {
  665. struct inode *inode = file_inode(file);
  666. struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
  667. struct address_space *mapping = inode->i_mapping;
  668. struct hstate *h = hstate_inode(inode);
  669. struct vm_area_struct pseudo_vma;
  670. struct mm_struct *mm = current->mm;
  671. loff_t hpage_size = huge_page_size(h);
  672. unsigned long hpage_shift = huge_page_shift(h);
  673. pgoff_t start, index, end;
  674. int error;
  675. u32 hash;
  676. if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
  677. return -EOPNOTSUPP;
  678. if (mode & FALLOC_FL_PUNCH_HOLE)
  679. return hugetlbfs_punch_hole(inode, offset, len);
  680. /*
  681. * Default preallocate case.
  682. * For this range, start is rounded down and end is rounded up
  683. * as well as being converted to page offsets.
  684. */
  685. start = offset >> hpage_shift;
  686. end = (offset + len + hpage_size - 1) >> hpage_shift;
  687. inode_lock(inode);
  688. /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
  689. error = inode_newsize_ok(inode, offset + len);
  690. if (error)
  691. goto out;
  692. if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
  693. error = -EPERM;
  694. goto out;
  695. }
  696. /*
  697. * Initialize a pseudo vma as this is required by the huge page
  698. * allocation routines. If NUMA is configured, use page index
  699. * as input to create an allocation policy.
  700. */
  701. vma_init(&pseudo_vma, mm);
  702. vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
  703. pseudo_vma.vm_file = file;
  704. for (index = start; index < end; index++) {
  705. /*
  706. * This is supposed to be the vaddr where the page is being
  707. * faulted in, but we have no vaddr here.
  708. */
  709. struct page *page;
  710. unsigned long addr;
  711. cond_resched();
  712. /*
  713. * fallocate(2) manpage permits EINTR; we may have been
  714. * interrupted because we are using up too much memory.
  715. */
  716. if (signal_pending(current)) {
  717. error = -EINTR;
  718. break;
  719. }
  720. /* Set numa allocation policy based on index */
  721. hugetlb_set_vma_policy(&pseudo_vma, inode, index);
  722. /* addr is the offset within the file (zero based) */
  723. addr = index * hpage_size;
  724. /* mutex taken here, fault path and hole punch */
  725. hash = hugetlb_fault_mutex_hash(mapping, index);
  726. mutex_lock(&hugetlb_fault_mutex_table[hash]);
  727. /* See if already present in mapping to avoid alloc/free */
  728. page = find_get_page(mapping, index);
  729. if (page) {
  730. put_page(page);
  731. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  732. hugetlb_drop_vma_policy(&pseudo_vma);
  733. continue;
  734. }
  735. /*
  736. * Allocate page without setting the avoid_reserve argument.
  737. * There certainly are no reserves associated with the
  738. * pseudo_vma. However, there could be shared mappings with
  739. * reserves for the file at the inode level. If we fallocate
  740. * pages in these areas, we need to consume the reserves
  741. * to keep reservation accounting consistent.
  742. */
  743. page = alloc_huge_page(&pseudo_vma, addr, 0);
  744. hugetlb_drop_vma_policy(&pseudo_vma);
  745. if (IS_ERR(page)) {
  746. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  747. error = PTR_ERR(page);
  748. goto out;
  749. }
  750. clear_huge_page(page, addr, pages_per_huge_page(h));
  751. __SetPageUptodate(page);
  752. error = hugetlb_add_to_page_cache(page, mapping, index);
  753. if (unlikely(error)) {
  754. restore_reserve_on_error(h, &pseudo_vma, addr, page);
  755. put_page(page);
  756. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  757. goto out;
  758. }
  759. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  760. SetHPageMigratable(page);
  761. /*
  762. * unlock_page() because the page was locked by hugetlb_add_to_page_cache();
  763. * put_page() drops the reference taken by alloc_huge_page().
  764. */
  765. unlock_page(page);
  766. put_page(page);
  767. }
  768. if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
  769. i_size_write(inode, offset + len);
  770. inode->i_ctime = current_time(inode);
  771. out:
  772. inode_unlock(inode);
  773. return error;
  774. }
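/*
 * Illustrative sketch (added; not part of the original file): preallocating
 * huge pages for a file via fallocate(2), i.e. the "default preallocate"
 * path of hugetlbfs_fallocate() above. The path and the 16 MB length are
 * assumptions for the example.
 */
#if 0	/* userspace example, not kernel code */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* mode == 0: allocate and zero huge pages for the first 16 MB up
	 * front, consuming subpool/reserve counts, and extend i_size. */
	if (fallocate(fd, 0, 0, 16UL << 20) != 0)
		perror("fallocate");
	return 0;
}
#endif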
  775. static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
  776. struct dentry *dentry, struct iattr *attr)
  777. {
  778. struct inode *inode = d_inode(dentry);
  779. struct hstate *h = hstate_inode(inode);
  780. int error;
  781. unsigned int ia_valid = attr->ia_valid;
  782. struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
  783. error = setattr_prepare(&init_user_ns, dentry, attr);
  784. if (error)
  785. return error;
  786. if (ia_valid & ATTR_SIZE) {
  787. loff_t oldsize = inode->i_size;
  788. loff_t newsize = attr->ia_size;
  789. if (newsize & ~huge_page_mask(h))
  790. return -EINVAL;
  791. /* protected by i_rwsem */
  792. if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
  793. (newsize > oldsize && (info->seals & F_SEAL_GROW)))
  794. return -EPERM;
  795. hugetlb_vmtruncate(inode, newsize);
  796. }
  797. setattr_copy(&init_user_ns, inode, attr);
  798. mark_inode_dirty(inode);
  799. return 0;
  800. }
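/*
 * Illustrative sketch (added; not part of the original file): truncating a
 * hugetlbfs file from userspace, which reaches hugetlbfs_setattr() above.
 * The path and the 8 MB size are assumptions (2 MB huge pages assumed).
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The new size must be a multiple of the huge page size;
	 * otherwise hugetlbfs_setattr() returns -EINVAL. */
	if (ftruncate(fd, 8UL << 20) != 0)
		perror("ftruncate");
	close(fd);
	return 0;
}
#endif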
  801. static struct inode *hugetlbfs_get_root(struct super_block *sb,
  802. struct hugetlbfs_fs_context *ctx)
  803. {
  804. struct inode *inode;
  805. inode = new_inode(sb);
  806. if (inode) {
  807. inode->i_ino = get_next_ino();
  808. inode->i_mode = S_IFDIR | ctx->mode;
  809. inode->i_uid = ctx->uid;
  810. inode->i_gid = ctx->gid;
  811. inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
  812. inode->i_op = &hugetlbfs_dir_inode_operations;
  813. inode->i_fop = &simple_dir_operations;
  814. /* directory inodes start off with i_nlink == 2 (for "." entry) */
  815. inc_nlink(inode);
  816. lockdep_annotate_inode_mutex_key(inode);
  817. }
  818. return inode;
  819. }
  820. /*
  821. * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
  822. * be taken from reclaim -- unlike regular filesystems. This needs an
  823. * annotation because huge_pmd_share() does an allocation under hugetlb's
  824. * i_mmap_rwsem.
  825. */
  826. static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
  827. static struct inode *hugetlbfs_get_inode(struct super_block *sb,
  828. struct inode *dir,
  829. umode_t mode, dev_t dev)
  830. {
  831. struct inode *inode;
  832. struct resv_map *resv_map = NULL;
  833. /*
  834. * Reserve maps are only needed for inodes that can have associated
  835. * page allocations.
  836. */
  837. if (S_ISREG(mode) || S_ISLNK(mode)) {
  838. resv_map = resv_map_alloc();
  839. if (!resv_map)
  840. return NULL;
  841. }
  842. inode = new_inode(sb);
  843. if (inode) {
  844. struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
  845. inode->i_ino = get_next_ino();
  846. inode_init_owner(&init_user_ns, inode, dir, mode);
  847. lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
  848. &hugetlbfs_i_mmap_rwsem_key);
  849. inode->i_mapping->a_ops = &hugetlbfs_aops;
  850. inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
  851. inode->i_mapping->private_data = resv_map;
  852. info->seals = F_SEAL_SEAL;
  853. switch (mode & S_IFMT) {
  854. default:
  855. init_special_inode(inode, mode, dev);
  856. break;
  857. case S_IFREG:
  858. inode->i_op = &hugetlbfs_inode_operations;
  859. inode->i_fop = &hugetlbfs_file_operations;
  860. break;
  861. case S_IFDIR:
  862. inode->i_op = &hugetlbfs_dir_inode_operations;
  863. inode->i_fop = &simple_dir_operations;
  864. /* directory inodes start off with i_nlink == 2 (for "." entry) */
  865. inc_nlink(inode);
  866. break;
  867. case S_IFLNK:
  868. inode->i_op = &page_symlink_inode_operations;
  869. inode_nohighmem(inode);
  870. break;
  871. }
  872. lockdep_annotate_inode_mutex_key(inode);
  873. } else {
  874. if (resv_map)
  875. kref_put(&resv_map->refs, resv_map_release);
  876. }
  877. return inode;
  878. }
  879. /*
  880. * File creation. Allocate an inode, and we're done..
  881. */
  882. static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
  883. struct dentry *dentry, umode_t mode, dev_t dev)
  884. {
  885. struct inode *inode;
  886. inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
  887. if (!inode)
  888. return -ENOSPC;
  889. dir->i_ctime = dir->i_mtime = current_time(dir);
  890. d_instantiate(dentry, inode);
  891. dget(dentry);/* Extra count - pin the dentry in core */
  892. return 0;
  893. }
  894. static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
  895. struct dentry *dentry, umode_t mode)
  896. {
  897. int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
  898. mode | S_IFDIR, 0);
  899. if (!retval)
  900. inc_nlink(dir);
  901. return retval;
  902. }
  903. static int hugetlbfs_create(struct user_namespace *mnt_userns,
  904. struct inode *dir, struct dentry *dentry,
  905. umode_t mode, bool excl)
  906. {
  907. return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
  908. }
  909. static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
  910. struct inode *dir, struct file *file,
  911. umode_t mode)
  912. {
  913. struct inode *inode;
  914. inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
  915. if (!inode)
  916. return -ENOSPC;
  917. dir->i_ctime = dir->i_mtime = current_time(dir);
  918. d_tmpfile(file, inode);
  919. return finish_open_simple(file, 0);
  920. }
  921. static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
  922. struct inode *dir, struct dentry *dentry,
  923. const char *symname)
  924. {
  925. struct inode *inode;
  926. int error = -ENOSPC;
  927. inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
  928. if (inode) {
  929. int l = strlen(symname)+1;
  930. error = page_symlink(inode, symname, l);
  931. if (!error) {
  932. d_instantiate(dentry, inode);
  933. dget(dentry);
  934. } else
  935. iput(inode);
  936. }
  937. dir->i_ctime = dir->i_mtime = current_time(dir);
  938. return error;
  939. }
  940. #ifdef CONFIG_MIGRATION
  941. static int hugetlbfs_migrate_folio(struct address_space *mapping,
  942. struct folio *dst, struct folio *src,
  943. enum migrate_mode mode)
  944. {
  945. int rc;
  946. rc = migrate_huge_page_move_mapping(mapping, dst, src);
  947. if (rc != MIGRATEPAGE_SUCCESS)
  948. return rc;
  949. if (hugetlb_page_subpool(&src->page)) {
  950. hugetlb_set_page_subpool(&dst->page,
  951. hugetlb_page_subpool(&src->page));
  952. hugetlb_set_page_subpool(&src->page, NULL);
  953. }
  954. if (mode != MIGRATE_SYNC_NO_COPY)
  955. folio_migrate_copy(dst, src);
  956. else
  957. folio_migrate_flags(dst, src);
  958. return MIGRATEPAGE_SUCCESS;
  959. }
  960. #else
  961. #define hugetlbfs_migrate_folio NULL
  962. #endif
  963. static int hugetlbfs_error_remove_page(struct address_space *mapping,
  964. struct page *page)
  965. {
  966. return 0;
  967. }
  968. /*
  969. * Display the mount options in /proc/mounts.
  970. */
  971. static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
  972. {
  973. struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
  974. struct hugepage_subpool *spool = sbinfo->spool;
  975. unsigned long hpage_size = huge_page_size(sbinfo->hstate);
  976. unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
  977. char mod;
  978. if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
  979. seq_printf(m, ",uid=%u",
  980. from_kuid_munged(&init_user_ns, sbinfo->uid));
  981. if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
  982. seq_printf(m, ",gid=%u",
  983. from_kgid_munged(&init_user_ns, sbinfo->gid));
  984. if (sbinfo->mode != 0755)
  985. seq_printf(m, ",mode=%o", sbinfo->mode);
  986. if (sbinfo->max_inodes != -1)
  987. seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
  988. hpage_size /= 1024;
  989. mod = 'K';
  990. if (hpage_size >= 1024) {
  991. hpage_size /= 1024;
  992. mod = 'M';
  993. }
  994. seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
  995. if (spool) {
  996. if (spool->max_hpages != -1)
  997. seq_printf(m, ",size=%llu",
  998. (unsigned long long)spool->max_hpages << hpage_shift);
  999. if (spool->min_hpages != -1)
  1000. seq_printf(m, ",min_size=%llu",
  1001. (unsigned long long)spool->min_hpages << hpage_shift);
  1002. }
  1003. return 0;
  1004. }
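/*
 * Illustrative sample (added; values are assumptions, not taken from the
 * file): a mount created with "pagesize=2M,size=256M,uid=1000,mode=0770"
 * would appear in /proc/mounts roughly as
 *
 *   none /mnt/huge hugetlbfs rw,relatime,uid=1000,mode=770,pagesize=2M,size=268435456 0 0
 *
 * with "size" and "min_size" printed in bytes and "pagesize" scaled to K
 * or M as in hugetlbfs_show_options() above.
 */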
  1005. static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
  1006. {
  1007. struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
  1008. struct hstate *h = hstate_inode(d_inode(dentry));
  1009. buf->f_type = HUGETLBFS_MAGIC;
  1010. buf->f_bsize = huge_page_size(h);
  1011. if (sbinfo) {
  1012. spin_lock(&sbinfo->stat_lock);
  1013. /* If no limits set, just report 0 or -1 for max/free/used
  1014. * blocks, like simple_statfs() */
  1015. if (sbinfo->spool) {
  1016. long free_pages;
  1017. spin_lock_irq(&sbinfo->spool->lock);
  1018. buf->f_blocks = sbinfo->spool->max_hpages;
  1019. free_pages = sbinfo->spool->max_hpages
  1020. - sbinfo->spool->used_hpages;
  1021. buf->f_bavail = buf->f_bfree = free_pages;
  1022. spin_unlock_irq(&sbinfo->spool->lock);
  1023. buf->f_files = sbinfo->max_inodes;
  1024. buf->f_ffree = sbinfo->free_inodes;
  1025. }
  1026. spin_unlock(&sbinfo->stat_lock);
  1027. }
  1028. buf->f_namelen = NAME_MAX;
  1029. return 0;
  1030. }
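/*
 * Illustrative sketch (added; not part of the original file): querying the
 * values filled in by hugetlbfs_statfs() above via statfs(2). The mount
 * point is an assumption for the example.
 */
#if 0	/* userspace example, not kernel code */
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs sfs;

	if (statfs("/dev/hugepages", &sfs) != 0) {
		perror("statfs");
		return 1;
	}
	/* f_bsize is the huge page size; block counts are in huge pages
	 * and are only meaningful when the mount was given a size limit. */
	printf("huge page size: %ld, total: %ld, free: %ld\n",
	       (long)sfs.f_bsize, (long)sfs.f_blocks, (long)sfs.f_bfree);
	return 0;
}
#endif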
  1031. static void hugetlbfs_put_super(struct super_block *sb)
  1032. {
  1033. struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
  1034. if (sbi) {
  1035. sb->s_fs_info = NULL;
  1036. if (sbi->spool)
  1037. hugepage_put_subpool(sbi->spool);
  1038. kfree(sbi);
  1039. }
  1040. }
  1041. static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
  1042. {
  1043. if (sbinfo->free_inodes >= 0) {
  1044. spin_lock(&sbinfo->stat_lock);
  1045. if (unlikely(!sbinfo->free_inodes)) {
  1046. spin_unlock(&sbinfo->stat_lock);
  1047. return 0;
  1048. }
  1049. sbinfo->free_inodes--;
  1050. spin_unlock(&sbinfo->stat_lock);
  1051. }
  1052. return 1;
  1053. }
  1054. static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
  1055. {
  1056. if (sbinfo->free_inodes >= 0) {
  1057. spin_lock(&sbinfo->stat_lock);
  1058. sbinfo->free_inodes++;
  1059. spin_unlock(&sbinfo->stat_lock);
  1060. }
  1061. }
  1062. static struct kmem_cache *hugetlbfs_inode_cachep;
  1063. static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
  1064. {
  1065. struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
  1066. struct hugetlbfs_inode_info *p;
  1067. if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
  1068. return NULL;
  1069. p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
  1070. if (unlikely(!p)) {
  1071. hugetlbfs_inc_free_inodes(sbinfo);
  1072. return NULL;
  1073. }
  1074. /*
  1075. * Any time after allocation, hugetlbfs_destroy_inode can be called
  1076. * for the inode. mpol_free_shared_policy is unconditionally called
  1077. * as part of hugetlbfs_destroy_inode. So, initialize policy here
  1078. * in case of a quick call to destroy.
  1079. *
  1080. * Note that the policy is initialized even if we are creating a
  1081. * private inode. This simplifies hugetlbfs_destroy_inode.
  1082. */
  1083. mpol_shared_policy_init(&p->policy, NULL);
  1084. return &p->vfs_inode;
  1085. }
  1086. static void hugetlbfs_free_inode(struct inode *inode)
  1087. {
  1088. kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
  1089. }
  1090. static void hugetlbfs_destroy_inode(struct inode *inode)
  1091. {
  1092. hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
  1093. mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
  1094. }
  1095. static const struct address_space_operations hugetlbfs_aops = {
  1096. .write_begin = hugetlbfs_write_begin,
  1097. .write_end = hugetlbfs_write_end,
  1098. .dirty_folio = noop_dirty_folio,
  1099. .migrate_folio = hugetlbfs_migrate_folio,
  1100. .error_remove_page = hugetlbfs_error_remove_page,
  1101. };
  1102. static void init_once(void *foo)
  1103. {
  1104. struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
  1105. inode_init_once(&ei->vfs_inode);
  1106. }
  1107. const struct file_operations hugetlbfs_file_operations = {
  1108. .read_iter = hugetlbfs_read_iter,
  1109. .mmap = hugetlbfs_file_mmap,
  1110. .fsync = noop_fsync,
  1111. .get_unmapped_area = hugetlb_get_unmapped_area,
  1112. .llseek = default_llseek,
  1113. .fallocate = hugetlbfs_fallocate,
  1114. };
  1115. static const struct inode_operations hugetlbfs_dir_inode_operations = {
  1116. .create = hugetlbfs_create,
  1117. .lookup = simple_lookup,
  1118. .link = simple_link,
  1119. .unlink = simple_unlink,
  1120. .symlink = hugetlbfs_symlink,
  1121. .mkdir = hugetlbfs_mkdir,
  1122. .rmdir = simple_rmdir,
  1123. .mknod = hugetlbfs_mknod,
  1124. .rename = simple_rename,
  1125. .setattr = hugetlbfs_setattr,
  1126. .tmpfile = hugetlbfs_tmpfile,
  1127. };
  1128. static const struct inode_operations hugetlbfs_inode_operations = {
  1129. .setattr = hugetlbfs_setattr,
  1130. };
  1131. static const struct super_operations hugetlbfs_ops = {
  1132. .alloc_inode = hugetlbfs_alloc_inode,
  1133. .free_inode = hugetlbfs_free_inode,
  1134. .destroy_inode = hugetlbfs_destroy_inode,
  1135. .evict_inode = hugetlbfs_evict_inode,
  1136. .statfs = hugetlbfs_statfs,
  1137. .put_super = hugetlbfs_put_super,
  1138. .show_options = hugetlbfs_show_options,
  1139. };
  1140. /*
  1141. * Convert size option passed from command line to number of huge pages
  1142. * in the pool specified by hstate. Size option could be in bytes
  1143. * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
  1144. */
  1145. static long
  1146. hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
  1147. enum hugetlbfs_size_type val_type)
  1148. {
  1149. if (val_type == NO_SIZE)
  1150. return -1;
  1151. if (val_type == SIZE_PERCENT) {
  1152. size_opt <<= huge_page_shift(h);
  1153. size_opt *= h->max_huge_pages;
  1154. do_div(size_opt, 100);
  1155. }
  1156. size_opt >>= huge_page_shift(h);
  1157. return size_opt;
  1158. }
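/*
 * Worked example (added for clarity, assuming 2 MB huge pages, so
 * huge_page_shift(h) == 21, and a pool of h->max_huge_pages == 1024):
 * "size=1G" is SIZE_STD, giving 0x40000000 >> 21 = 512 huge pages;
 * "size=50%" is SIZE_PERCENT, giving ((50 << 21) * 1024 / 100) >> 21
 * = 512 huge pages as well.
 */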
  1159. /*
  1160. * Parse one mount parameter.
  1161. */
  1162. static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
  1163. {
  1164. struct hugetlbfs_fs_context *ctx = fc->fs_private;
  1165. struct fs_parse_result result;
  1166. char *rest;
  1167. unsigned long ps;
  1168. int opt;
  1169. opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
  1170. if (opt < 0)
  1171. return opt;
  1172. switch (opt) {
  1173. case Opt_uid:
  1174. ctx->uid = make_kuid(current_user_ns(), result.uint_32);
  1175. if (!uid_valid(ctx->uid))
  1176. goto bad_val;
  1177. return 0;
  1178. case Opt_gid:
  1179. ctx->gid = make_kgid(current_user_ns(), result.uint_32);
  1180. if (!gid_valid(ctx->gid))
  1181. goto bad_val;
  1182. return 0;
  1183. case Opt_mode:
  1184. ctx->mode = result.uint_32 & 01777U;
  1185. return 0;
  1186. case Opt_size:
  1187. /* memparse() will accept a K/M/G without a digit */
  1188. if (!param->string || !isdigit(param->string[0]))
  1189. goto bad_val;
  1190. ctx->max_size_opt = memparse(param->string, &rest);
  1191. ctx->max_val_type = SIZE_STD;
  1192. if (*rest == '%')
  1193. ctx->max_val_type = SIZE_PERCENT;
  1194. return 0;
  1195. case Opt_nr_inodes:
  1196. /* memparse() will accept a K/M/G without a digit */
  1197. if (!param->string || !isdigit(param->string[0]))
  1198. goto bad_val;
  1199. ctx->nr_inodes = memparse(param->string, &rest);
  1200. return 0;
  1201. case Opt_pagesize:
  1202. ps = memparse(param->string, &rest);
  1203. ctx->hstate = size_to_hstate(ps);
  1204. if (!ctx->hstate) {
  1205. pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
  1206. return -EINVAL;
  1207. }
  1208. return 0;
  1209. case Opt_min_size:
  1210. /* memparse() will accept a K/M/G without a digit */
  1211. if (!param->string || !isdigit(param->string[0]))
  1212. goto bad_val;
  1213. ctx->min_size_opt = memparse(param->string, &rest);
  1214. ctx->min_val_type = SIZE_STD;
  1215. if (*rest == '%')
  1216. ctx->min_val_type = SIZE_PERCENT;
  1217. return 0;
  1218. default:
  1219. return -EINVAL;
  1220. }
  1221. bad_val:
  1222. return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
  1223. param->string, param->key);
  1224. }
  1225. /*
  1226. * Validate the parsed options.
  1227. */
  1228. static int hugetlbfs_validate(struct fs_context *fc)
  1229. {
  1230. struct hugetlbfs_fs_context *ctx = fc->fs_private;
  1231. /*
  1232. * Use huge page pool size (in hstate) to convert the size
  1233. * options to number of huge pages. If NO_SIZE, -1 is returned.
  1234. */
  1235. ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
  1236. ctx->max_size_opt,
  1237. ctx->max_val_type);
  1238. ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
  1239. ctx->min_size_opt,
  1240. ctx->min_val_type);
  1241. /*
  1242. * If max_size was specified, then min_size must be smaller
  1243. */
  1244. if (ctx->max_val_type > NO_SIZE &&
  1245. ctx->min_hpages > ctx->max_hpages) {
  1246. pr_err("Minimum size can not be greater than maximum size\n");
  1247. return -EINVAL;
  1248. }
  1249. return 0;
  1250. }
  1251. static int
  1252. hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
  1253. {
  1254. struct hugetlbfs_fs_context *ctx = fc->fs_private;
  1255. struct hugetlbfs_sb_info *sbinfo;
  1256. sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
  1257. if (!sbinfo)
  1258. return -ENOMEM;
  1259. sb->s_fs_info = sbinfo;
  1260. spin_lock_init(&sbinfo->stat_lock);
  1261. sbinfo->hstate = ctx->hstate;
  1262. sbinfo->max_inodes = ctx->nr_inodes;
  1263. sbinfo->free_inodes = ctx->nr_inodes;
  1264. sbinfo->spool = NULL;
  1265. sbinfo->uid = ctx->uid;
  1266. sbinfo->gid = ctx->gid;
  1267. sbinfo->mode = ctx->mode;
  1268. /*
  1269. * Allocate and initialize subpool if maximum or minimum size is
  1270. * specified. Any needed reservations (for minimum size) are taken
  1271. * when the subpool is created.
  1272. */
  1273. if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
  1274. sbinfo->spool = hugepage_new_subpool(ctx->hstate,
  1275. ctx->max_hpages,
  1276. ctx->min_hpages);
  1277. if (!sbinfo->spool)
  1278. goto out_free;
  1279. }
  1280. sb->s_maxbytes = MAX_LFS_FILESIZE;
  1281. sb->s_blocksize = huge_page_size(ctx->hstate);
  1282. sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
  1283. sb->s_magic = HUGETLBFS_MAGIC;
  1284. sb->s_op = &hugetlbfs_ops;
  1285. sb->s_time_gran = 1;
  1286. /*
  1287. * Due to the special and limited functionality of hugetlbfs, it does
  1288. * not work well as a stacking filesystem.
  1289. */
  1290. sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
  1291. sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
  1292. if (!sb->s_root)
  1293. goto out_free;
  1294. return 0;
  1295. out_free:
  1296. kfree(sbinfo->spool);
  1297. kfree(sbinfo);
  1298. return -ENOMEM;
  1299. }
  1300. static int hugetlbfs_get_tree(struct fs_context *fc)
  1301. {
  1302. int err = hugetlbfs_validate(fc);
  1303. if (err)
  1304. return err;
  1305. return get_tree_nodev(fc, hugetlbfs_fill_super);
  1306. }
  1307. static void hugetlbfs_fs_context_free(struct fs_context *fc)
  1308. {
  1309. kfree(fc->fs_private);
  1310. }
  1311. static const struct fs_context_operations hugetlbfs_fs_context_ops = {
  1312. .free = hugetlbfs_fs_context_free,
  1313. .parse_param = hugetlbfs_parse_param,
  1314. .get_tree = hugetlbfs_get_tree,
  1315. };
  1316. static int hugetlbfs_init_fs_context(struct fs_context *fc)
  1317. {
  1318. struct hugetlbfs_fs_context *ctx;
  1319. ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
  1320. if (!ctx)
  1321. return -ENOMEM;
  1322. ctx->max_hpages = -1; /* No limit on size by default */
  1323. ctx->nr_inodes = -1; /* No limit on number of inodes by default */
  1324. ctx->uid = current_fsuid();
  1325. ctx->gid = current_fsgid();
  1326. ctx->mode = 0755;
  1327. ctx->hstate = &default_hstate;
  1328. ctx->min_hpages = -1; /* No default minimum size */
  1329. ctx->max_val_type = NO_SIZE;
  1330. ctx->min_val_type = NO_SIZE;
  1331. fc->fs_private = ctx;
  1332. fc->ops = &hugetlbfs_fs_context_ops;
  1333. return 0;
  1334. }
  1335. static struct file_system_type hugetlbfs_fs_type = {
  1336. .name = "hugetlbfs",
  1337. .init_fs_context = hugetlbfs_init_fs_context,
  1338. .parameters = hugetlb_fs_parameters,
  1339. .kill_sb = kill_litter_super,
  1340. };
  1341. static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
  1342. static int can_do_hugetlb_shm(void)
  1343. {
  1344. kgid_t shm_group;
  1345. shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
  1346. return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
  1347. }
  1348. static int get_hstate_idx(int page_size_log)
  1349. {
  1350. struct hstate *h = hstate_sizelog(page_size_log);
  1351. if (!h)
  1352. return -1;
  1353. return hstate_index(h);
  1354. }
  1355. /*
  1356. * Note that size should be aligned to the proper hugepage size on the caller's
  1357. * side; otherwise, hugetlb_reserve_pages reserves one less hugepage than intended.
  1358. */
  1359. struct file *hugetlb_file_setup(const char *name, size_t size,
  1360. vm_flags_t acctflag, int creat_flags,
  1361. int page_size_log)
  1362. {
  1363. struct inode *inode;
  1364. struct vfsmount *mnt;
  1365. int hstate_idx;
  1366. struct file *file;
  1367. hstate_idx = get_hstate_idx(page_size_log);
  1368. if (hstate_idx < 0)
  1369. return ERR_PTR(-ENODEV);
  1370. mnt = hugetlbfs_vfsmount[hstate_idx];
  1371. if (!mnt)
  1372. return ERR_PTR(-ENOENT);
  1373. if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
  1374. struct ucounts *ucounts = current_ucounts();
  1375. if (user_shm_lock(size, ucounts)) {
  1376. pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
  1377. current->comm, current->pid);
  1378. user_shm_unlock(size, ucounts);
  1379. }
  1380. return ERR_PTR(-EPERM);
  1381. }
  1382. file = ERR_PTR(-ENOSPC);
  1383. inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
  1384. if (!inode)
  1385. goto out;
  1386. if (creat_flags == HUGETLB_SHMFS_INODE)
  1387. inode->i_flags |= S_PRIVATE;
  1388. inode->i_size = size;
  1389. clear_nlink(inode);
  1390. if (!hugetlb_reserve_pages(inode, 0,
  1391. size >> huge_page_shift(hstate_inode(inode)), NULL,
  1392. acctflag))
  1393. file = ERR_PTR(-ENOMEM);
  1394. else
  1395. file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
  1396. &hugetlbfs_file_operations);
  1397. if (!IS_ERR(file))
  1398. return file;
  1399. iput(inode);
  1400. out:
  1401. return file;
  1402. }
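/*
 * Illustrative sketch (added; not part of the original file): two common
 * userspace paths that end up in hugetlb_file_setup(), SysV shared memory
 * with SHM_HUGETLB and memfd_create(MFD_HUGETLB). The 8 MB size and the
 * memfd name are assumptions; the memfd_create() wrapper needs glibc 2.27+.
 */
#if 0	/* userspace example, not kernel code */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/shm.h>

int main(void)
{
	/* SysV shared memory with SHM_HUGETLB reaches hugetlb_file_setup()
	 * via ipc/shm.c. */
	int shmid = shmget(IPC_PRIVATE, 8UL << 20,
			   IPC_CREAT | SHM_HUGETLB | 0600);
	if (shmid < 0)
		perror("shmget");

	/* memfd_create(MFD_HUGETLB) is the other common entry point. */
	int mfd = memfd_create("huge-example", MFD_HUGETLB);
	if (mfd < 0)
		perror("memfd_create");
	return 0;
}
#endif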
  1403. static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
  1404. {
  1405. struct fs_context *fc;
  1406. struct vfsmount *mnt;
  1407. fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
  1408. if (IS_ERR(fc)) {
  1409. mnt = ERR_CAST(fc);
  1410. } else {
  1411. struct hugetlbfs_fs_context *ctx = fc->fs_private;
  1412. ctx->hstate = h;
  1413. mnt = fc_mount(fc);
  1414. put_fs_context(fc);
  1415. }
  1416. if (IS_ERR(mnt))
  1417. pr_err("Cannot mount internal hugetlbfs for page size %luK",
  1418. huge_page_size(h) / SZ_1K);
  1419. return mnt;
  1420. }
  1421. static int __init init_hugetlbfs_fs(void)
  1422. {
  1423. struct vfsmount *mnt;
  1424. struct hstate *h;
  1425. int error;
  1426. int i;
  1427. if (!hugepages_supported()) {
  1428. pr_info("disabling because there are no supported hugepage sizes\n");
  1429. return -ENOTSUPP;
  1430. }
  1431. error = -ENOMEM;
  1432. hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
  1433. sizeof(struct hugetlbfs_inode_info),
  1434. 0, SLAB_ACCOUNT, init_once);
  1435. if (hugetlbfs_inode_cachep == NULL)
  1436. goto out;
  1437. error = register_filesystem(&hugetlbfs_fs_type);
  1438. if (error)
  1439. goto out_free;
  1440. /* default hstate mount is required */
  1441. mnt = mount_one_hugetlbfs(&default_hstate);
  1442. if (IS_ERR(mnt)) {
  1443. error = PTR_ERR(mnt);
  1444. goto out_unreg;
  1445. }
  1446. hugetlbfs_vfsmount[default_hstate_idx] = mnt;
  1447. /* other hstates are optional */
  1448. i = 0;
  1449. for_each_hstate(h) {
  1450. if (i == default_hstate_idx) {
  1451. i++;
  1452. continue;
  1453. }
  1454. mnt = mount_one_hugetlbfs(h);
  1455. if (IS_ERR(mnt))
  1456. hugetlbfs_vfsmount[i] = NULL;
  1457. else
  1458. hugetlbfs_vfsmount[i] = mnt;
  1459. i++;
  1460. }
  1461. return 0;
  1462. out_unreg:
  1463. (void)unregister_filesystem(&hugetlbfs_fs_type);
  1464. out_free:
  1465. kmem_cache_destroy(hugetlbfs_inode_cachep);
  1466. out:
  1467. return error;
  1468. }
  1469. fs_initcall(init_hugetlbfs_fs)