/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so we have to reuse other tail struct pages
 * to store the metadata. In order to avoid conflicts caused by subsequent use
 * of more tail struct pages, we gather these discrete indexes of the tail
 * struct pages here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	SUBPAGE_INDEX_HWPOISON,
#endif
	__NR_USED_SUBPAGE,
};
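
/*
 * Illustrative sketch, not part of the original header: metadata stored at a
 * SUBPAGE_INDEX_* slot is reached by offsetting the head page by the index
 * and using that tail page's ->private field, as the
 * hugetlb_page_subpool()/hugetlb_set_page_subpool() helpers further below do:
 *
 *	subpool = (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 *	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)spool);
 */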

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
	struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
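
/*
 * Illustrative sketch, not part of the original header: because regions are
 * half-open intervals [from, to), the number of huge pages a region covers
 * is simply (to - from), e.g. [0, 4) covers four huge pages. The helper
 * name below is hypothetical:
 *
 *	static inline long file_region_pages(struct file_region *rg)
 *	{
 *		return rg->to - rg->from;
 *	}
 */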

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);

extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
				unsigned long address, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory (PGD) level. If an arch supports
 * hugepages at the PGD level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)	false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page, i.e. after allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
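
/*
 * Illustrative sketch, not part of the original header: each HPAGEFLAG()
 * line above expands into a test/set/clear triple that operates on the head
 * page's ->private bits. For HPAGEFLAG(Migratable, migratable) that yields:
 *
 *	SetHPageMigratable(page);	sets HPG_migratable
 *	HPageMigratable(page);		tests HPG_migratable
 *	ClearHPageMigratable(page);	clears HPG_migratable
 */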

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}
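
/*
 * Illustrative sketch, not part of the original header: page_size_log is a
 * log2 huge page size, typically extracted from mmap()'s MAP_HUGE_SHIFT
 * flag encoding by callers such as hugetlb_file_setup():
 *
 *	hstate_sizelog(0);	the default hstate
 *	hstate_sizelog(21);	the 2 MB hstate, if registered (1UL << 21)
 *	hstate_sizelog(30);	the 1 GB hstate, if registered (1UL << 30)
 */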

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
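
/*
 * Illustrative worked example, not part of the original header: for the
 * 2 MB hstate on a system with 4 KB base pages (PAGE_SHIFT == 12, so
 * h->order == 9):
 *
 *	huge_page_size(h)	== 4096UL << 9	== 2 MB
 *	huge_page_shift(h)	== 9 + 12	== 21
 *	pages_per_huge_page(h)	== 1 << 9	== 512
 *	blocks_per_huge_page(h)	== 2 MB / 512	== 4096 512-byte blocks
 */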

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
#else
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

bool __vma_private_lock(struct vm_area_struct *vma);

/*
 * Movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability of any huge page should be
 * required only if the huge page size is supported for migration.
 * There is no reason for a huge page to be movable if it is not
 * migratable to start with. Also, the size of the huge page
 * should be large enough to be placed under a movable zone and
 * still feasible enough to be migratable. Just the presence in
 * the movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable because it is not
 * feasible to migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce a specific node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);
	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
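
/*
 * Illustrative sketch, not part of the original header: a migration path
 * that must allocate the target page on a specific node could build its
 * mask like this, which yields GFP_HIGHUSER_MOVABLE | __GFP_THISNODE when
 * the hstate is movable:
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 */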

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
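
/*
 * Illustrative sketch, not part of the original header: the start/commit
 * pair brackets a permission change so an arch can expose the transient
 * cleared state; a protection-change loop would use it roughly as follows,
 * assuming the generic huge_pte_modify() helper:
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */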

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
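
/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * for examining a hugetlb PTE under its page table lock:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	pte_t pte = huge_ptep_get(ptep);
 *	... examine or modify the entry ...
 *	spin_unlock(ptl);
 */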

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */