/* arch/x86/include/asm/page.h (scraped copy; viewer size/line-number gutter removed) */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ASM_X86_PAGE_H
  3. #define _ASM_X86_PAGE_H
  4. #include <linux/types.h>
  5. #ifdef __KERNEL__
  6. #include <asm/page_types.h>
  7. #ifdef CONFIG_X86_64
  8. #include <asm/page_64.h>
  9. #else
  10. #include <asm/page_32.h>
  11. #endif /* CONFIG_X86_64 */
  12. #ifndef __ASSEMBLY__
  13. struct page;
  14. #include <linux/range.h>
  15. extern struct range pfn_mapped[];
  16. extern int nr_pfn_mapped;
/*
 * clear_user_page - zero a page that is (or will be) mapped into user space.
 * @page:  kernel virtual address of the page to clear
 * @vaddr: user virtual address of the mapping (unused on x86; kept for the
 *         arch-independent interface — some architectures need it for
 *         cache handling)
 * @pg:    struct page backing @page (unused on x86)
 *
 * On x86 this is just clear_page(); no cache flushing is required.
 */
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}
/*
 * copy_user_page - copy one user-space page to another.
 * @to:     kernel virtual address of the destination page
 * @from:   kernel virtual address of the source page
 * @vaddr:  user virtual address of the mapping (unused on x86; part of the
 *          arch-independent interface)
 * @topage: struct page backing @to (unused on x86)
 *
 * On x86 this is just copy_page(); no cache maintenance is needed.
 */
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
/*
 * Allocate a zeroed highmem page for a user mapping, preferring the
 * movable zone. NOTE(review): this tree additionally passes __GFP_CMA
 * (not present in mainline x86) — presumably to allow CMA-backed pages
 * for movable user allocations; confirm against this tree's gfp flags.
 */
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_CMA, vma, vaddr)
/* Tell the generic highmem code the arch provides the helper above. */
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/* Kernel virtual address -> physical address. */
#ifndef __pa
#define __pa(x)		__phys_addr((unsigned long)(x))
#endif

/* As __pa(), but via the unchecked __phys_addr_nodebug() path. */
#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))

/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic. */
/*
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during __pa() calculation and can optimize it unexpectedly.
 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
 * case properly. Once all supported versions of gcc understand it, we can
 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
 */
#define __pa_symbol(x) \
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))

/* Physical address -> kernel virtual address (direct-map offset). */
#ifndef __va
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif

/* Boot-time variants are identical to the regular ones on x86. */
#define __boot_va(x)		__va(x)
#define __boot_pa(x)		__pa(x)

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
/* Page frame number -> kernel virtual address of that frame. */
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

extern bool __virt_addr_valid(unsigned long kaddr);
/* True iff @kaddr is a valid direct-map kernel virtual address. */
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
  58. static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
  59. {
  60. return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
  61. }
  62. static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
  63. {
  64. return __canonical_address(vaddr, vaddr_bits) == vaddr;
  65. }
  66. #endif /* __ASSEMBLY__ */
  67. #include <asm-generic/memory_model.h>
  68. #include <asm-generic/getorder.h>
  69. #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
  70. #endif /* __KERNEL__ */
  71. #endif /* _ASM_X86_PAGE_H */