/* pte-walk.h — powerpc lockless page-table walk helpers */
#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

/*
 * Low-level lockless walk of @pgdir for the pte mapping @ea.
 * Don't use this directly -- it performs none of the debug sanity
 * checks the inline wrappers below add.  On return *is_thp and
 * *hshift (when non-NULL) describe any huge mapping found.
 */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);
  7. static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
  8. bool *is_thp, unsigned *hshift)
  9. {
  10. pte_t *pte;
  11. VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
  12. pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
  13. #if defined(CONFIG_DEBUG_VM) && \
  14. !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
  15. /*
  16. * We should not find huge page if these configs are not enabled.
  17. */
  18. if (hshift)
  19. WARN_ON(*hshift);
  20. #endif
  21. return pte;
  22. }
  23. static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
  24. {
  25. pgd_t *pgdir = init_mm.pgd;
  26. return __find_linux_pte(pgdir, ea, NULL, hshift);
  27. }
  28. /*
  29. * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
  30. * physical address, without taking locks. This can be used in real-mode.
  31. */
  32. static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
  33. {
  34. pte_t *ptep;
  35. phys_addr_t pa;
  36. int hugepage_shift;
  37. /*
  38. * init_mm does not free page tables, and does not do THP. It may
  39. * have huge pages from huge vmalloc / ioremap etc.
  40. */
  41. ptep = find_init_mm_pte(addr, &hugepage_shift);
  42. if (WARN_ON(!ptep))
  43. return 0;
  44. pa = PFN_PHYS(pte_pfn(*ptep));
  45. if (!hugepage_shift)
  46. hugepage_shift = PAGE_SHIFT;
  47. pa |= addr & ((1ul << hugepage_shift) - 1);
  48. return pa;
  49. }
  50. /*
  51. * This is what we should always use. Any other lockless page table lookup needs
  52. * careful audit against THP split.
  53. */
  54. static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
  55. bool *is_thp, unsigned *hshift)
  56. {
  57. pte_t *pte;
  58. VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
  59. VM_WARN(pgdir != current->mm->pgd,
  60. "%s lock less page table lookup called on wrong mm\n", __func__);
  61. pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
  62. #if defined(CONFIG_DEBUG_VM) && \
  63. !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
  64. /*
  65. * We should not find huge page if these configs are not enabled.
  66. */
  67. if (hshift)
  68. WARN_ON(*hshift);
  69. #endif
  70. return pte;
  71. }
  72. #endif /* _ASM_POWERPC_PTE_WALK_H */