pgalloc.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <asm/mem-layout.h>
#include <asm/atomic.h>

#include <asm-generic/pgalloc.h>

extern unsigned long long kmap_generation;
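/*
 * kmap_generation (above) counts updates to the kernel portion of the
 * page tables; each mm records the generation its private copy last
 * matched (mm->context.generation), which lets stale slave copies be
 * detected and refreshed at switch_mm() time, as described below.
 */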

/*
 * Page table creation interface
 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return NULL;

	/*
	 * There may be better ways to do this, but to ensure
	 * that new address spaces always contain the kernel
	 * base mapping, and to ensure that the user area is
	 * initially marked invalid, initialize the new map
	 * with a copy of the kernel's persistent map.
	 */

	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
	mm->context.generation = kmap_generation;

	/* Physical version is what is passed to virtual machine on switch */
	mm->context.ptbase = __pa(pgd);

	return pgd;
}
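
/*
 * A fresh pgd therefore starts life as an exact snapshot of
 * swapper_pg_dir: user entries invalid, kernel entries current as of
 * 'kmap_generation'. The returned virtual pointer and the physical
 * address stashed in context.ptbase refer to the same single page.
 */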

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	/*
	 * Conveniently, zero in 3 LSB means indirect 4K page table.
	 * Not so convenient when you're trying to vary the page size.
	 */
	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
		HEXAGON_L1_PTE_SIZE));
}
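
/*
 * Illustrative layout of the L1 entry built above, inferred from the
 * "zero in 3 LSB" note (the Hexagon VM spec is authoritative): the
 * physical address of the L2 pte page fills the high bits, and the low
 * bits encode the entry type/size, with HEXAGON_L1_PTE_SIZE selecting
 * the "indirect 4K page table" case.
 *
 *	31                          12  11   3  2    0
 *	+------------------------------+-------+------+
 *	|  PA of L2 page table (PFN)   |  ...  | size |
 *	+------------------------------+-------+------+
 */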

/*
 * Other architectures seem to have ways of making all processes
 * share the same pmd's for their kernel mappings, but the v0.3
 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
 * segments.  We track "generations" of the kernel map to minimize
 * overhead, and update the "slave" copies of the kernel mappings
 * as part of switch_mm.  However, we still need to update the
 * kernel map of the active thread who's calling pmd_populate_kernel...
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	extern spinlock_t kmap_gen_lock;
	pmd_t *ppmd;
	int pmdindex;
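
	/*
	 * Bumping the global generation implicitly marks every other
	 * mm's kernel map stale; those slave copies catch up lazily at
	 * their next switch_mm(). Only the two maps touched below (the
	 * target mm's and the active thread's) are stamped current now.
	 */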

	spin_lock(&kmap_gen_lock);
	kmap_generation++;
	mm->context.generation = kmap_generation;
	current->active_mm->context.generation = kmap_generation;
	spin_unlock(&kmap_gen_lock);

	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));

	/*
	 * Now the "slave" copy of the current thread.
	 * This is pointer arithmetic, not byte addresses!
	 */
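	/*
	 * Illustrative: if pmd points at the Nth entry of mm->pgd, then
	 * pmdindex comes out as N (an element offset, not bytes), and
	 * ppmd addresses the Nth entry of the active thread's map,
	 * i.e. the same kernel segment slot in the other table.
	 */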
	pmdindex = (pgd_t *)pmd - mm->pgd;
	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
	if (pmdindex > max_kernel_seg)
		max_kernel_seg = pmdindex;
}
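
/*
 * Hedged sketch of the consumer side (the real code lives in the
 * arch's switch_mm(), not in this header; names are illustrative):
 *
 *	if (next->context.generation < kmap_generation) {
 *		for (i = first_kernel_seg; i <= max_kernel_seg; i++)
 *			next->pgd[i] = init_mm.pgd[i];
 *		next->context.generation = kmap_generation;
 *	}
 *
 * followed by handing next->context.ptbase to the virtual machine.
 */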

/*
 * pgtable_pte_page_dtor() undoes the constructor work done when the
 * pte page was allocated; the page itself is freed once the mmu_gather
 * batch is flushed.
 */
#define __pte_free_tlb(tlb, pte, addr)		\
do {						\
	pgtable_pte_page_dtor((pte));		\
	tlb_remove_page((tlb), (pte));		\
} while (0)

#endif